From 7a6bbd5ba31882c6826b542520c7f599be547f59 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 13 Nov 2023 14:54:51 -0500
Subject: [PATCH] WIP: Merge osbuildbootc into here, use osbuild/images PR

- Take the code from
  https://github.com/achilleas-k/images/tree/bifrost-image/cmd/osbuild-deploy-container
  and merge it into this repository, pulling in the code from
  https://github.com/osbuild/images/pull/243 via a go.mod `replace`
  directive (sketched below, after the diffstat)
- Also merge in osbuildbootc
---
 Containerfile                                  |    4 +-
 Containerfile.orig => Containerfile.odc        |    6 +-
 Containerfile.osbuildbootc                     |    9 +
 Makefile                                       |   13 +-
 README-osbuildbootc.md                         |   53 +
 README.md                                      |   54 +-
 ...orig => README.md.osbuild-deploy-container  |    0
 build.sh                                       |    1 -
 cmd/osbuild-deploy-container/fedora-eln.json   |   58 +
 cmd/osbuild-deploy-container/image.go          |  113 +
 cmd/osbuild-deploy-container/main.go           |  181 +
 .../partition_tables.go                        |  107 +
 go.mod                                         |   92 +-
 go.sum                                         |  347 +-
 [... diffstat entries for the vendored Go dependencies under vendor/
 elided: chiefly the containers/common, containers/image, and
 containers/storage stack with its transitive dependencies, plus
 github.com/osbuild/images ...]
.../pkg/osbuild/systemd_journald_stage.go | 92 + .../pkg/osbuild/systemd_logind_stage.go | 43 + .../pkg/osbuild/systemd_preset_stage.go | 56 + .../images/pkg/osbuild/systemd_stage.go | 17 + .../images/pkg/osbuild/systemd_unit_stage.go | 42 + .../osbuild/images/pkg/osbuild/tar_stage.go | 99 + .../images/pkg/osbuild/timezone_stage.go | 14 + .../images/pkg/osbuild/tmpfilesd_stage.go | 61 + .../osbuild/images/pkg/osbuild/tree_input.go | 30 + .../images/pkg/osbuild/truncate_stage.go | 20 + .../osbuild/images/pkg/osbuild/tuned_stage.go | 40 + .../images/pkg/osbuild/udev_rules_stage.go | 257 + .../osbuild/images/pkg/osbuild/users_stage.go | 111 + .../osbuild/images/pkg/osbuild/v1result.go | 148 + .../images/pkg/osbuild/waagent_conf.go | 19 + .../images/pkg/osbuild/wsl_conf_stage.go | 18 + .../osbuild/images/pkg/osbuild/xfs_mount.go | 10 + .../images/pkg/osbuild/xorrisofs_stage.go | 40 + .../osbuild/images/pkg/osbuild/xz_stage.go | 40 + .../images/pkg/osbuild/yum_config_stage.go | 57 + .../images/pkg/osbuild/yum_repos_stage.go | 146 + .../images/pkg/osbuild/zipl_inst_stage.go | 69 + .../osbuild/images/pkg/osbuild/zipl_stage.go | 25 + .../osbuild/images/pkg/ostree/errors.go | 47 + .../osbuild/images/pkg/ostree/ostree.go | 267 + .../osbuild/images/pkg/platform/aarch64.go | 62 + .../osbuild/images/pkg/platform/platform.go | 108 + .../osbuild/images/pkg/platform/ppc64le.go | 45 + .../osbuild/images/pkg/platform/s390x.go | 38 + .../osbuild/images/pkg/platform/x86_64.go | 52 + .../osbuild/images/pkg/rhsm/facts/facts.go | 29 + .../osbuild/images/pkg/rhsm/secrets.go | 190 + .../osbuild/images/pkg/rpmmd/metadata.go | 39 + .../osbuild/images/pkg/rpmmd/repository.go | 458 + .../osbuild/images/pkg/runner/centos.go | 39 + .../osbuild/images/pkg/runner/fedora.go | 19 + .../osbuild/images/pkg/runner/linux.go | 16 + .../osbuild/images/pkg/runner/rhel.go | 40 + .../osbuild/images/pkg/runner/runner.go | 6 + .../images/pkg/subscription/subscription.go | 20 + vendor/github.com/proglottis/gpgme/.gitignore | 1 + vendor/github.com/proglottis/gpgme/LICENSE | 12 + vendor/github.com/proglottis/gpgme/README.md | 13 + .../github.com/proglottis/gpgme/callbacks.go | 42 + vendor/github.com/proglottis/gpgme/data.go | 197 + vendor/github.com/proglottis/gpgme/go_gpgme.c | 111 + vendor/github.com/proglottis/gpgme/go_gpgme.h | 44 + vendor/github.com/proglottis/gpgme/gpgme.go | 950 + .../proglottis/gpgme/unset_agent_info.go | 18 + .../gpgme/unset_agent_info_windows.go | 14 + vendor/github.com/rivo/uniseg/LICENSE.txt | 21 + vendor/github.com/rivo/uniseg/README.md | 157 + vendor/github.com/rivo/uniseg/doc.go | 108 + .../github.com/rivo/uniseg/eastasianwidth.go | 2556 + .../rivo/uniseg/emojipresentation.go | 285 + .../github.com/rivo/uniseg/gen_breaktest.go | 213 + .../github.com/rivo/uniseg/gen_properties.go | 256 + vendor/github.com/rivo/uniseg/grapheme.go | 334 + .../rivo/uniseg/graphemeproperties.go | 1891 + .../github.com/rivo/uniseg/graphemerules.go | 138 + vendor/github.com/rivo/uniseg/line.go | 134 + .../github.com/rivo/uniseg/lineproperties.go | 3513 + vendor/github.com/rivo/uniseg/linerules.go | 470 + vendor/github.com/rivo/uniseg/properties.go | 168 + vendor/github.com/rivo/uniseg/sentence.go | 90 + .../rivo/uniseg/sentenceproperties.go | 2815 + .../github.com/rivo/uniseg/sentencerules.go | 205 + vendor/github.com/rivo/uniseg/step.go | 246 + vendor/github.com/rivo/uniseg/width.go | 54 + vendor/github.com/rivo/uniseg/word.go | 89 + .../github.com/rivo/uniseg/wordproperties.go | 1848 + vendor/github.com/rivo/uniseg/wordrules.go 
| 246 + .../go-securesystemslib/LICENSE | 21 + .../encrypted/encrypted.go | 290 + .../github.com/sigstore/fulcio/COPYRIGHT.txt | 14 + vendor/github.com/sigstore/fulcio/LICENSE | 201 + .../sigstore/fulcio/pkg/certificate/doc.go | 17 + .../fulcio/pkg/certificate/extensions.go | 439 + .../github.com/sigstore/rekor/CONTRIBUTORS.md | 116 + .../github.com/sigstore/rekor/COPYRIGHT.txt | 14 + vendor/github.com/sigstore/rekor/LICENSE | 202 + .../rekor/pkg/generated/models/alpine.go | 210 + .../pkg/generated/models/alpine_schema.go | 29 + .../generated/models/alpine_v001_schema.go | 455 + .../pkg/generated/models/consistency_proof.go | 118 + .../rekor/pkg/generated/models/cose.go | 210 + .../rekor/pkg/generated/models/cose_schema.go | 29 + .../pkg/generated/models/cose_v001_schema.go | 519 + .../rekor/pkg/generated/models/dsse.go | 210 + .../rekor/pkg/generated/models/dsse_schema.go | 29 + .../pkg/generated/models/dsse_v001_schema.go | 685 + .../rekor/pkg/generated/models/error.go | 69 + .../pkg/generated/models/hashedrekord.go | 210 + .../generated/models/hashedrekord_schema.go | 29 + .../models/hashedrekord_v001_schema.go | 513 + .../rekor/pkg/generated/models/helm.go | 210 + .../rekor/pkg/generated/models/helm_schema.go | 29 + .../pkg/generated/models/helm_v001_schema.go | 662 + .../models/inactive_shard_log_info.go | 153 + .../pkg/generated/models/inclusion_proof.go | 179 + .../rekor/pkg/generated/models/intoto.go | 210 + .../pkg/generated/models/intoto_schema.go | 29 + .../generated/models/intoto_v001_schema.go | 514 + .../generated/models/intoto_v002_schema.go | 757 + .../rekor/pkg/generated/models/jar.go | 210 + .../rekor/pkg/generated/models/jar_schema.go | 29 + .../pkg/generated/models/jar_v001_schema.go | 569 + .../rekor/pkg/generated/models/log_entry.go | 445 + .../rekor/pkg/generated/models/log_info.go | 221 + .../pkg/generated/models/proposed_entry.go | 195 + .../rekor/pkg/generated/models/rekord.go | 210 + .../pkg/generated/models/rekord_schema.go | 29 + .../generated/models/rekord_v001_schema.go | 611 + .../rekor/pkg/generated/models/rfc3161.go | 210 + .../pkg/generated/models/rfc3161_schema.go | 29 + .../generated/models/rfc3161_v001_schema.go | 183 + .../rekor/pkg/generated/models/rpm.go | 210 + .../rekor/pkg/generated/models/rpm_schema.go | 29 + .../pkg/generated/models/rpm_v001_schema.go | 450 + .../pkg/generated/models/search_index.go | 341 + .../pkg/generated/models/search_log_query.go | 297 + .../rekor/pkg/generated/models/tuf.go | 210 + .../rekor/pkg/generated/models/tuf_schema.go | 29 + .../pkg/generated/models/tuf_v001_schema.go | 304 + .../sigstore/sigstore/COPYRIGHT.txt | 14 + vendor/github.com/sigstore/sigstore/LICENSE | 202 + .../sigstore/pkg/cryptoutils/certificate.go | 173 + .../sigstore/sigstore/pkg/cryptoutils/doc.go | 17 + .../sigstore/pkg/cryptoutils/generic.go | 31 + .../sigstore/pkg/cryptoutils/password.go | 94 + .../sigstore/pkg/cryptoutils/privatekey.go | 152 + .../sigstore/pkg/cryptoutils/publickey.go | 184 + .../sigstore/sigstore/pkg/cryptoutils/sans.go | 149 + .../sigstore/sigstore/pkg/signature/doc.go | 17 + .../sigstore/sigstore/pkg/signature/ecdsa.go | 253 + .../sigstore/pkg/signature/ed25519.go | 197 + .../sigstore/pkg/signature/message.go | 111 + .../sigstore/pkg/signature/options.go | 57 + .../sigstore/pkg/signature/options/context.go | 37 + .../sigstore/pkg/signature/options/digest.go | 35 + .../sigstore/pkg/signature/options/doc.go | 17 + .../pkg/signature/options/keyversion.go | 50 + .../sigstore/pkg/signature/options/noop.go | 49 + 
.../sigstore/pkg/signature/options/rand.go | 41 + .../signature/options/remoteverification.go | 32 + .../sigstore/pkg/signature/options/rpcauth.go | 58 + .../pkg/signature/options/signeropts.go | 40 + .../sigstore/pkg/signature/payload/doc.go | 17 + .../sigstore/pkg/signature/payload/payload.go | 122 + .../sigstore/pkg/signature/publickey.go | 25 + .../sigstore/pkg/signature/rsapkcs1v15.go | 225 + .../sigstore/sigstore/pkg/signature/rsapss.go | 260 + .../sigstore/sigstore/pkg/signature/signer.go | 89 + .../sigstore/pkg/signature/signerverifier.go | 69 + .../sigstore/sigstore/pkg/signature/util.go | 55 + .../sigstore/pkg/signature/verifier.go | 100 + vendor/github.com/sirupsen/logrus/.gitignore | 4 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 259 + vendor/github.com/sirupsen/logrus/LICENSE | 21 + vendor/github.com/sirupsen/logrus/README.md | 515 + vendor/github.com/sirupsen/logrus/alt_exit.go | 76 + .../github.com/sirupsen/logrus/appveyor.yml | 14 + .../github.com/sirupsen/logrus/buffer_pool.go | 43 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 442 + vendor/github.com/sirupsen/logrus/exported.go | 270 + .../github.com/sirupsen/logrus/formatter.go | 78 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 128 + vendor/github.com/sirupsen/logrus/logger.go | 417 + vendor/github.com/sirupsen/logrus/logrus.go | 186 + .../logrus/terminal_check_appengine.go | 11 + .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 17 + .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/text_formatter.go | 339 + vendor/github.com/sirupsen/logrus/writer.go | 102 + vendor/github.com/spf13/cobra/.golangci.yml | 22 +- vendor/github.com/spf13/cobra/Makefile | 8 +- vendor/github.com/spf13/cobra/README.md | 19 +- vendor/github.com/spf13/cobra/active_help.go | 24 +- vendor/github.com/spf13/cobra/active_help.md | 157 - vendor/github.com/spf13/cobra/args.go | 40 +- .../spf13/cobra/bash_completions.go | 18 +- .../spf13/cobra/bash_completions.md | 93 - .../spf13/cobra/bash_completionsV2.go | 85 +- vendor/github.com/spf13/cobra/cobra.go | 36 +- vendor/github.com/spf13/cobra/command.go | 255 +- .../github.com/spf13/cobra/command_notwin.go | 14 + vendor/github.com/spf13/cobra/command_win.go | 14 + vendor/github.com/spf13/cobra/completions.go | 77 +- .../spf13/cobra/fish_completions.go | 90 +- .../spf13/cobra/fish_completions.md | 4 - vendor/github.com/spf13/cobra/flag_groups.go | 75 +- .../spf13/cobra/powershell_completions.go | 63 +- .../spf13/cobra/powershell_completions.md | 3 - .../spf13/cobra/projects_using_cobra.md | 54 - .../spf13/cobra/shell_completions.go | 14 + .../spf13/cobra/shell_completions.md | 548 - vendor/github.com/spf13/cobra/user_guide.md | 666 - .../github.com/spf13/cobra/zsh_completions.go | 29 +- .../github.com/spf13/cobra/zsh_completions.md | 48 - .../stefanberger/go-pkcs11uri/.gitignore | 2 + .../stefanberger/go-pkcs11uri/.travis.yml | 25 + .../stefanberger/go-pkcs11uri/LICENSE | 177 + .../stefanberger/go-pkcs11uri/Makefile | 28 + .../stefanberger/go-pkcs11uri/README.md | 102 + .../stefanberger/go-pkcs11uri/pkcs11uri.go | 453 + 
vendor/github.com/syndtr/gocapability/LICENSE | 24 + .../gocapability/capability/capability.go | 133 + .../capability/capability_linux.go | 642 + .../capability/capability_noop.go | 19 + .../syndtr/gocapability/capability/enum.go | 309 + .../gocapability/capability/enum_gen.go | 138 + .../gocapability/capability/syscall_linux.go | 154 + .../theupdateframework/go-tuf/LICENSE | 27 + .../go-tuf/encrypted/encrypted.go | 226 + vendor/github.com/titanous/rocacheck/LICENSE | 22 + .../github.com/titanous/rocacheck/README.md | 7 + .../titanous/rocacheck/rocacheck.go | 52 + vendor/github.com/ulikunitz/xz/.gitignore | 28 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 77 + vendor/github.com/ulikunitz/xz/SECURITY.md | 10 + vendor/github.com/ulikunitz/xz/TODO.md | 372 + vendor/github.com/ulikunitz/xz/bits.go | 79 + vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 721 + .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 + .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 456 + .../github.com/ulikunitz/xz/lzma/bintree.go | 522 + vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 + .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 + .../ulikunitz/xz/lzma/decoderdict.go | 128 + .../ulikunitz/xz/lzma/directcodec.go | 38 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 + .../github.com/ulikunitz/xz/lzma/encoder.go | 268 + .../ulikunitz/xz/lzma/encoderdict.go | 149 + vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 + vendor/github.com/ulikunitz/xz/lzma/header.go | 167 + .../github.com/ulikunitz/xz/lzma/header2.go | 398 + .../ulikunitz/xz/lzma/lengthcodec.go | 115 + .../ulikunitz/xz/lzma/literalcodec.go | 125 + .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 55 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 222 + vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 + .../github.com/ulikunitz/xz/lzma/reader2.go | 231 + vendor/github.com/ulikunitz/xz/lzma/state.go | 145 + .../ulikunitz/xz/lzma/treecodecs.go | 133 + vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 + .../github.com/ulikunitz/xz/lzma/writer2.go | 305 + vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 + vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 359 + vendor/github.com/ulikunitz/xz/writer.go | 399 + vendor/github.com/vbatts/tar-split/LICENSE | 28 + .../vbatts/tar-split/archive/tar/common.go | 723 + .../vbatts/tar-split/archive/tar/format.go | 303 + .../vbatts/tar-split/archive/tar/reader.go | 924 + .../tar-split/archive/tar/stat_actime1.go | 20 + .../tar-split/archive/tar/stat_actime2.go | 20 + .../vbatts/tar-split/archive/tar/stat_unix.go | 96 + .../vbatts/tar-split/archive/tar/strconv.go | 326 + .../vbatts/tar-split/archive/tar/writer.go | 653 + .../vbatts/tar-split/tar/asm/README.md | 44 + .../vbatts/tar-split/tar/asm/assemble.go | 132 + 
.../vbatts/tar-split/tar/asm/disassemble.go | 156 + .../vbatts/tar-split/tar/asm/doc.go | 9 + .../vbatts/tar-split/tar/storage/doc.go | 12 + .../vbatts/tar-split/tar/storage/entry.go | 78 + .../vbatts/tar-split/tar/storage/getter.go | 105 + .../vbatts/tar-split/tar/storage/packer.go | 110 + .../github.com/vbauerster/mpb/v8/.gitignore | 5 + .../github.com/vbauerster/mpb/v8/CONTRIBUTING | 15 + vendor/github.com/vbauerster/mpb/v8/README.md | 119 + vendor/github.com/vbauerster/mpb/v8/UNLICENSE | 24 + vendor/github.com/vbauerster/mpb/v8/bar.go | 662 + .../vbauerster/mpb/v8/bar_filler.go | 31 + .../vbauerster/mpb/v8/bar_filler_bar.go | 291 + .../vbauerster/mpb/v8/bar_filler_nop.go | 24 + .../vbauerster/mpb/v8/bar_filler_spinner.go | 103 + .../vbauerster/mpb/v8/bar_option.go | 200 + .../vbauerster/mpb/v8/container_option.go | 135 + .../vbauerster/mpb/v8/cwriter/doc.go | 2 + .../vbauerster/mpb/v8/cwriter/util_bsd.go | 7 + .../vbauerster/mpb/v8/cwriter/util_linux.go | 7 + .../vbauerster/mpb/v8/cwriter/util_solaris.go | 7 + .../vbauerster/mpb/v8/cwriter/util_zos.go | 7 + .../vbauerster/mpb/v8/cwriter/writer.go | 59 + .../vbauerster/mpb/v8/cwriter/writer_posix.go | 48 + .../mpb/v8/cwriter/writer_windows.go | 101 + .../github.com/vbauerster/mpb/v8/decor/any.go | 21 + .../vbauerster/mpb/v8/decor/counters.go | 253 + .../vbauerster/mpb/v8/decor/decorator.go | 186 + .../github.com/vbauerster/mpb/v8/decor/doc.go | 19 + .../vbauerster/mpb/v8/decor/elapsed.go | 33 + .../github.com/vbauerster/mpb/v8/decor/eta.go | 205 + .../vbauerster/mpb/v8/decor/meta.go | 34 + .../vbauerster/mpb/v8/decor/moving_average.go | 74 + .../vbauerster/mpb/v8/decor/name.go | 11 + .../vbauerster/mpb/v8/decor/on_abort.go | 68 + .../mpb/v8/decor/on_compete_or_on_abort.go | 21 + .../vbauerster/mpb/v8/decor/on_complete.go | 67 + .../vbauerster/mpb/v8/decor/on_condition.go | 51 + .../vbauerster/mpb/v8/decor/percentage.go | 68 + .../vbauerster/mpb/v8/decor/size_type.go | 120 + .../mpb/v8/decor/sizeb1000_string.go | 41 + .../mpb/v8/decor/sizeb1024_string.go | 41 + .../vbauerster/mpb/v8/decor/speed.go | 179 + .../vbauerster/mpb/v8/decor/spinner.go | 21 + vendor/github.com/vbauerster/mpb/v8/doc.go | 2 + .../vbauerster/mpb/v8/heap_manager.go | 173 + .../vbauerster/mpb/v8/internal/percentage.go | 19 + .../vbauerster/mpb/v8/internal/width.go | 10 + .../vbauerster/mpb/v8/priority_queue.go | 37 + .../github.com/vbauerster/mpb/v8/progress.go | 456 + .../vbauerster/mpb/v8/proxyreader.go | 96 + .../vbauerster/mpb/v8/proxywriter.go | 96 + vendor/go.mongodb.org/mongo-driver/LICENSE | 201 + .../go.mongodb.org/mongo-driver/bson/bson.go | 50 + .../bson/bsoncodec/array_codec.go | 50 + .../mongo-driver/bson/bsoncodec/bsoncodec.go | 238 + .../bson/bsoncodec/byte_slice_codec.go | 111 + .../bson/bsoncodec/cond_addr_codec.go | 63 + .../bson/bsoncodec/default_value_decoders.go | 1729 + .../bson/bsoncodec/default_value_encoders.go | 766 + .../mongo-driver/bson/bsoncodec/doc.go | 90 + .../bson/bsoncodec/empty_interface_codec.go | 147 + .../mongo-driver/bson/bsoncodec/map_codec.go | 309 + .../mongo-driver/bson/bsoncodec/mode.go | 65 + .../bson/bsoncodec/pointer_codec.go | 109 + .../mongo-driver/bson/bsoncodec/proxy.go | 14 + .../mongo-driver/bson/bsoncodec/registry.go | 469 + .../bson/bsoncodec/slice_codec.go | 199 + .../bson/bsoncodec/string_codec.go | 119 + .../bson/bsoncodec/struct_codec.go | 664 + .../bson/bsoncodec/struct_tag_parser.go | 139 + .../mongo-driver/bson/bsoncodec/time_codec.go | 127 + .../mongo-driver/bson/bsoncodec/types.go | 57 + 
.../mongo-driver/bson/bsoncodec/uint_codec.go | 173 + .../bsonoptions/byte_slice_codec_options.go | 38 + .../mongo-driver/bson/bsonoptions/doc.go | 8 + .../empty_interface_codec_options.go | 38 + .../bson/bsonoptions/map_codec_options.go | 67 + .../bson/bsonoptions/slice_codec_options.go | 38 + .../bson/bsonoptions/string_codec_options.go | 41 + .../bson/bsonoptions/struct_codec_options.go | 87 + .../bson/bsonoptions/time_codec_options.go | 38 + .../bson/bsonoptions/uint_codec_options.go | 38 + .../mongo-driver/bson/bsonrw/copier.go | 445 + .../mongo-driver/bson/bsonrw/doc.go | 9 + .../bson/bsonrw/extjson_parser.go | 806 + .../bson/bsonrw/extjson_reader.go | 644 + .../bson/bsonrw/extjson_tables.go | 223 + .../bson/bsonrw/extjson_wrappers.go | 492 + .../bson/bsonrw/extjson_writer.go | 732 + .../mongo-driver/bson/bsonrw/json_scanner.go | 528 + .../mongo-driver/bson/bsonrw/mode.go | 108 + .../mongo-driver/bson/bsonrw/reader.go | 63 + .../mongo-driver/bson/bsonrw/value_reader.go | 874 + .../mongo-driver/bson/bsonrw/value_writer.go | 606 + .../mongo-driver/bson/bsonrw/writer.go | 78 + .../mongo-driver/bson/bsontype/bsontype.go | 97 + .../mongo-driver/bson/decoder.go | 141 + .../go.mongodb.org/mongo-driver/bson/doc.go | 141 + .../mongo-driver/bson/encoder.go | 99 + .../mongo-driver/bson/marshal.go | 248 + .../mongo-driver/bson/primitive/decimal.go | 423 + .../mongo-driver/bson/primitive/objectid.go | 206 + .../mongo-driver/bson/primitive/primitive.go | 217 + .../mongo-driver/bson/primitive_codecs.go | 92 + .../go.mongodb.org/mongo-driver/bson/raw.go | 85 + .../mongo-driver/bson/raw_element.go | 51 + .../mongo-driver/bson/raw_value.go | 309 + .../mongo-driver/bson/registry.go | 24 + .../go.mongodb.org/mongo-driver/bson/types.go | 36 + .../mongo-driver/bson/unmarshal.go | 101 + .../mongo-driver/x/bsonx/bsoncore/array.go | 164 + .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 + .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 + .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 862 + .../mongo-driver/x/bsonx/bsoncore/document.go | 386 + .../x/bsonx/bsoncore/document_sequence.go | 189 + .../mongo-driver/x/bsonx/bsoncore/element.go | 152 + .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 + .../mongo-driver/x/bsonx/bsoncore/value.go | 980 + vendor/go.mozilla.org/pkcs7/.gitignore | 24 + vendor/go.mozilla.org/pkcs7/LICENSE | 22 + vendor/go.mozilla.org/pkcs7/Makefile | 20 + vendor/go.mozilla.org/pkcs7/README.md | 69 + vendor/go.mozilla.org/pkcs7/ber.go | 271 + vendor/go.mozilla.org/pkcs7/decrypt.go | 177 + vendor/go.mozilla.org/pkcs7/encrypt.go | 399 + vendor/go.mozilla.org/pkcs7/pkcs7.go | 291 + vendor/go.mozilla.org/pkcs7/sign.go | 429 + vendor/go.mozilla.org/pkcs7/verify.go | 343 + .../go.mozilla.org/pkcs7/verify_test_dsa.go | 182 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/cast5/cast5.go | 536 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 + .../x/crypto/internal/alias/alias.go | 31 + .../x/crypto/internal/alias/alias_purego.go | 34 + .../x/crypto/internal/poly1305/bits_compat.go | 39 + .../x/crypto/internal/poly1305/bits_go1.13.go | 21 + .../x/crypto/internal/poly1305/mac_noasm.go | 9 + .../x/crypto/internal/poly1305/poly1305.go | 99 + .../x/crypto/internal/poly1305/sum_amd64.go | 47 + .../x/crypto/internal/poly1305/sum_amd64.s | 108 + .../x/crypto/internal/poly1305/sum_generic.go | 309 + .../x/crypto/internal/poly1305/sum_ppc64le.go | 47 + .../x/crypto/internal/poly1305/sum_ppc64le.s | 181 + 
.../x/crypto/internal/poly1305/sum_s390x.go | 76 + .../x/crypto/internal/poly1305/sum_s390x.s | 503 + .../x/crypto/nacl/secretbox/secretbox.go | 173 + vendor/golang.org/x/crypto/ocsp/ocsp.go | 792 + .../x/crypto/openpgp/armor/armor.go | 232 + .../x/crypto/openpgp/armor/encode.go | 161 + .../x/crypto/openpgp/canonical_text.go | 59 + .../x/crypto/openpgp/elgamal/elgamal.go | 130 + .../x/crypto/openpgp/errors/errors.go | 78 + vendor/golang.org/x/crypto/openpgp/keys.go | 693 + .../x/crypto/openpgp/packet/compressed.go | 123 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 208 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 161 + .../x/crypto/openpgp/packet/packet.go | 590 + .../x/crypto/openpgp/packet/private_key.go | 384 + .../x/crypto/openpgp/packet/public_key.go | 753 + .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 + .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../x/crypto/openpgp/packet/userattribute.go | 90 + .../x/crypto/openpgp/packet/userid.go | 159 + vendor/golang.org/x/crypto/openpgp/read.go | 448 + vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 + vendor/golang.org/x/crypto/openpgp/write.go | 418 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../x/crypto/salsa20/salsa/hsalsa20.go | 146 + .../x/crypto/salsa20/salsa/salsa208.go | 201 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 880 + .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 + vendor/golang.org/x/crypto/sha3/doc.go | 62 + vendor/golang.org/x/crypto/sha3/hashes.go | 97 + .../x/crypto/sha3/hashes_generic.go | 27 + vendor/golang.org/x/crypto/sha3/keccakf.go | 414 + .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 390 + vendor/golang.org/x/crypto/sha3/register.go | 18 + vendor/golang.org/x/crypto/sha3/sha3.go | 197 + vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 288 + vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 33 + vendor/golang.org/x/crypto/sha3/shake.go | 172 + .../golang.org/x/crypto/sha3/shake_generic.go | 19 + vendor/golang.org/x/crypto/sha3/xor.go | 23 + .../golang.org/x/crypto/sha3/xor_generic.go | 28 + .../golang.org/x/crypto/sha3/xor_unaligned.go | 66 + vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/maps/maps.go | 94 + vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 499 + vendor/golang.org/x/exp/slices/sort.go | 195 + .../golang.org/x/exp/slices/zsortanyfunc.go | 479 + .../golang.org/x/exp/slices/zsortordered.go | 481 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/http/httpguts/guts.go | 50 + .../golang.org/x/net/http/httpguts/httplex.go | 352 + vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/ascii.go | 53 + vendor/golang.org/x/net/http2/ciphers.go | 641 + .../x/net/http2/client_conn_pool.go | 311 + vendor/golang.org/x/net/http2/databuffer.go | 149 + vendor/golang.org/x/net/http2/errors.go | 145 + 
vendor/golang.org/x/net/http2/flow.go | 120 + vendor/golang.org/x/net/http2/frame.go | 1658 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/headermap.go | 105 + vendor/golang.org/x/net/http2/hpack/encode.go | 245 + vendor/golang.org/x/net/http2/hpack/hpack.go | 523 + .../golang.org/x/net/http2/hpack/huffman.go | 226 + .../x/net/http2/hpack/static_table.go | 188 + vendor/golang.org/x/net/http2/hpack/tables.go | 403 + vendor/golang.org/x/net/http2/http2.go | 385 + vendor/golang.org/x/net/http2/pipe.go | 175 + vendor/golang.org/x/net/http2/server.go | 3280 + vendor/golang.org/x/net/http2/transport.go | 3234 + vendor/golang.org/x/net/http2/write.go | 370 + vendor/golang.org/x/net/http2/writesched.go | 251 + .../x/net/http2/writesched_priority.go | 451 + .../x/net/http2/writesched_random.go | 77 + .../x/net/http2/writesched_roundrobin.go | 119 + vendor/golang.org/x/net/idna/go118.go | 13 + vendor/golang.org/x/net/idna/idna10.0.0.go | 769 + vendor/golang.org/x/net/idna/idna9.0.0.go | 717 + vendor/golang.org/x/net/idna/pre_go118.go | 11 + vendor/golang.org/x/net/idna/punycode.go | 217 + vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 + vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 + vendor/golang.org/x/net/idna/tables12.0.0.go | 4733 + vendor/golang.org/x/net/idna/tables13.0.0.go | 4959 + vendor/golang.org/x/net/idna/tables15.0.0.go | 5144 + vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 + vendor/golang.org/x/net/idna/trie.go | 51 + vendor/golang.org/x/net/idna/trie12.0.0.go | 30 + vendor/golang.org/x/net/idna/trie13.0.0.go | 30 + vendor/golang.org/x/net/idna/trieval.go | 119 + .../x/net/internal/timeseries/timeseries.go | 525 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 66 + vendor/golang.org/x/sys/cpu/cpu.go | 290 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 31 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 111 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 + 
.../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 151 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/endian_big.go | 10 + vendor/golang.org/x/sys/cpu/endian_little.go | 10 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 71 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 53 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 18 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 + vendor/golang.org/x/sys/plan9/asm.s | 8 + vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 + .../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 + vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 + vendor/golang.org/x/sys/plan9/const_plan9.go | 70 + vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 + vendor/golang.org/x/sys/plan9/env_plan9.go | 31 + vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 + vendor/golang.org/x/sys/plan9/mkall.sh | 150 + vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 + .../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 + .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 + vendor/golang.org/x/sys/plan9/pwd_plan9.go | 23 + vendor/golang.org/x/sys/plan9/race.go | 30 + vendor/golang.org/x/sys/plan9/race0.go | 25 + vendor/golang.org/x/sys/plan9/str.go | 22 + vendor/golang.org/x/sys/plan9/syscall.go | 109 + .../golang.org/x/sys/plan9/syscall_plan9.go | 361 + .../x/sys/plan9/zsyscall_plan9_386.go | 284 + .../x/sys/plan9/zsyscall_plan9_amd64.go | 284 + .../x/sys/plan9/zsyscall_plan9_arm.go | 284 + .../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 + vendor/golang.org/x/sys/windows/aliases.go | 12 + .../golang.org/x/sys/windows/dll_windows.go | 416 + vendor/golang.org/x/sys/windows/empty.s | 8 + .../golang.org/x/sys/windows/env_windows.go | 54 + vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 248 + .../x/sys/windows/memory_windows.go | 48 + vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 9 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/security_windows.go | 1435 + vendor/golang.org/x/sys/windows/service.go | 257 + .../x/sys/windows/setupapi_windows.go | 1425 + vendor/golang.org/x/sys/windows/str.go | 22 + vendor/golang.org/x/sys/windows/syscall.go | 104 + .../x/sys/windows/syscall_windows.go | 1834 + .../golang.org/x/sys/windows/types_windows.go | 3382 + .../x/sys/windows/types_windows_386.go | 35 + .../x/sys/windows/types_windows_amd64.go | 34 + .../x/sys/windows/types_windows_arm.go | 35 + .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 9468 + .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 4390 + vendor/golang.org/x/term/CONTRIBUTING.md | 26 + vendor/golang.org/x/term/LICENSE | 27 + vendor/golang.org/x/term/PATENTS | 22 + vendor/golang.org/x/term/README.md | 19 + vendor/golang.org/x/term/codereview.cfg | 1 + vendor/golang.org/x/term/term.go | 60 + 
vendor/golang.org/x/term/term_plan9.go | 42 + vendor/golang.org/x/term/term_unix.go | 91 + vendor/golang.org/x/term/term_unix_bsd.go | 12 + vendor/golang.org/x/term/term_unix_other.go | 12 + vendor/golang.org/x/term/term_unsupported.go | 38 + vendor/golang.org/x/term/term_windows.go | 79 + vendor/golang.org/x/term/terminal.go | 986 + vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + .../x/text/secure/bidirule/bidirule.go | 336 + .../x/text/secure/bidirule/bidirule10.0.0.go | 11 + .../x/text/secure/bidirule/bidirule9.0.0.go | 14 + .../golang.org/x/text/transform/transform.go | 709 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 359 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1071 + vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/tables10.0.0.go | 1815 + .../x/text/unicode/bidi/tables11.0.0.go | 1887 + .../x/text/unicode/bidi/tables12.0.0.go | 1923 + .../x/text/unicode/bidi/tables13.0.0.go | 1955 + .../x/text/unicode/bidi/tables15.0.0.go | 2042 + .../x/text/unicode/bidi/tables9.0.0.go | 1781 + .../golang.org/x/text/unicode/bidi/trieval.go | 48 + .../x/text/unicode/norm/composition.go | 512 + .../x/text/unicode/norm/forminfo.go | 279 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 610 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7657 + .../x/text/unicode/norm/tables11.0.0.go | 7693 + .../x/text/unicode/norm/tables12.0.0.go | 7710 + .../x/text/unicode/norm/tables13.0.0.go | 7760 + .../x/text/unicode/norm/tables15.0.0.go | 7907 + .../x/text/unicode/norm/tables9.0.0.go | 7637 + .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + .../genproto/googleapis/rpc/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 203 + vendor/google.golang.org/grpc/AUTHORS | 1 + .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 73 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 28 + vendor/google.golang.org/grpc/Makefile | 46 + vendor/google.golang.org/grpc/NOTICE.txt | 13 + vendor/google.golang.org/grpc/README.md | 107 + vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 141 + vendor/google.golang.org/grpc/backoff.go | 61 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../grpc/balancer/balancer.go | 442 + .../grpc/balancer/base/balancer.go | 264 + .../grpc/balancer/base/base.go | 71 + .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_conn_wrappers.go | 454 + .../grpc_binarylog_v1/binarylog.pb.go | 1183 + vendor/google.golang.org/grpc/call.go | 74 + .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 2038 + vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 111 + vendor/google.golang.org/grpc/codes/codes.go | 244 + .../grpc/connectivity/connectivity.go | 94 + .../grpc/credentials/credentials.go | 291 + .../grpc/credentials/insecure/insecure.go | 98 + .../google.golang.org/grpc/credentials/tls.go | 236 + vendor/google.golang.org/grpc/dialoptions.go | 716 + vendor/google.golang.org/grpc/doc.go | 26 + 
.../grpc/encoding/encoding.go | 130 + .../grpc/encoding/proto/proto.go | 58 + .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 132 + .../google.golang.org/grpc/grpclog/logger.go | 87 + .../grpc/grpclog/loggerv2.go | 258 + vendor/google.golang.org/grpc/interceptor.go | 104 + .../grpc/internal/backoff/backoff.go | 109 + .../balancer/gracefulswitch/gracefulswitch.go | 385 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 192 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 208 + .../grpc/internal/binarylog/method_logger.go | 445 + .../grpc/internal/binarylog/sink.go | 170 + .../grpc/internal/buffer/unbounded.go | 105 + .../grpc/internal/channelz/funcs.go | 756 + .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 79 + .../grpc/internal/channelz/types.go | 727 + .../grpc/internal/channelz/types_linux.go | 51 + .../grpc/internal/channelz/types_nonlinux.go | 43 + .../grpc/internal/channelz/util_linux.go | 37 + .../grpc/internal/channelz/util_nonlinux.go | 27 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 75 + .../grpc/internal/credentials/syscallconn.go | 58 + .../grpc/internal/credentials/util.go | 52 + .../grpc/internal/envconfig/envconfig.go | 72 + .../grpc/internal/envconfig/observability.go | 42 + .../grpc/internal/envconfig/xds.go | 95 + .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 93 + .../grpc/internal/grpcrand/grpcrand.go | 95 + .../internal/grpcsync/callback_serializer.go | 125 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 88 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/idle/idle.go | 301 + .../grpc/internal/internal.go | 211 + .../grpc/internal/metadata/metadata.go | 132 + .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 167 + .../internal/resolver/dns/dns_resolver.go | 470 + .../resolver/passthrough/passthrough.go | 64 + .../grpc/internal/resolver/unix/unix.go | 74 + .../grpc/internal/serviceconfig/duration.go | 130 + .../internal/serviceconfig/serviceconfig.go | 180 + .../grpc/internal/status/status.go | 204 + .../grpc/internal/syscall/syscall_linux.go | 112 + .../grpc/internal/syscall/syscall_nonlinux.go | 77 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 1007 + .../grpc/internal/transport/defaults.go | 55 + .../grpc/internal/transport/flowcontrol.go | 215 + .../grpc/internal/transport/handler_server.go | 483 + .../grpc/internal/transport/http2_client.go | 1788 + .../grpc/internal/transport/http2_server.go | 1466 + .../grpc/internal/transport/http_util.go | 465 + .../grpc/internal/transport/logging.go | 40 + .../transport/networktype/networktype.go | 46 + .../grpc/internal/transport/proxy.go | 142 + .../grpc/internal/transport/transport.go | 837 + .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 295 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 236 + 
vendor/google.golang.org/grpc/pickfirst.go | 263 + vendor/google.golang.org/grpc/preloader.go | 67 + vendor/google.golang.org/grpc/regenerate.sh | 123 + vendor/google.golang.org/grpc/resolver/map.go | 138 + .../grpc/resolver/resolver.go | 316 + .../grpc/resolver_conn_wrapper.go | 247 + vendor/google.golang.org/grpc/rpc_util.go | 959 + vendor/google.golang.org/grpc/server.go | 2108 + .../google.golang.org/grpc/service_config.go | 347 + .../grpc/serviceconfig/serviceconfig.go | 44 + .../grpc/shared_buffer_pool.go | 154 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 343 + .../google.golang.org/grpc/status/status.go | 162 + vendor/google.golang.org/grpc/stream.go | 1780 + vendor/google.golang.org/grpc/tap/tap.go | 62 + vendor/google.golang.org/grpc/trace.go | 123 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 212 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/protojson/decode.go | 665 + .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 349 + .../encoding/protojson/well_known_types.go | 895 + .../protobuf/encoding/prototext/decode.go | 770 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 376 + .../protobuf/encoding/protowire/wire.go | 547 + .../protobuf/internal/descfmt/stringer.go | 318 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 69 + .../internal/encoding/defval/default.go | 213 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../encoding/messageset/messageset.go | 242 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 686 + .../internal/encoding/text/decode_number.go | 211 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 272 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 40 + .../protobuf/internal/errors/is_go113.go | 13 + .../protobuf/internal/filedesc/build.go | 157 + .../protobuf/internal/filedesc/desc.go | 633 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 457 + .../internal/filedesc/desc_list_gen.go | 356 + .../protobuf/internal/filedesc/placeholder.go | 109 + .../protobuf/internal/filetype/build.go | 296 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 10 + .../internal/flags/proto_legacy_enable.go | 10 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 919 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + 
.../protobuf/internal/genid/type_gen.go | 190 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 830 + .../protobuf/internal/impl/codec_gen.go | 5637 + .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 38 + .../protobuf/internal/impl/codec_map_go112.go | 12 + .../protobuf/internal/impl/codec_message.go | 217 + .../internal/impl/codec_messageset.go | 123 + .../protobuf/internal/impl/codec_reflect.go | 210 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 18 + .../protobuf/internal/impl/convert.go | 495 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 285 + .../protobuf/internal/impl/encode.go | 201 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 218 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 176 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 563 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 279 + .../protobuf/internal/impl/message_reflect.go | 463 + .../internal/impl/message_reflect_field.go | 543 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 179 + .../protobuf/internal/impl/pointer_unsafe.go | 175 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 28 + .../protobuf/internal/strs/strings_unsafe.go | 95 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 294 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 86 + .../protobuf/proto/encode.go | 322 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 57 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 93 + .../google.golang.org/protobuf/proto/proto.go | 43 + .../protobuf/proto/proto_methods.go | 20 + .../protobuf/proto/proto_reflect.go | 20 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 101 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/methods.go | 78 + .../protobuf/reflect/protoreflect/proto.go | 508 + .../protobuf/reflect/protoreflect/source.go | 129 + .../reflect/protoreflect/source_gen.go | 502 + .../protobuf/reflect/protoreflect/type.go | 666 + .../protobuf/reflect/protoreflect/value.go | 285 + 
.../reflect/protoreflect/value_equal.go | 168 + .../reflect/protoreflect/value_pure.go | 60 + .../reflect/protoreflect/value_union.go | 438 + .../reflect/protoreflect/value_unsafe.go | 99 + .../reflect/protoregistry/registry.go | 882 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 168 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 60 + .../types/descriptorpb/descriptor.pb.go | 4605 + .../protobuf/types/known/anypb/any.pb.go | 495 + .../types/known/durationpb/duration.pb.go | 374 + .../types/known/timestamppb/timestamp.pb.go | 383 + .../go-jose/go-jose.v2/.gitcookies.sh.enc | 1 + vendor/gopkg.in/go-jose/go-jose.v2/.gitignore | 8 + .../gopkg.in/go-jose/go-jose.v2/.travis.yml | 45 + .../go-jose/go-jose.v2/CONTRIBUTING.md | 14 + vendor/gopkg.in/go-jose/go-jose.v2/LICENSE | 202 + vendor/gopkg.in/go-jose/go-jose.v2/README.md | 118 + .../gopkg.in/go-jose/go-jose.v2/asymmetric.go | 592 + .../go-jose/go-jose.v2/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose.v2/cipher/concat_kdf.go | 75 + .../go-jose/go-jose.v2/cipher/ecdh_es.go | 86 + .../go-jose/go-jose.v2/cipher/key_wrap.go | 109 + vendor/gopkg.in/go-jose/go-jose.v2/crypter.go | 542 + vendor/gopkg.in/go-jose/go-jose.v2/doc.go | 27 + .../gopkg.in/go-jose/go-jose.v2/encoding.go | 185 + .../gopkg.in/go-jose/go-jose.v2/json/LICENSE | 27 + .../go-jose/go-jose.v2/json/README.md | 13 + .../go-jose/go-jose.v2/json/decode.go | 1217 + .../go-jose/go-jose.v2/json/encode.go | 1197 + .../go-jose/go-jose.v2/json/indent.go | 141 + .../go-jose/go-jose.v2/json/scanner.go | 623 + .../go-jose/go-jose.v2/json/stream.go | 485 + .../gopkg.in/go-jose/go-jose.v2/json/tags.go | 44 + vendor/gopkg.in/go-jose/go-jose.v2/jwe.go | 294 + vendor/gopkg.in/go-jose/go-jose.v2/jwk.go | 760 + vendor/gopkg.in/go-jose/go-jose.v2/jws.go | 366 + vendor/gopkg.in/go-jose/go-jose.v2/opaque.go | 144 + vendor/gopkg.in/go-jose/go-jose.v2/shared.go | 520 + vendor/gopkg.in/go-jose/go-jose.v2/signing.go | 441 + .../gopkg.in/go-jose/go-jose.v2/symmetric.go | 482 + vendor/gopkg.in/ini.v1/.editorconfig | 12 + vendor/gopkg.in/ini.v1/.gitignore | 7 + vendor/gopkg.in/ini.v1/.golangci.yml | 27 + vendor/gopkg.in/ini.v1/LICENSE | 191 + vendor/gopkg.in/ini.v1/Makefile | 15 + vendor/gopkg.in/ini.v1/README.md | 43 + vendor/gopkg.in/ini.v1/codecov.yml | 16 + vendor/gopkg.in/ini.v1/data_source.go | 76 + vendor/gopkg.in/ini.v1/deprecated.go | 22 + vendor/gopkg.in/ini.v1/error.go | 49 + vendor/gopkg.in/ini.v1/file.go | 541 + vendor/gopkg.in/ini.v1/helper.go | 24 + vendor/gopkg.in/ini.v1/ini.go | 176 + vendor/gopkg.in/ini.v1/key.go | 837 + vendor/gopkg.in/ini.v1/parser.go | 520 + vendor/gopkg.in/ini.v1/section.go | 256 + vendor/gopkg.in/ini.v1/struct.go | 747 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 1000 + vendor/gopkg.in/yaml.v3/emitterc.go | 2020 + vendor/gopkg.in/yaml.v3/encode.go | 577 + vendor/gopkg.in/yaml.v3/parserc.go | 1258 + vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3038 + vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/modules.txt | 522 +- 2386 files changed, 811556 insertions(+), 1907 deletions(-) rename 
Containerfile.orig => Containerfile.odc (79%) create mode 100644 Containerfile.osbuildbootc create mode 100644 README-osbuildbootc.md rename README.md.orig => README.md.osbuild-deploy-container (100%) create mode 100644 cmd/osbuild-deploy-container/fedora-eln.json create mode 100644 cmd/osbuild-deploy-container/image.go create mode 100644 cmd/osbuild-deploy-container/main.go create mode 100644 cmd/osbuild-deploy-container/partition_tables.go create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/error.go create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/meta.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/BurntSushi/toml/type_toml.go create mode 100644 vendor/github.com/VividCortex/ewma/.gitignore create mode 100644 vendor/github.com/VividCortex/ewma/.whitesource create mode 100644 vendor/github.com/VividCortex/ewma/LICENSE create mode 100644 vendor/github.com/VividCortex/ewma/README.md create mode 100644 vendor/github.com/VividCortex/ewma/codecov.yml create mode 100644 vendor/github.com/VividCortex/ewma/ewma.go create mode 100644 vendor/github.com/acarl005/stripansi/LICENSE create mode 100644 vendor/github.com/acarl005/stripansi/README.md create mode 100644 vendor/github.com/acarl005/stripansi/stripansi.go create mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore create mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml create mode 100644 vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md create mode 100644 vendor/github.com/asaskevich/govalidator/LICENSE create mode 100644 vendor/github.com/asaskevich/govalidator/README.md create mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go create mode 100644 vendor/github.com/asaskevich/govalidator/converter.go create mode 100644 vendor/github.com/asaskevich/govalidator/doc.go create mode 100644 vendor/github.com/asaskevich/govalidator/error.go create mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go create mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go create mode 100644 vendor/github.com/asaskevich/govalidator/types.go create mode 100644 vendor/github.com/asaskevich/govalidator/utils.go create mode 100644 vendor/github.com/asaskevich/govalidator/validator.go create mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml create mode 100644 vendor/github.com/containers/common/LICENSE create mode 100644 vendor/github.com/containers/common/pkg/retry/retry.go create mode 100644 vendor/github.com/containers/common/pkg/retry/retry_linux.go create mode 100644 vendor/github.com/containers/common/pkg/retry/retry_unsupported.go create mode 100644 vendor/github.com/containers/image/v5/LICENSE create mode 100644 
 create mode 100644 vendor/github.com/containers/image/v5/copy/blob.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/compression.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/copy.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/digesting_reader.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/encryption.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/multiple.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/progress_bars.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/progress_channel.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/sign.go
 create mode 100644 vendor/github.com/containers/image/v5/copy/single.go
 create mode 100644 vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/archive/dest.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/archive/reader.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/archive/src.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/archive/transport.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/archive/writer.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/body_reader.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/cache.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/distribution_error.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/docker_client.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_dest.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/docker_image_src.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/docker_transport.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/errors.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/paths_common.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/paths_freebsd.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/README.md
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/helpers.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/normalize.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/reference.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/reference/regexp.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/registries_d.go
 create mode 100644 vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_list.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_schema1.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/memory.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/oci.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/oci_index.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/sourced.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/image/unparsed.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/common.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/errors.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/list.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/private/private.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/rootless/rootless.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/set/set.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/signature/signature.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/signature/sigstore.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/signature/simple.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/signer/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/useragent/useragent.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/common.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema1.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/list.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/manifest.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/oci.go
 create mode 100644 vendor/github.com/containers/image/v5/manifest/oci_index.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_src.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/internal/oci_util.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_src.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/compression.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/types/types.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/zstd.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/strslice/README.md
 create mode 100644 vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
 create mode 100644 vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/docker.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/fulcio_cert.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/internal/errors.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/internal/json.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_eval_simple.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_paths_common.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_reference_match.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/policy_types.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/signer/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/copied.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/generate.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/sigstore/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/simple.go
 create mode 100644 vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
 create mode 100644 vendor/github.com/containers/image/v5/transports/stub.go
 create mode 100644 vendor/github.com/containers/image/v5/transports/transports.go
 create mode 100644 vendor/github.com/containers/image/v5/types/types.go
 create mode 100644 vendor/github.com/containers/image/v5/version/version.go
 create mode 100644 vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
 create mode 100644 vendor/github.com/containers/libtrust/CONTRIBUTING.md
 create mode 100644 vendor/github.com/containers/libtrust/LICENSE
 create mode 100644 vendor/github.com/containers/libtrust/MAINTAINERS
 create mode 100644 vendor/github.com/containers/libtrust/README.md
 create mode 100644 vendor/github.com/containers/libtrust/SECURITY.md
 create mode 100644 vendor/github.com/containers/libtrust/certificates.go
 create mode 100644 vendor/github.com/containers/libtrust/doc.go
 create mode 100644 vendor/github.com/containers/libtrust/ec_key.go
 create mode 100644 vendor/github.com/containers/libtrust/ec_key_no_openssl.go
 create mode 100644 vendor/github.com/containers/libtrust/ec_key_openssl.go
 create mode 100644 vendor/github.com/containers/libtrust/filter.go
 create mode 100644 vendor/github.com/containers/libtrust/hash.go
 create mode 100644 vendor/github.com/containers/libtrust/jsonsign.go
 create mode 100644 vendor/github.com/containers/libtrust/key.go
 create mode 100644 vendor/github.com/containers/libtrust/key_files.go
 create mode 100644 vendor/github.com/containers/libtrust/key_manager.go
 create mode 100644 vendor/github.com/containers/libtrust/rsa_key.go
 create mode 100644 vendor/github.com/containers/libtrust/util.go
 create mode 100644 vendor/github.com/containers/ocicrypt/.gitignore
 create mode 100644 vendor/github.com/containers/ocicrypt/.golangci.yml
 create mode 100644 vendor/github.com/containers/ocicrypt/ADOPTERS.md
 create mode 100644 vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
 create mode 100644 vendor/github.com/containers/ocicrypt/LICENSE
 create mode 100644 vendor/github.com/containers/ocicrypt/MAINTAINERS
 create mode 100644 vendor/github.com/containers/ocicrypt/Makefile
 create mode 100644 vendor/github.com/containers/ocicrypt/README.md
 create mode 100644 vendor/github.com/containers/ocicrypt/SECURITY.md
 create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
 create mode 100644 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
 create mode 100644 vendor/github.com/containers/ocicrypt/config/config.go
 create mode 100644 vendor/github.com/containers/ocicrypt/config/constructors.go
 create mode 100644 vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go
 create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
 create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
 create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
 create mode 100644 vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go
 create mode 100644 vendor/github.com/containers/ocicrypt/encryption.go
 create mode 100644 vendor/github.com/containers/ocicrypt/gpg.go
 create mode 100644 vendor/github.com/containers/ocicrypt/gpgvault.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/keywrap.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
 create mode 100644 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go
 create mode 100644 vendor/github.com/containers/ocicrypt/reader.go
 create mode 100644 vendor/github.com/containers/ocicrypt/spec/spec.go
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/delayedreader.go
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/ioutils.go
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/testing.go
 create mode 100644 vendor/github.com/containers/ocicrypt/utils/utils.go
 create mode 100644 vendor/github.com/containers/storage/AUTHORS
 create mode 100644 vendor/github.com/containers/storage/LICENSE
 create mode 100644 vendor/github.com/containers/storage/NOTICE
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/README.md
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_110.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_19.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_other.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/archive_zstd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_other.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/changes_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/copy_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/diff.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/whiteouts.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/wrap.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir.go
 create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
 create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/parser.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/buffer.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/readers.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
 create mode 100644 vendor/github.com/containers/storage/pkg/ioutils/writers.go
 create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go
 create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
 create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/longpath/longpath.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mount.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/pools/pools.go
 create mode 100644 vendor/github.com/containers/storage/pkg/promise/promise.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/README.md
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/reexec/reexec.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
 create mode 100644 vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/chmod.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/errors.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/exitcode.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/init.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/init_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lchown.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/path.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/path_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/path_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/process_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/rm.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/rm_common.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/rm_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_common.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_solaris.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/umask.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/umask_windows.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.c
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
 create mode 100644 vendor/github.com/cyberphone/json-canonicalization/LICENSE
 create mode 100644 vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go
 create mode 100644 vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go
 create mode 100644 vendor/github.com/docker/distribution/LICENSE
 create mode 100644 vendor/github.com/docker/distribution/digestset/set.go
 create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go
 create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go
 create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
 create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go
 create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go
 create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
 create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/LICENSE
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
 create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/version.go
 create mode 100644 vendor/github.com/docker/docker/AUTHORS
 create mode 100644 vendor/github.com/docker/docker/LICENSE
 create mode 100644 vendor/github.com/docker/docker/NOTICE
 create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md
 create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go
 create mode 100644 vendor/github.com/docker/go-connections/LICENSE
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
 create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/go-units/LICENSE
 create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS
 create mode 100644 vendor/github.com/docker/go-units/README.md
 create mode 100644 vendor/github.com/docker/go-units/circle.yml
 create mode 100644 vendor/github.com/docker/go-units/duration.go
 create mode 100644 vendor/github.com/docker/go-units/size.go
 create mode 100644 vendor/github.com/docker/go-units/ulimit.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/.gitignore
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/.golangci.yml
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/.travis.yml
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/LICENSE
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/README.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/asymmetric.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/crypter.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/doc.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/encoding.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/LICENSE
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/README.md
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/decode.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/encode.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/indent.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/scanner.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/stream.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/tags.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwe.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwk.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/jws.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/opaque.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/shared.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/signing.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/symmetric.go
 create mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml
 create mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/analysis/.gitignore
 create mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/analysis/LICENSE
 create mode 100644 vendor/github.com/go-openapi/analysis/README.md
 create mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go
 create mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/analysis/debug.go
 create mode 100644 vendor/github.com/go-openapi/analysis/doc.go
 create mode 100644 vendor/github.com/go-openapi/analysis/fixer.go
 create mode 100644 vendor/github.com/go-openapi/analysis/flatten.go
 create mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go
 create mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
 create mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
 create mode 100644 vendor/github.com/go-openapi/analysis/mixin.go
 create mode 100644 vendor/github.com/go-openapi/analysis/schema.go
 create mode 100644 vendor/github.com/go-openapi/errors/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/errors/.gitignore
 create mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/errors/LICENSE
 create mode 100644 vendor/github.com/go-openapi/errors/README.md
 create mode 100644 vendor/github.com/go-openapi/errors/api.go
 create mode 100644 vendor/github.com/go-openapi/errors/auth.go
 create mode 100644 vendor/github.com/go-openapi/errors/doc.go
 create mode 100644 vendor/github.com/go-openapi/errors/headers.go
 create mode 100644 vendor/github.com/go-openapi/errors/middleware.go
 create mode 100644 vendor/github.com/go-openapi/errors/parsing.go
 create mode 100644 vendor/github.com/go-openapi/errors/schema.go
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
 create mode 100644 vendor/github.com/go-openapi/loads/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/loads/.gitignore
 create mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/loads/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/loads/LICENSE
 create mode 100644 vendor/github.com/go-openapi/loads/README.md
 create mode 100644 vendor/github.com/go-openapi/loads/doc.go
 create mode 100644 vendor/github.com/go-openapi/loads/loaders.go
 create mode 100644 vendor/github.com/go-openapi/loads/options.go
 create mode 100644 vendor/github.com/go-openapi/loads/spec.go
 create mode 100644 vendor/github.com/go-openapi/runtime/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/runtime/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/runtime/.gitignore
 create mode 100644 vendor/github.com/go-openapi/runtime/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/runtime/LICENSE
 create mode 100644 vendor/github.com/go-openapi/runtime/README.md
 create mode 100644 vendor/github.com/go-openapi/runtime/bytestream.go
 create mode 100644 vendor/github.com/go-openapi/runtime/client_auth_info.go
 create mode 100644 vendor/github.com/go-openapi/runtime/client_operation.go
 create mode 100644 vendor/github.com/go-openapi/runtime/client_request.go
 create mode 100644 vendor/github.com/go-openapi/runtime/client_response.go
 create mode 100644 vendor/github.com/go-openapi/runtime/constants.go
 create mode 100644 vendor/github.com/go-openapi/runtime/csv.go
 create mode 100644 vendor/github.com/go-openapi/runtime/discard.go
 create mode 100644 vendor/github.com/go-openapi/runtime/file.go
 create mode 100644 vendor/github.com/go-openapi/runtime/headers.go
 create mode 100644 vendor/github.com/go-openapi/runtime/interfaces.go
 create mode 100644 vendor/github.com/go-openapi/runtime/json.go
 create mode 100644 vendor/github.com/go-openapi/runtime/request.go
 create mode 100644 vendor/github.com/go-openapi/runtime/statuses.go
 create mode 100644 vendor/github.com/go-openapi/runtime/text.go
 create mode 100644 vendor/github.com/go-openapi/runtime/values.go
 create mode 100644 vendor/github.com/go-openapi/runtime/xml.go
 create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/spec/.gitignore
 create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/spec/LICENSE
 create mode 100644 vendor/github.com/go-openapi/spec/README.md
 create mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/spec/bindata.go
 create mode 100644 vendor/github.com/go-openapi/spec/cache.go
 create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
 create mode 100644 vendor/github.com/go-openapi/spec/debug.go
 create mode 100644 vendor/github.com/go-openapi/spec/errors.go
 create mode 100644 vendor/github.com/go-openapi/spec/expander.go
 create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
 create mode 100644 vendor/github.com/go-openapi/spec/header.go
 create mode 100644 vendor/github.com/go-openapi/spec/info.go
 create mode 100644 vendor/github.com/go-openapi/spec/items.go
 create mode 100644 vendor/github.com/go-openapi/spec/license.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go
 create mode 100644 vendor/github.com/go-openapi/spec/operation.go
 create mode 100644 vendor/github.com/go-openapi/spec/parameter.go
 create mode 100644 vendor/github.com/go-openapi/spec/path_item.go
 create mode 100644 vendor/github.com/go-openapi/spec/paths.go
 create mode 100644 vendor/github.com/go-openapi/spec/properties.go
 create mode 100644 vendor/github.com/go-openapi/spec/ref.go
 create mode 100644 vendor/github.com/go-openapi/spec/resolver.go
 create mode 100644 vendor/github.com/go-openapi/spec/response.go
 create mode 100644 vendor/github.com/go-openapi/spec/responses.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
 create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
 create mode 100644 vendor/github.com/go-openapi/spec/spec.go
 create mode 100644 vendor/github.com/go-openapi/spec/swagger.go
 create mode 100644 vendor/github.com/go-openapi/spec/tag.go
 create mode 100644 vendor/github.com/go-openapi/spec/url_go18.go
 create mode 100644 vendor/github.com/go-openapi/spec/url_go19.go
 create mode 100644 vendor/github.com/go-openapi/spec/validations.go
 create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore
 create mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE
 create mode 100644 vendor/github.com/go-openapi/strfmt/README.md
 create mode 100644 vendor/github.com/go-openapi/strfmt/bson.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/date.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/default.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/doc.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/duration.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/format.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/time.go
 create mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go
 create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/swag/.gitignore
 create mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/swag/LICENSE
 create mode 100644 vendor/github.com/go-openapi/swag/README.md
 create mode 100644 vendor/github.com/go-openapi/swag/convert.go
 create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
 create mode 100644 vendor/github.com/go-openapi/swag/doc.go
 create mode 100644 vendor/github.com/go-openapi/swag/file.go
 create mode 100644 vendor/github.com/go-openapi/swag/json.go
 create mode 100644 vendor/github.com/go-openapi/swag/loading.go
 create mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
 create mode 100644 vendor/github.com/go-openapi/swag/net.go
 create mode 100644 vendor/github.com/go-openapi/swag/path.go
 create mode 100644 vendor/github.com/go-openapi/swag/post_go18.go
 create mode 100644 vendor/github.com/go-openapi/swag/post_go19.go
 create mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go
 create mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go
 create mode 100644 vendor/github.com/go-openapi/swag/split.go
 create mode 100644 vendor/github.com/go-openapi/swag/util.go
 create mode 100644 vendor/github.com/go-openapi/swag/yaml.go
 create mode 100644 vendor/github.com/go-openapi/validate/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/validate/.gitattributes
 create mode 100644 vendor/github.com/go-openapi/validate/.gitignore
 create mode 100644 vendor/github.com/go-openapi/validate/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/validate/LICENSE
 create mode 100644 vendor/github.com/go-openapi/validate/README.md
 create mode 100644 vendor/github.com/go-openapi/validate/appveyor.yml
 create mode 100644 vendor/github.com/go-openapi/validate/context.go
 create mode 100644 vendor/github.com/go-openapi/validate/debug.go
 create mode 100644 vendor/github.com/go-openapi/validate/default_validator.go
 create mode 100644 vendor/github.com/go-openapi/validate/doc.go
 create mode 100644 vendor/github.com/go-openapi/validate/example_validator.go
 create mode 100644 vendor/github.com/go-openapi/validate/formats.go
 create mode 100644 vendor/github.com/go-openapi/validate/helpers.go
 create mode 100644 vendor/github.com/go-openapi/validate/object_validator.go
 create mode 100644 vendor/github.com/go-openapi/validate/options.go
 create mode 100644 vendor/github.com/go-openapi/validate/result.go
 create mode 100644 vendor/github.com/go-openapi/validate/rexp.go
 create mode 100644 vendor/github.com/go-openapi/validate/schema.go
 create mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go
 create mode 100644 vendor/github.com/go-openapi/validate/schema_option.go
 create mode 100644 vendor/github.com/go-openapi/validate/schema_props.go
 create mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go
 create mode 100644 vendor/github.com/go-openapi/validate/spec.go
 create mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go
 create mode 100644 vendor/github.com/go-openapi/validate/type.go
 create mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh
 create mode 100644 vendor/github.com/go-openapi/validate/validator.go
 create mode 100644 vendor/github.com/go-openapi/validate/values.go
 create mode 100644 vendor/github.com/gobwas/glob/.gitignore
 create mode 100644 vendor/github.com/gobwas/glob/.travis.yml
 create mode 100644 vendor/github.com/gobwas/glob/LICENSE
 create mode 100644 vendor/github.com/gobwas/glob/bench.sh
 create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go
 create mode 100644 vendor/github.com/gobwas/glob/glob.go
 create mode 100644 vendor/github.com/gobwas/glob/match/any.go
 create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go
 create mode 100644 vendor/github.com/gobwas/glob/match/btree.go
 create mode 100644 vendor/github.com/gobwas/glob/match/contains.go
 create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go
 create mode 100644 vendor/github.com/gobwas/glob/match/list.go
 create mode 100644 vendor/github.com/gobwas/glob/match/match.go
 create mode 100644 vendor/github.com/gobwas/glob/match/max.go
 create mode 100644 vendor/github.com/gobwas/glob/match/min.go
 create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go
 create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go
 create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go
 create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go
 create mode 100644 vendor/github.com/gobwas/glob/match/range.go
 create mode 100644 vendor/github.com/gobwas/glob/match/row.go
 create mode 100644 vendor/github.com/gobwas/glob/match/segments.go
 create mode 100644 vendor/github.com/gobwas/glob/match/single.go
 create mode 100644 vendor/github.com/gobwas/glob/match/suffix.go
 create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go
 create mode 100644 vendor/github.com/gobwas/glob/match/super.go
 create mode 100644 vendor/github.com/gobwas/glob/match/text.go
 create mode 100644 vendor/github.com/gobwas/glob/readme.md
 create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go
 create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go
 create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
 create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go
 create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go
 create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go
 create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go
 create mode 100644 vendor/github.com/golang/protobuf/AUTHORS
 create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
 create mode 100644 vendor/github.com/golang/protobuf/LICENSE
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 create mode 100644 vendor/github.com/google/go-containerregistry/LICENSE
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/README.md
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/check.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/digest.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/doc.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/errors.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/options.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/ref.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/registry.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/repository.go
 create mode 100644 vendor/github.com/google/go-containerregistry/pkg/name/tag.go
 create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md
 create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md
 create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS
 create mode 100644 vendor/github.com/google/uuid/LICENSE
 create mode 100644 vendor/github.com/google/uuid/README.md
 create mode 100644 vendor/github.com/google/uuid/dce.go
 create mode 100644 vendor/github.com/google/uuid/doc.go
 create mode 100644 vendor/github.com/google/uuid/hash.go
 create mode 100644 vendor/github.com/google/uuid/marshal.go
 create mode 100644 vendor/github.com/google/uuid/node.go
 create mode 100644 vendor/github.com/google/uuid/node_js.go
 create mode 100644 vendor/github.com/google/uuid/node_net.go
 create mode 100644 vendor/github.com/google/uuid/null.go
 create mode 100644 vendor/github.com/google/uuid/sql.go
 create mode 100644 vendor/github.com/google/uuid/time.go
 create mode 100644 vendor/github.com/google/uuid/util.go
 create mode 100644 vendor/github.com/google/uuid/uuid.go
 create mode 100644 vendor/github.com/google/uuid/version1.go
 create mode 100644 vendor/github.com/google/uuid/version4.go
 create mode 100644 vendor/github.com/gorilla/mux/AUTHORS
 create mode 100644 vendor/github.com/gorilla/mux/LICENSE
 create mode 100644 vendor/github.com/gorilla/mux/README.md
 create mode 100644 vendor/github.com/gorilla/mux/doc.go
 create mode 100644 vendor/github.com/gorilla/mux/middleware.go
 create mode 100644 vendor/github.com/gorilla/mux/mux.go
 create mode 100644 vendor/github.com/gorilla/mux/regexp.go
 create mode 100644 vendor/github.com/gorilla/mux/route.go
 create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go
 create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
 create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
 create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
 create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md
 create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-version/README.md
 create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go
 delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
 create mode 100644 vendor/github.com/josharian/intern/README.md
 create mode 100644 vendor/github.com/josharian/intern/intern.go
 create mode 100644 vendor/github.com/josharian/intern/license.md
 create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml
 create mode 100644 vendor/github.com/json-iterator/go/.gitignore
 create mode 100644 vendor/github.com/json-iterator/go/.travis.yml
 create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock
 create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml
 create mode 100644 vendor/github.com/json-iterator/go/LICENSE
 create mode 100644 vendor/github.com/json-iterator/go/README.md
 create mode 100644 vendor/github.com/json-iterator/go/adapter.go
 create mode 100644 vendor/github.com/json-iterator/go/any.go
 create mode 100644 vendor/github.com/json-iterator/go/any_array.go
 create mode 100644 vendor/github.com/json-iterator/go/any_bool.go
 create mode 100644 vendor/github.com/json-iterator/go/any_float.go
 create mode 100644 vendor/github.com/json-iterator/go/any_int32.go
 create mode 100644 vendor/github.com/json-iterator/go/any_int64.go
 create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go
 create mode 100644 vendor/github.com/json-iterator/go/any_nil.go
 create mode 100644 vendor/github.com/json-iterator/go/any_number.go
 create mode 100644 vendor/github.com/json-iterator/go/any_object.go
 create mode 100644 vendor/github.com/json-iterator/go/any_str.go
 create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go
 create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go
 create mode 100644 vendor/github.com/json-iterator/go/build.sh
 create mode 100644 vendor/github.com/json-iterator/go/config.go
 create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
 create mode 100644 vendor/github.com/json-iterator/go/iter.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_array.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_float.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_int.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_object.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go
 create mode 100644 vendor/github.com/json-iterator/go/iter_str.go
 create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go
 create mode 100644 vendor/github.com/json-iterator/go/pool.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go
 create mode 100644 vendor/github.com/json-iterator/go/stream.go
create mode 100644 vendor/github.com/json-iterator/go/stream_float.go create mode 100644 vendor/github.com/json-iterator/go/stream_int.go create mode 100644 vendor/github.com/json-iterator/go/stream_str.go create mode 100644 vendor/github.com/json-iterator/go/test.sh create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go create mode 100644 vendor/github.com/klauspost/compress/flate/token.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 
vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/klauspost/pgzip/.gitignore create mode 100644 vendor/github.com/klauspost/pgzip/.travis.yml create mode 100644 vendor/github.com/klauspost/pgzip/GO_LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/LICENSE create mode 100644 vendor/github.com/klauspost/pgzip/README.md create mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go create mode 100644 vendor/github.com/klauspost/pgzip/gzip.go create mode 100644 vendor/github.com/letsencrypt/boulder/LICENSE.txt create mode 100644 vendor/github.com/letsencrypt/boulder/core/challenges.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/interfaces.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/objects.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/util.go create mode 100644 vendor/github.com/letsencrypt/boulder/errors/errors.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/blocked.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/good_key.go create mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/weak.go create mode 100644 vendor/github.com/letsencrypt/boulder/identifier/identifier.go create mode 100644 vendor/github.com/letsencrypt/boulder/probs/probs.go create mode 100644 vendor/github.com/letsencrypt/boulder/revocation/reasons.go create mode 100644 vendor/github.com/mailru/easyjson/LICENSE create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE create mode 100644 vendor/github.com/mattn/go-runewidth/README.md create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go create mode 100644 
vendor/github.com/mattn/go-runewidth/runewidth_windows.go create mode 100644 vendor/github.com/mattn/go-sqlite3/.codecov.yml create mode 100644 vendor/github.com/mattn/go-sqlite3/.gitignore create mode 100644 vendor/github.com/mattn/go-sqlite3/LICENSE create mode 100644 vendor/github.com/mattn/go-sqlite3/README.md create mode 100644 vendor/github.com/mattn/go-sqlite3/backup.go create mode 100644 vendor/github.com/mattn/go-sqlite3/callback.go create mode 100644 vendor/github.com/mattn/go-sqlite3/convert.go create mode 100644 vendor/github.com/mattn/go-sqlite3/doc.go create mode 100644 vendor/github.com/mattn/go-sqlite3/error.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_context.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_os_trace.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_other.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_type.go create mode 100644 
vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3ext.h create mode 100644 vendor/github.com/mattn/go-sqlite3/static_mock.go create mode 100644 vendor/github.com/miekg/pkcs11/.gitignore create mode 100644 vendor/github.com/miekg/pkcs11/LICENSE create mode 100644 vendor/github.com/miekg/pkcs11/Makefile.release create mode 100644 vendor/github.com/miekg/pkcs11/README.md create mode 100644 vendor/github.com/miekg/pkcs11/error.go create mode 100644 vendor/github.com/miekg/pkcs11/hsm.db create mode 100644 vendor/github.com/miekg/pkcs11/params.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.go create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11f.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11go.h create mode 100644 vendor/github.com/miekg/pkcs11/pkcs11t.h create mode 100644 vendor/github.com/miekg/pkcs11/release.go create mode 100644 vendor/github.com/miekg/pkcs11/softhsm.conf create mode 100644 vendor/github.com/miekg/pkcs11/softhsm2.conf create mode 100644 vendor/github.com/miekg/pkcs11/types.go create mode 100644 vendor/github.com/miekg/pkcs11/vendor.go create mode 100644 vendor/github.com/miekg/pkcs11/zconst.go create mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/moby/sys/mountinfo/LICENSE create mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 vendor/github.com/modern-go/concurrent/README.md create mode 100644 vendor/github.com/modern-go/concurrent/executor.go create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 vendor/github.com/modern-go/concurrent/log.go create mode 100644 vendor/github.com/modern-go/concurrent/test.sh create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml create mode 100644 
vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 vendor/github.com/modern-go/reflect2/README.md create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 vendor/github.com/oklog/ulid/.gitignore create mode 100644 vendor/github.com/oklog/ulid/.travis.yml create mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md create mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md create mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md create mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock create mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml create mode 100644 vendor/github.com/oklog/ulid/LICENSE create mode 100644 vendor/github.com/oklog/ulid/README.md create mode 100644 vendor/github.com/oklog/ulid/ulid.go create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS create mode 100644 
vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go create mode 100644 vendor/github.com/opencontainers/runc/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/osbuild/images/LICENSE create mode 100644 vendor/github.com/osbuild/images/internal/common/constants.go create mode 100644 vendor/github.com/osbuild/images/internal/common/distro.go create mode 100644 vendor/github.com/osbuild/images/internal/common/helpers.go create mode 100644 vendor/github.com/osbuild/images/internal/common/pointers.go create mode 100644 vendor/github.com/osbuild/images/internal/common/states.go create mode 100644 vendor/github.com/osbuild/images/internal/dnfjson/cache.go create mode 100644 vendor/github.com/osbuild/images/internal/dnfjson/dnfjson.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/azure.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/ec2.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/environment.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/gcp.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/kvm.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/openstack.go create mode 100644 vendor/github.com/osbuild/images/internal/environment/vsphere.go create mode 100644 vendor/github.com/osbuild/images/internal/fdo/fdo.go create mode 100644 vendor/github.com/osbuild/images/internal/fsnode/dir.go create mode 100644 vendor/github.com/osbuild/images/internal/fsnode/file.go create mode 100644 vendor/github.com/osbuild/images/internal/fsnode/fsnode.go create mode 100644 
vendor/github.com/osbuild/images/internal/ignition/ignition.go create mode 100644 vendor/github.com/osbuild/images/internal/pathpolicy/path_policy.go create mode 100644 vendor/github.com/osbuild/images/internal/pathpolicy/path_trie.go create mode 100644 vendor/github.com/osbuild/images/internal/pathpolicy/policies.go create mode 100644 vendor/github.com/osbuild/images/internal/shell/shell.go create mode 100644 vendor/github.com/osbuild/images/internal/users/users.go create mode 100644 vendor/github.com/osbuild/images/internal/workload/anaconda.go create mode 100644 vendor/github.com/osbuild/images/internal/workload/custom.go create mode 100644 vendor/github.com/osbuild/images/internal/workload/sap.go create mode 100644 vendor/github.com/osbuild/images/internal/workload/static_webserver.go create mode 100644 vendor/github.com/osbuild/images/internal/workload/workload.go create mode 100644 vendor/github.com/osbuild/images/pkg/artifact/artifact.go create mode 100644 vendor/github.com/osbuild/images/pkg/blueprint/blueprint.go create mode 100644 vendor/github.com/osbuild/images/pkg/blueprint/customizations.go create mode 100644 vendor/github.com/osbuild/images/pkg/blueprint/filesystem_customizations.go create mode 100644 vendor/github.com/osbuild/images/pkg/blueprint/fsnode_customizations.go create mode 100644 vendor/github.com/osbuild/images/pkg/blueprint/repository_customizations.go create mode 100644 vendor/github.com/osbuild/images/pkg/container/client.go create mode 100644 vendor/github.com/osbuild/images/pkg/container/resolver.go create mode 100644 vendor/github.com/osbuild/images/pkg/container/spec.go create mode 100644 vendor/github.com/osbuild/images/pkg/crypt/crypt.go create mode 100644 vendor/github.com/osbuild/images/pkg/crypt/crypt_impl.go create mode 100644 vendor/github.com/osbuild/images/pkg/crypt/crypt_impl_macos.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/btrfs.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/disk.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/filesystem.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/luks.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/lvm.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/partition.go create mode 100644 vendor/github.com/osbuild/images/pkg/disk/partition_table.go create mode 100644 vendor/github.com/osbuild/images/pkg/distro/distro.go create mode 100644 vendor/github.com/osbuild/images/pkg/distro/image_config.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/anaconda_live_installer.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/anaconda_ostree_installer.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/anaconda_tar_installer.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/archive.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/container.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/disk.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/image.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/ostree_archive.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/ostree_container.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/ostree_disk.go create mode 100644 vendor/github.com/osbuild/images/pkg/image/ostree_simplified_installer.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/anaconda_installer.go create mode 100644 
vendor/github.com/osbuild/images/pkg/manifest/anaconda_installer_iso_tree.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/build.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/coi_iso_tree.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/commit.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/commit_server_tree.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/coreos_installer.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/efi_boot_tree.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/empty.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/iso.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/iso_rootfs.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/manifest.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/oci_container.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/os.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/ostree_deployment.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/ovf.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/pipeline.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/qcow2.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/raw.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/raw_ostree.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/tar.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/vmdk.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/vpc.go create mode 100644 vendor/github.com/osbuild/images/pkg/manifest/xz.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/anaconda_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/authconfig_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/authselect_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/bootiso_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/btrfs_mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/buildstamp_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/chmod_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/chown_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/chrony_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/clevis_luks_bind_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/cloud_init_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/containers_input.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/containers_storage_conf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/copy_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/curl_source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/device.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/discinfo_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/disk.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/dnf_automatic_config_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/dnf_config_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/dracut_conf_stage.go create mode 100644 
vendor/github.com/osbuild/images/pkg/osbuild/dracut_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ext4_mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/fat_mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/fdo_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/files_input.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/firewall_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/first_boot_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/fix_bls_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/fsnode.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/fstab_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/gcp_guest_agent_conf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/groups_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/grub2_inst_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/grub2_legacy_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/grub2_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/grub_iso_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/hostname_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ignition_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/implantisomd5_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/inline_source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/input.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/isolinux_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/kernel_cmdline_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/keymap_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/kickstart_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/locale_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/loopback_device.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/lorax_script_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/luks2_device.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/luks2_format_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/luks2_remove_key_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/lvm2_create_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/lvm2_lv_device.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/lvm2_metadata_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkdir_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkfs_btrfs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkfs_ext4_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkfs_fat_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkfs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mkfs_xfs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/modprobe_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/nginxconf_stage.go create mode 100644 
vendor/github.com/osbuild/images/pkg/osbuild/oci_archive_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/osbuild-exec.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/osbuild.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_commit_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_config_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_deploy_container_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_deploy_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_deployment_mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_fillvar_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_init_fs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_init_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_input.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_os_init_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_passwd_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_preptree_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_pull_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_remotes_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_selinux_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ostree_source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/ovf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/pam_limits_conf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/pwquality_conf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/qemu_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/result.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/result_test_data.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/rhsm_facts_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/rhsm_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/rpm_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/script_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/selinux_config_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/selinux_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/sfdisk_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/sgdisk_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/shell_init_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/skopeo_index_source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/skopeo_source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/skopeo_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/source.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/squashfs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/sshd_config_stage.go create mode 100644 
vendor/github.com/osbuild/images/pkg/osbuild/stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/sysconfig_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/sysctld_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/systemd_journald_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/systemd_logind_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/systemd_preset_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/systemd_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/systemd_unit_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/tar_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/timezone_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/tmpfilesd_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/tree_input.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/truncate_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/tuned_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/udev_rules_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/users_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/v1result.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/waagent_conf.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/wsl_conf_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/xfs_mount.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/xorrisofs_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/xz_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/yum_config_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/yum_repos_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/zipl_inst_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/osbuild/zipl_stage.go create mode 100644 vendor/github.com/osbuild/images/pkg/ostree/errors.go create mode 100644 vendor/github.com/osbuild/images/pkg/ostree/ostree.go create mode 100644 vendor/github.com/osbuild/images/pkg/platform/aarch64.go create mode 100644 vendor/github.com/osbuild/images/pkg/platform/platform.go create mode 100644 vendor/github.com/osbuild/images/pkg/platform/ppc64le.go create mode 100644 vendor/github.com/osbuild/images/pkg/platform/s390x.go create mode 100644 vendor/github.com/osbuild/images/pkg/platform/x86_64.go create mode 100644 vendor/github.com/osbuild/images/pkg/rhsm/facts/facts.go create mode 100644 vendor/github.com/osbuild/images/pkg/rhsm/secrets.go create mode 100644 vendor/github.com/osbuild/images/pkg/rpmmd/metadata.go create mode 100644 vendor/github.com/osbuild/images/pkg/rpmmd/repository.go create mode 100644 vendor/github.com/osbuild/images/pkg/runner/centos.go create mode 100644 vendor/github.com/osbuild/images/pkg/runner/fedora.go create mode 100644 vendor/github.com/osbuild/images/pkg/runner/linux.go create mode 100644 vendor/github.com/osbuild/images/pkg/runner/rhel.go create mode 100644 vendor/github.com/osbuild/images/pkg/runner/runner.go create mode 100644 vendor/github.com/osbuild/images/pkg/subscription/subscription.go create mode 100644 vendor/github.com/proglottis/gpgme/.gitignore create mode 100644 vendor/github.com/proglottis/gpgme/LICENSE create mode 100644 
vendor/github.com/proglottis/gpgme/README.md create mode 100644 vendor/github.com/proglottis/gpgme/callbacks.go create mode 100644 vendor/github.com/proglottis/gpgme/data.go create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.c create mode 100644 vendor/github.com/proglottis/gpgme/go_gpgme.h create mode 100644 vendor/github.com/proglottis/gpgme/gpgme.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info.go create mode 100644 vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt create mode 100644 vendor/github.com/rivo/uniseg/README.md create mode 100644 vendor/github.com/rivo/uniseg/doc.go create mode 100644 vendor/github.com/rivo/uniseg/eastasianwidth.go create mode 100644 vendor/github.com/rivo/uniseg/emojipresentation.go create mode 100644 vendor/github.com/rivo/uniseg/gen_breaktest.go create mode 100644 vendor/github.com/rivo/uniseg/gen_properties.go create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go create mode 100644 vendor/github.com/rivo/uniseg/graphemeproperties.go create mode 100644 vendor/github.com/rivo/uniseg/graphemerules.go create mode 100644 vendor/github.com/rivo/uniseg/line.go create mode 100644 vendor/github.com/rivo/uniseg/lineproperties.go create mode 100644 vendor/github.com/rivo/uniseg/linerules.go create mode 100644 vendor/github.com/rivo/uniseg/properties.go create mode 100644 vendor/github.com/rivo/uniseg/sentence.go create mode 100644 vendor/github.com/rivo/uniseg/sentenceproperties.go create mode 100644 vendor/github.com/rivo/uniseg/sentencerules.go create mode 100644 vendor/github.com/rivo/uniseg/step.go create mode 100644 vendor/github.com/rivo/uniseg/width.go create mode 100644 vendor/github.com/rivo/uniseg/word.go create mode 100644 vendor/github.com/rivo/uniseg/wordproperties.go create mode 100644 vendor/github.com/rivo/uniseg/wordrules.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go create mode 100644 vendor/github.com/sigstore/fulcio/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/fulcio/LICENSE create mode 100644 vendor/github.com/sigstore/fulcio/pkg/certificate/doc.go create mode 100644 vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go create mode 100644 vendor/github.com/sigstore/rekor/CONTRIBUTORS.md create mode 100644 vendor/github.com/sigstore/rekor/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/rekor/LICENSE create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/error.go create 
mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go create mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go create mode 100644 vendor/github.com/sigstore/sigstore/COPYRIGHT.txt create mode 100644 vendor/github.com/sigstore/sigstore/LICENSE create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/generic.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go create mode 100644 
vendor/github.com/sigstore/sigstore/pkg/cryptoutils/sans.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/message.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/keyversion.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/rand.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/remoteverification.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/rpcauth.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/options/signeropts.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/payload/doc.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/publickey.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/signer.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/util.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 
vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go delete mode 100644 vendor/github.com/spf13/cobra/active_help.md delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.md delete mode 100644 vendor/github.com/spf13/cobra/fish_completions.md delete mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md delete mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md delete mode 100644 vendor/github.com/spf13/cobra/shell_completions.md delete mode 100644 vendor/github.com/spf13/cobra/user_guide.md delete mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.gitignore create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/LICENSE create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/Makefile create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/README.md create mode 100644 vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/theupdateframework/go-tuf/LICENSE create mode 100644 vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go create mode 100644 vendor/github.com/titanous/rocacheck/LICENSE create mode 100644 vendor/github.com/titanous/rocacheck/README.md create mode 100644 vendor/github.com/titanous/rocacheck/rocacheck.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/format.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/strconv.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/README.md create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/assemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/asm/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/doc.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/entry.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/getter.go create mode 100644 vendor/github.com/vbatts/tar-split/tar/storage/packer.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/.gitignore create mode 100644 vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING create mode 100644 vendor/github.com/vbauerster/mpb/v8/README.md create mode 100644 vendor/github.com/vbauerster/mpb/v8/UNLICENSE create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar.go create mode 100644 
vendor/github.com/vbauerster/mpb/v8/bar_filler.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/bar_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/container_option.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/any.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/counters.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/decorator.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/eta.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/meta.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/name.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/size_type.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/speed.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/spinner.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/doc.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/heap_manager.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/internal/percentage.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/internal/width.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/priority_queue.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/progress.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxyreader.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxywriter.go create mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go create mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go create mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/marshal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go create mode 100644 vendor/go.mozilla.org/pkcs7/.gitignore create mode 100644 vendor/go.mozilla.org/pkcs7/LICENSE create mode 100644 vendor/go.mozilla.org/pkcs7/Makefile create mode 100644 vendor/go.mozilla.org/pkcs7/README.md create mode 100644 vendor/go.mozilla.org/pkcs7/ber.go create mode 100644 vendor/go.mozilla.org/pkcs7/decrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/encrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/pkcs7.go create mode 100644 vendor/go.mozilla.org/pkcs7/sign.go create mode 100644 vendor/go.mozilla.org/pkcs7/verify.go create mode 100644 vendor/go.mozilla.org/pkcs7/verify_test_dsa.go create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/internal/alias/alias.go create mode 100644 vendor/golang.org/x/crypto/internal/alias/alias_purego.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 
vendor/golang.org/x/crypto/ocsp/ocsp.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s create mode 100644 vendor/golang.org/x/crypto/sha3/register.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go create 
mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go create mode 100644 vendor/golang.org/x/net/http/httpguts/httplex.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/ascii.go create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables15.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trie12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 
vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/plan9/asm.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s create mode 100644 
vendor/golang.org/x/sys/plan9/const_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/mkall.sh create mode 100644 vendor/golang.org/x/sys/plan9/mkerrors.sh create mode 100644 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh create mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/race.go create mode 100644 vendor/golang.org/x/sys/plan9/race0.go create mode 100644 vendor/golang.org/x/sys/plan9/str.go create mode 100644 vendor/golang.org/x/sys/plan9/syscall.go create mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go create mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go create mode 100644 vendor/golang.org/x/sys/windows/aliases.go create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/term/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/term/LICENSE create mode 100644 vendor/golang.org/x/term/PATENTS create mode 100644 vendor/golang.org/x/term/README.md create mode 100644 vendor/golang.org/x/term/codereview.cfg create mode 100644 vendor/golang.org/x/term/term.go create mode 100644 vendor/golang.org/x/term/term_plan9.go create mode 100644 vendor/golang.org/x/term/term_unix.go create mode 100644 vendor/golang.org/x/term/term_unix_bsd.go create mode 100644 vendor/golang.org/x/term/term_unix_other.go create mode 100644 vendor/golang.org/x/term/term_unsupported.go create mode 100644 
vendor/golang.org/x/term/term_windows.go create mode 100644 vendor/golang.org/x/term/terminal.go create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables15.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 
vendor/google.golang.org/grpc/internal/envconfig/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 
vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 
vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 
100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 
100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.gitignore create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/LICENSE create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/README.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/crypter.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/doc.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/encoding.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/README.md create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/indent.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/scanner.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/stream.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/json/tags.go create mode 100644 
vendor/gopkg.in/go-jose/go-jose.v2/jwe.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/jwk.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/jws.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/opaque.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/shared.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/signing.go create mode 100644 vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go create mode 100644 vendor/gopkg.in/ini.v1/.editorconfig create mode 100644 vendor/gopkg.in/ini.v1/.gitignore create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/gopkg.in/ini.v1/LICENSE create mode 100644 vendor/gopkg.in/ini.v1/Makefile create mode 100644 vendor/gopkg.in/ini.v1/README.md create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml create mode 100644 vendor/gopkg.in/ini.v1/data_source.go create mode 100644 vendor/gopkg.in/ini.v1/deprecated.go create mode 100644 vendor/gopkg.in/ini.v1/error.go create mode 100644 vendor/gopkg.in/ini.v1/file.go create mode 100644 vendor/gopkg.in/ini.v1/helper.go create mode 100644 vendor/gopkg.in/ini.v1/ini.go create mode 100644 vendor/gopkg.in/ini.v1/key.go create mode 100644 vendor/gopkg.in/ini.v1/parser.go create mode 100644 vendor/gopkg.in/ini.v1/section.go create mode 100644 vendor/gopkg.in/ini.v1/struct.go create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go

diff --git a/Containerfile b/Containerfile
index 934ed3d0..04c5e837 100644
--- a/Containerfile
+++ b/Containerfile
@@ -1,4 +1,4 @@
-FROM quay.io/fedora/fedora:39 as builder
+FROM registry.fedoraproject.org/fedora:39 AS builder
 RUN dnf -y install golang make
 COPY . /src
 RUN cd /src && make && make install DESTDIR=/instroot
@@ -6,4 +6,4 @@ RUN cd /src && make && make install DESTDIR=/instroot
 FROM quay.io/fedora/fedora:39
 COPY --from=builder /instroot /
 RUN /usr/lib/osbuildbootc/installdeps.sh
-ENTRYPOINT ["osbuildbootc"]
+ENTRYPOINT ["osbuildbootc"]
\ No newline at end of file
diff --git a/Containerfile.orig b/Containerfile.odc
similarity index 79%
rename from Containerfile.orig
rename to Containerfile.odc
index f1df79bc..e7f94989 100644
--- a/Containerfile.orig
+++ b/Containerfile.odc
@@ -1,7 +1,7 @@
 FROM registry.fedoraproject.org/fedora:39 AS builder
-RUN dnf install -y git-core golang gpgme-devel libassuan-devel
-COPY build.sh .
-RUN ./build.sh
+RUN dnf -y install golang make
+COPY . /src
+RUN cd /src && make && make install DESTDIR=/instroot
 
 FROM registry.fedoraproject.org/fedora:39
 RUN dnf install -y osbuild osbuild-ostree && dnf clean all
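Both Containerfiles above follow the same multi-stage pattern: a builder stage compiles the Go binaries with `make`, and the runtime stage copies the installed tree and pulls in the osbuild dependencies. As a minimal sketch (not part of this patch), assuming podman and a checkout of the repository, the image can be built locally like so; the tag name is purely illustrative:

```bash
# Build the container image from the repository root (tag is arbitrary)
podman build -t localhost/osbuildbootc:dev -f Containerfile .
```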
diff --git a/Containerfile.osbuildbootc b/Containerfile.osbuildbootc
new file mode 100644
index 00000000..934ed3d0
--- /dev/null
+++ b/Containerfile.osbuildbootc
@@ -0,0 +1,9 @@
+FROM quay.io/fedora/fedora:39 as builder
+RUN dnf -y install golang make
+COPY . /src
+RUN cd /src && make && make install DESTDIR=/instroot
+
+FROM quay.io/fedora/fedora:39
+COPY --from=builder /instroot /
+RUN /usr/lib/osbuildbootc/installdeps.sh
+ENTRYPOINT ["osbuildbootc"]
diff --git a/Makefile b/Makefile
index 186c61bd..2e5a32fa 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,10 @@ GONAMESPACE := github.com/cgwalters/osbuildbootc
 PREFIX ?= /usr
 DESTDIR ?=
 
-.PHONY: all bin/osbuildbootc install clean vendor
-all: bin/osbuildbootc
+BINARIES := osbuildbootc osbuild-deploy-container
+
+.PHONY: all bin install clean vendor
+all: bin
 
 src:=$(shell find src -maxdepth 1 -type f -executable -print)
 GOARCH:=$(shell uname -m)
@@ -13,8 +15,9 @@ else ifeq ($(GOARCH),aarch64)
 GOARCH="arm64"
 endif
 
-bin/osbuildbootc:
-	cd cmd && go build -mod vendor -o ../$@
+bin:
+	(cd cmd && go build -mod vendor -o ../bin/osbuildbootc)
+	(top=$$(pwd); cd cmd/osbuild-deploy-container && go build -mod vendor -o $${top}/bin/osbuild-deploy-container)
 
 check:
 	(cd cmd && go test -mod=vendor)
@@ -26,7 +29,7 @@ clean:
 install:
 	install -d $(DESTDIR)$(PREFIX)/lib/osbuildbootc
 	install -D -t $(DESTDIR)$(PREFIX)/lib/osbuildbootc $$(find src/ -maxdepth 1 -type f)
-	install -D -t $(DESTDIR)$(PREFIX)/bin bin/osbuildbootc
+	install -D -t $(DESTDIR)$(PREFIX)/bin $(addprefix bin/,$(BINARIES))
 
 vendor:
 	@go mod vendor
diff --git a/README-osbuildbootc.md b/README-osbuildbootc.md
new file mode 100644
index 00000000..380b1559
--- /dev/null
+++ b/README-osbuildbootc.md
@@ -0,0 +1,53 @@
+
+# osbuildbootc
+
+## Usage
+
+This tool can be invoked as a pre-built container image, and it can also be installed
+as a standalone tool inside another environment. The implementation uses qemu+KVM.
+
+Example invocation for the container image:
+
+```bash
+podman run --rm -ti --security-opt label=disable --device /dev/kvm -v $(pwd):/srv -w /srv ghcr.io/cgwalters/osbuildbootc:latest build-qcow2 -I quay.io/cgwalters/ostest example.qcow2
+```
+
+Explanation of the podman arguments:
+
+- `--security-opt label=disable`: Disables SELinux labeling; this is necessary to bind mount host paths at all
+- `--device /dev/kvm`: Pass the KVM device into the container
+- `-v $(pwd):/srv -w /srv`: Mount the current directory as `/srv` in the container and use it as the working directory
+
+Note that by default KVM is required. You can set the `OSBUILD_NO_KVM` environment variable
+to use full qemu emulation if necessary.
+
+### Take a container image from a remote registry, output a qcow2
+
+```bash
+osbuildbootc build-qcow2 quay.io/centos-boot/fedora-boot-cloud:eln fedora-boot-cloud.qcow2
+```
+
+### Take a container image stored in a local OCI directory
+
+In some scenarios it may be desirable to have local disk caches of container images
+instead of fetching from a registry every time.
+
+Note that here we need to specify the *target* image after installation to ensure that
+the machine will fetch updates from the registry.
+
+```bash
+osbuildbootc build-qcow2 --transport oci oci:cgwalters-ostest -I -t quay.io/cgwalters/ostest foo.qcow2
+```
+
+## Development
+
+This project is mostly in Go. However, it also has some shell script because
+some nontrivial code was inherited from [coreos-assembler](https://github.com/coreos/coreos-assembler/).
+
+It's recommended to use e.g. [a toolbox](https://github.com/containers/toolbox/) for development:
+
+```bash
+make && sudo make install
+```
+
+Then you can run `osbuildbootc`.
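The README above documents `OSBUILD_NO_KVM` as the escape hatch for hosts without KVM. As an illustrative sketch (not taken from this patch), combining that variable with the documented container invocation might look like the following; it assumes any non-empty value enables the emulation fallback and that `--device /dev/kvm` can simply be dropped:

```bash
# Hypothetical: run a build with pure qemu emulation on a host without /dev/kvm
podman run --rm -ti --security-opt label=disable \
  -e OSBUILD_NO_KVM=1 \
  -v $(pwd):/srv -w /srv \
  ghcr.io/cgwalters/osbuildbootc:latest \
  build-qcow2 -I quay.io/cgwalters/ostest example.qcow2
```

Expect emulated builds to be substantially slower than KVM-backed ones.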
diff --git a/README.md b/README.md
index fa592759..4a002863 100644
--- a/README.md
+++ b/README.md
@@ -1,52 +1,14 @@
-# osbuildbootc
+# Tooling for generating disk images from bootc images
 
-## Usage
+This project currently merges two tools with unrelated history, called
+`osbuild-deploy-container` and `osbuildbootc`.
 
-This tool can be invoked as a pre-built container image, and it can also be installed
-as a standalone tool inside another environment. The implementation uses qemu+KVM.
+For more on bootc, see [its upstream repository](https://github.com/containers/bootc).
 
-Example invocation for the container image:
+## osbuild-deploy-container
 
-```bash
-podman run --rm -ti --security-opt label=disable --device /dev/kvm -v $(pwd):/srv -w /srv ghcr.io/cgwalters/osbuildbootc:latest build-qcow2 -I quay.io/cgwalters/ostest example.qcow2
-```
+See [README.md.osbuild-deploy-container](README.md.osbuild-deploy-container)
 
-Explanation of podman arguments:
+## osbuildbootc
 
-- `--security-opt label=disable`: This is necessary to bind mount in host paths at all
-- `--device /dev/kvm`: Pass the KVM device into the container image
-- `-v $(pwd):/srv -w /srv`: Pass the current directory as `/srv` into the container
-
-Note that by default KVM is required. You can set the `OSBUILD_NO_KVM` environment variable
-to use full qemu emulation if necessary.
-
-### Take a container image from remote registry, output a qcow2
-
-```bash
-osbuildbootc build-qcow2 quay.io/centos-boot/fedora-boot-cloud:eln fedora-boot-cloud.qcow2
-```
-
-### Take a container image stored in local OCI directory
-
-In some scenarios it may be desirable to have local disk caches of container images,
-instead of fetching from a registry every time.
-
-Note here we need to specify the *target* image after installtion to ensure that
-the machine will fetch updates from the registry.
-
-```bash
-osbuildbootc build-qcow2 --transport oci oci:cgwalters-ostest -I -t quay.io/cgwalters/ostest foo.qcow2
-```
-
-## Development
-
-This project is mostly in Go. However, it also has some shell script because
-some nontrivial code was inherited from [coreos-assembler](https://github.com/coreos/coreos-assembler/).
-
-It's recommended to use e.g. [a toolbox](https://github.com/containers/toolbox/) for development:
-
-```bash
-make && sudo make install
-```
-
-Then you can run `osbuildbootc`.
+See [README.md.osbuildbootc](README.md.osbuildbootc)
diff --git a/README.md.orig b/README.md.osbuild-deploy-container
similarity index 100%
rename from README.md.orig
rename to README.md.osbuild-deploy-container
diff --git a/build.sh b/build.sh
index 90f2dd85..6439352c 100755
--- a/build.sh
+++ b/build.sh
@@ -3,6 +3,5 @@
 
 set -euo pipefail
 
-git clone --branch bifrost-image --depth 1 https://github.com/achilleas-k/images.git
 cd images
 go build ./cmd/osbuild-deploy-container
diff --git a/cmd/osbuild-deploy-container/fedora-eln.json b/cmd/osbuild-deploy-container/fedora-eln.json
new file mode 100644
index 00000000..5b5bc50a
--- /dev/null
+++ b/cmd/osbuild-deploy-container/fedora-eln.json
@@ -0,0 +1,58 @@
+{
+  "aarch64": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/aarch64/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/aarch64/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/aarch64/os/",
+      "name": "crb"
+    }
+  ],
+  "ppc64le": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/ppc64le/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/ppc64le/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/ppc64le/os/",
+      "name": "crb"
+    }
+  ],
+  "s390x": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/s390x/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/s390x/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/s390x/os/",
+      "name": "crb"
+    }
+  ],
+  "x86_64": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/x86_64/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/x86_64/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/x86_64/os/",
+      "name": "crb"
+    }
+  ]
+}
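The `fedora-eln.json` file added above maps each architecture to the ELN BaseOS, AppStream, and CRB repositories. As an illustration (not tooling from this patch), assuming `jq` is installed, one architecture's entries can be inspected from a checkout like so:

```bash
# Print the repository names and base URLs used for x86_64 builds
jq -r '.x86_64[] | "\(.name): \(.baseurl)"' cmd/osbuild-deploy-container/fedora-eln.json
```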
diff --git a/README.md.orig b/README.md.osbuild-deploy-container
similarity index 100%
rename from README.md.orig
rename to README.md.osbuild-deploy-container
diff --git a/build.sh b/build.sh
index 90f2dd85..6439352c 100755
--- a/build.sh
+++ b/build.sh
@@ -3,6 +3,5 @@
 set -euo pipefail
 
-git clone --branch bifrost-image --depth 1 https://github.com/achilleas-k/images.git
 cd images
 go build ./cmd/osbuild-deploy-container
diff --git a/cmd/osbuild-deploy-container/fedora-eln.json b/cmd/osbuild-deploy-container/fedora-eln.json
new file mode 100644
index 00000000..5b5bc50a
--- /dev/null
+++ b/cmd/osbuild-deploy-container/fedora-eln.json
@@ -0,0 +1,58 @@
+{
+  "aarch64": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/aarch64/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/aarch64/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/aarch64/os/",
+      "name": "crb"
+    }
+  ],
+  "ppc64le": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/ppc64le/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/ppc64le/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/ppc64le/os/",
+      "name": "crb"
+    }
+  ],
+  "s390x": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/s390x/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/s390x/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/s390x/os/",
+      "name": "crb"
+    }
+  ],
+  "x86_64": [
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/BaseOS/x86_64/os/",
+      "name": "baseos"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/AppStream/x86_64/os/",
+      "name": "appstream"
+    },
+    {
+      "baseurl": "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose/CRB/x86_64/os/",
+      "name": "crb"
+    }
+  ]
+}
diff --git a/cmd/osbuild-deploy-container/image.go b/cmd/osbuild-deploy-container/image.go
new file mode 100644
index 00000000..2a4e3a78
--- /dev/null
+++ b/cmd/osbuild-deploy-container/image.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+	"fmt"
+	"math/rand"
+
+	"github.com/osbuild/images/internal/common"
+	"github.com/osbuild/images/internal/users"
+	"github.com/osbuild/images/internal/workload"
+	"github.com/osbuild/images/pkg/blueprint"
+	"github.com/osbuild/images/pkg/container"
+	"github.com/osbuild/images/pkg/disk"
+	"github.com/osbuild/images/pkg/image"
+	"github.com/osbuild/images/pkg/manifest"
+	"github.com/osbuild/images/pkg/ostree"
+	"github.com/osbuild/images/pkg/platform"
+	"github.com/osbuild/images/pkg/rpmmd"
+	"github.com/osbuild/images/pkg/runner"
+)
+
+func Manifest(imageref string, config *BuildConfig, repos []rpmmd.RepoConfig, arch string, seed int64) (*manifest.Manifest, error) {
+
+	source := rand.NewSource(seed)
+
+	// math/rand is good enough in this case
+	/* #nosec G404 */
+	rng := rand.New(source)
+
+	baseImage := &ostree.ImageOptions{
+		Container:
imageref, + TLSVerify: common.ToPtr(true), + } + + img, err := pipelines(baseImage, config, arch, rng) + if err != nil { + fail(err.Error()) + } + mf := manifest.New() + mf.Distro = manifest.DISTRO_FEDORA + runner := &runner.Fedora{Version: 39} + _, err = img.InstantiateManifest(&mf, repos, runner, rng) + + return &mf, err +} + +func pipelines(baseImage *ostree.ImageOptions, config *BuildConfig, arch string, rng *rand.Rand) (image.ImageKind, error) { + if baseImage == nil { + fail("pipeline: no base image defined") + } + ref := "ostree/1/1/0" + containerSource := container.SourceSpec{ + Source: baseImage.Container, + Name: baseImage.Container, + TLSVerify: baseImage.TLSVerify, + } + + img := image.NewOSTreeContainerDiskImage(containerSource, ref) + + var customizations *blueprint.Customizations + if config != nil && config.Blueprint != nil { + customizations = config.Blueprint.Customizations + } + img.Users = users.UsersFromBP(customizations.GetUsers()) + img.Groups = users.GroupsFromBP(customizations.GetGroups()) + + img.KernelOptionsAppend = []string{ + "rw", + "console=tty0", + "console=ttyS0", + } + + img.SysrootReadOnly = true + + switch arch { + case platform.ARCH_X86_64.String(): + img.Platform = &platform.X86{ + BasePlatform: platform.BasePlatform{ + ImageFormat: platform.FORMAT_QCOW2, + }, + BIOS: true, + UEFIVendor: "fedora", + } + case platform.ARCH_AARCH64.String(): + img.Platform = &platform.Aarch64{ + UEFIVendor: "fedora", + BasePlatform: platform.BasePlatform{ + ImageFormat: platform.FORMAT_QCOW2, + QCOW2Compat: "1.1", + }, + } + } + + img.OSName = "default" + + if kopts := customizations.GetKernel(); kopts != nil && kopts.Append != "" { + img.KernelOptionsAppend = append(img.KernelOptionsAppend, kopts.Append) + } + + img.Workload = &workload.Custom{} + + basept, ok := partitionTables[arch] + if !ok { + fail(fmt.Sprintf("pipelines: no partition tables defined for %s", arch)) + } + size := uint64(10 * common.GibiByte) + pt, err := disk.NewPartitionTable(&basept, nil, size, disk.RawPartitioningMode, nil, rng) + check(err) + img.PartitionTable = pt + + img.Filename = "disk.qcow2" + + return img, nil +} diff --git a/cmd/osbuild-deploy-container/main.go b/cmd/osbuild-deploy-container/main.go new file mode 100644 index 00000000..8f2cb281 --- /dev/null +++ b/cmd/osbuild-deploy-container/main.go @@ -0,0 +1,181 @@ +package main + +import ( + _ "embed" + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + + "github.com/osbuild/images/internal/common" + "github.com/osbuild/images/internal/dnfjson" + "github.com/osbuild/images/pkg/blueprint" + "github.com/osbuild/images/pkg/container" + "github.com/osbuild/images/pkg/manifest" + "github.com/osbuild/images/pkg/osbuild" + "github.com/osbuild/images/pkg/ostree" + "github.com/osbuild/images/pkg/rpmmd" +) + +//go:embed fedora-eln.json +var reposStr string + +const ( + distroName = "fedora-39" + modulePlatformID = "platform:f39" + releaseVersion = "39" +) + +func fail(msg string) { + fmt.Fprintln(os.Stderr, msg) + os.Exit(1) +} + +func check(err error) { + if err != nil { + fail(err.Error()) + } +} + +type BuildConfig struct { + Name string `json:"name"` + OSTree *ostree.ImageOptions `json:"ostree,omitempty"` + Blueprint *blueprint.Blueprint `json:"blueprint,omitempty"` + Depends interface{} `json:"depends,omitempty"` // ignored +} + +// Parse embedded repositories and return repo configs for the given +// architecture. 
+func loadRepos(arch string) []rpmmd.RepoConfig { + var repoData map[string][]rpmmd.RepoConfig + err := json.Unmarshal([]byte(reposStr), &repoData) + if err != nil { + fail(fmt.Sprintf("error loading repositories: %s", err)) + } + archRepos, ok := repoData[arch] + if !ok { + fail(fmt.Sprintf("no repositories defined for %s", arch)) + } + return archRepos +} + +func loadConfig(path string) BuildConfig { + fp, err := os.Open(path) + check(err) + defer fp.Close() + + dec := json.NewDecoder(fp) + dec.DisallowUnknownFields() + var conf BuildConfig + + check(dec.Decode(&conf)) + if dec.More() { + fail(fmt.Sprintf("multiple configuration objects or extra data found in %q", path)) + } + return conf +} + +func makeManifest(imageref string, config *BuildConfig, repos []rpmmd.RepoConfig, archName string, seedArg int64, cacheRoot string) (manifest.OSBuildManifest, error) { + manifest, err := Manifest(imageref, config, repos, archName, seedArg) + check(err) + + // depsolve packages + solver := dnfjson.NewSolver(modulePlatformID, releaseVersion, archName, distroName, cacheRoot) + solver.SetDNFJSONPath("./dnf-json") + depsolvedSets := make(map[string][]rpmmd.PackageSpec) + for name, pkgSet := range manifest.GetPackageSetChains() { + res, err := solver.Depsolve(pkgSet) + if err != nil { + return nil, err + } + depsolvedSets[name] = res + } + + // resolve container + resolver := container.NewResolver(archName) + containerSpecs := make(map[string][]container.Spec) + for plName, sourceSpecs := range manifest.GetContainerSourceSpecs() { + for _, c := range sourceSpecs { + resolver.Add(c) + } + containerSpecs[plName], err = resolver.Finish() + if err != nil { + return nil, err + } + } + mf, err := manifest.Serialize(depsolvedSets, containerSpecs, nil) + if err != nil { + fail(fmt.Sprintf("[ERROR] manifest serialization failed: %s", err.Error())) + } + return mf, nil +} + +func saveManifest(ms manifest.OSBuildManifest, fpath string) error { + b, err := json.MarshalIndent(ms, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal data for %q: %s\n", fpath, err.Error()) + } + b = append(b, '\n') // add new line at end of file + fp, err := os.Create(fpath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %s\n", fpath, err.Error()) + } + defer fp.Close() + if _, err := fp.Write(b); err != nil { + return fmt.Errorf("failed to write output file %q: %s\n", fpath, err.Error()) + } + return nil +} + +func main() { + + arch := common.CurrentArch() + repos := loadRepos(arch) + + var outputDir, osbuildStore, rpmCacheRoot, configFile, imageref string + flag.StringVar(&outputDir, "output", ".", "artifact output directory") + flag.StringVar(&osbuildStore, "store", ".osbuild", "osbuild store for intermediate pipeline trees") + flag.StringVar(&rpmCacheRoot, "rpmmd", "/var/cache/osbuild/rpmmd", "rpm metadata cache directory") + flag.StringVar(&configFile, "config", "", "build config file") + flag.StringVar(&imageref, "imageref", "", "container image to deploy") + + flag.Parse() + + if imageref == "" { + fail("imageref is required") + } + + if err := os.MkdirAll(outputDir, 0777); err != nil { + fail(fmt.Sprintf("failed to create target directory: %s", err.Error())) + } + + config := BuildConfig{ + Name: "empty", + } + if configFile != "" { + config = loadConfig(configFile) + } + + seedArg := int64(0) + + fmt.Printf("Generating manifest for %s: ", config.Name) + mf, err := makeManifest(imageref, &config, repos, arch, seedArg, rpmCacheRoot) + if err != nil { + check(err) + } + fmt.Print("DONE\n") + 
+ manifestPath := filepath.Join(outputDir, "manifest.json") + if err := saveManifest(mf, manifestPath); err != nil { + check(err) + } + + fmt.Printf("Building manifest: %s\n", manifestPath) + + if _, err := osbuild.RunOSBuild(mf, osbuildStore, outputDir, []string{"qcow2"}, nil, nil, false, os.Stderr); err != nil { + check(err) + } + + fmt.Printf("Jobs done. Results saved in\n%s\n", outputDir) +} diff --git a/cmd/osbuild-deploy-container/partition_tables.go b/cmd/osbuild-deploy-container/partition_tables.go new file mode 100644 index 00000000..d264fa2d --- /dev/null +++ b/cmd/osbuild-deploy-container/partition_tables.go @@ -0,0 +1,107 @@ +package main + +import ( + "github.com/osbuild/images/internal/common" + "github.com/osbuild/images/pkg/disk" + "github.com/osbuild/images/pkg/distro" + "github.com/osbuild/images/pkg/platform" +) + +var partitionTables = distro.BasePartitionTableMap{ + platform.ARCH_X86_64.String(): disk.PartitionTable{ + UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0", + Type: "gpt", + Partitions: []disk.Partition{ + { + Size: 1 * common.MebiByte, + Bootable: true, + Type: disk.BIOSBootPartitionGUID, + UUID: disk.BIOSBootPartitionUUID, + }, + { + Size: 501 * common.MebiByte, + Type: disk.EFISystemPartitionGUID, + UUID: disk.EFISystemPartitionUUID, + Payload: &disk.Filesystem{ + Type: "vfat", + UUID: disk.EFIFilesystemUUID, + Mountpoint: "/boot/efi", + Label: "EFI-SYSTEM", + FSTabOptions: "umask=0077,shortname=winnt", + FSTabFreq: 0, + FSTabPassNo: 2, + }, + }, + { + Size: 1 * common.GibiByte, + Type: disk.FilesystemDataGUID, + UUID: disk.FilesystemDataUUID, + Payload: &disk.Filesystem{ + Type: "ext4", + Mountpoint: "/boot", + Label: "boot", + FSTabOptions: "defaults", + FSTabFreq: 1, + FSTabPassNo: 2, + }, + }, + { + Size: 2 * common.GibiByte, + Type: disk.FilesystemDataGUID, + UUID: disk.RootPartitionUUID, + Payload: &disk.Filesystem{ + Type: "ext4", + Label: "root", + Mountpoint: "/", + FSTabOptions: "defaults", + FSTabFreq: 1, + FSTabPassNo: 1, + }, + }, + }, + }, + platform.ARCH_AARCH64.String(): disk.PartitionTable{ + UUID: "0xc1748067", + Type: "dos", + Partitions: []disk.Partition{ + { + Size: 501 * common.MebiByte, + Type: "06", + Bootable: true, + Payload: &disk.Filesystem{ + Type: "vfat", + UUID: disk.EFIFilesystemUUID, + Mountpoint: "/boot/efi", + Label: "EFI-SYSTEM", + FSTabOptions: "umask=0077,shortname=winnt", + FSTabFreq: 0, + FSTabPassNo: 2, + }, + }, + { + Size: 1 * common.GibiByte, + Type: "83", + Payload: &disk.Filesystem{ + Type: "ext4", + Mountpoint: "/boot", + Label: "boot", + FSTabOptions: "defaults", + FSTabFreq: 1, + FSTabPassNo: 2, + }, + }, + { + Size: 2569 * common.MebiByte, + Type: "83", + Payload: &disk.Filesystem{ + Type: "ext4", + Label: "root", + Mountpoint: "/", + FSTabOptions: "defaults", + FSTabFreq: 1, + FSTabPassNo: 1, + }, + }, + }, + }, +} diff --git a/go.mod b/go.mod index 1bec7ae3..3fd946ec 100644 --- a/go.mod +++ b/go.mod @@ -6,15 +6,101 @@ require ( github.com/coreos/stream-metadata-go v0.4.3 github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.5.0 + github.com/spf13/cobra v1.8.0 golang.org/x/sys v0.14.0 // indirect ) -require github.com/go-logr/logr v1.3.0 // indirect +require github.com/osbuild/images v0.16.0 + +replace github.com/osbuild/images => github.com/achilleas-k/images v0.0.0-20231110154557-dd89cc3fa6b0 + +require ( + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi 
v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/containers/common v0.56.0 // indirect + github.com/containers/image/v5 v5.28.0 // indirect + github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect + github.com/containers/ocicrypt v1.1.8 // indirect + github.com/containers/storage v1.50.2 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.6+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-containerregistry v0.16.1 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-sqlite3 v1.14.17 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/runc v1.1.9 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/proglottis/gpgme v0.1.3 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect + github.com/sigstore/fulcio v1.4.0 // indirect + github.com/sigstore/rekor v1.2.2 // indirect + github.com/sigstore/sigstore v1.7.3 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect + github.com/theupdateframework/go-tuf v0.5.2 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/vbatts/tar-split v0.11.5 // 
indirect + github.com/vbauerster/mpb/v8 v8.6.1 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect + golang.org/x/crypto v0.15.0 // indirect + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect + golang.org/x/net v0.18.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/term v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) require ( github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect k8s.io/klog/v2 v2.110.1 ) diff --git a/go.sum b/go.sum index fa818d43..ecc651d6 100644 --- a/go.sum +++ b/go.sum @@ -1,56 +1,395 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= +github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/achilleas-k/images v0.0.0-20231110154557-dd89cc3fa6b0 h1:Io0IiRNOwX3uaEnkR5FhsFXgNKp0EeP9d+YhnlD/d9U= +github.com/achilleas-k/images v0.0.0-20231110154557-dd89cc3fa6b0/go.mod h1:3Bu5yvabkoMCAuqgdobsOobVZc4tDGttD+Yt8C86BZU= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/containers/common v0.56.0 h1:hysHUsEai1EkMXanU26UV55wMXns/a6AYmaFqJ4fEMY= +github.com/containers/common v0.56.0/go.mod h1:IjaDdfUtcs2CfCcJMZxuut4XlvkTkY9Nlqkso9xCOq4= +github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048= +github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU= +github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= +github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.1.8 h1:saSBF0/8DyPUjzcxMVzL2OBUWCkvRvqIm75pu0ADSZk= 
+github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/LFMtH0FIRt34= +github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE= +github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA= github.com/coreos/stream-metadata-go v0.4.3 h1:5GykJ8dtZSx1rdlzEAiDVzA73cwmUF3ceTuIP293L6E= github.com/coreos/stream-metadata-go v0.4.3/go.mod h1:fMObQqQm8Ku91G04btKzEH3AsdP1mrAb986z9aaK0tE= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd h1:0av0vtcjA8Hqv5gyWj79CLCFVwOOyBNWPjrfUWceMNg= +github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e h1:SCnqm8SjSa0QqRxXbo5YY//S+OryeJioe17nK+iDZpg= github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e/go.mod h1:o129ljs6alsIQTc8d6eweihqpmmrbxZ2g1jhgjhPykI= github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e h1:x5PInTuXLddHWHlePCNAcM8QtUfOGx44f3UmYPMtDcI= github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e/go.mod h1:K4+o74YGNjOb9N6yyG+LPj1NjHtk+Qz0IYQPvirbaLs= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= +github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/go-openapi/analysis v0.21.2/go.mod 
h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
+github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod 
h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= +github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I= +github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod 
h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM= +github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= +github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= +github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= +github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8= +github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI= +github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= +github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= +github.com/sigstore/sigstore v1.7.3 h1:HVVTfrMezJeLyl2xhJ8edzkrEGBa4KxjQZB4FlQ4JLU= +github.com/sigstore/sigstore v1.7.3/go.mod h1:cl0c7Dtg3MM3c13L8pqqrfrmBa0eM3POcdtBepjylmw= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= +github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= +github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vbauerster/mpb/v8 v8.6.1 h1:XbBpIbJxJOO9yMcKPpI4oEFPW6tLAptefNQJNcGWri8= +github.com/vbauerster/mpb/v8 v8.6.1/go.mod h1:S0tuIjikxlLxCeNijNhwAuD/BB3UE/d2nygG8SOldk0= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= 
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod 
h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= +gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 00000000..fe79e3ad --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..01b57432 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 00000000..3651cfa9 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,120 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). 
+
+This library requires Go 1.13 or newer; add it to your go.mod with:
+
+    % go get github.com/BurntSushi/toml@latest
+
+It also comes with a TOML validator CLI tool:
+
+    % go install github.com/BurntSushi/toml/cmd/tomlv@latest
+    % tomlv some-toml-file.toml
+
+### Examples
+For the simplest example, consider some TOML file as just a list of keys and
+values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which can be decoded with:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time
+}
+
+var conf Config
+_, err := toml.Decode(tomlData, &conf)
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+	ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like other decoders, **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
+
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+Here's an example that automatically parses values in a `mail.Address`:
+
+```toml
+contacts = [
+	"Donald Duck <donald@duckburg.com>",
+	"Scrooge McDuck <scrooge@duckburg.com>",
+]
+```
+
+Can be decoded with:
+
+```go
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+	*mail.Address
+}
+
+func (a *address) UnmarshalText(text []byte) error {
+	var err error
+	a.Address, err = mail.ParseAddress(string(text))
+	return err
+}
+
+// Decode it.
+func decode() {
+	blob := `
+		contacts = [
+			"Donald Duck <donald@duckburg.com>",
+			"Scrooge McDuck <scrooge@duckburg.com>",
+		]
+	`
+
+	var contacts struct {
+		Contacts []address
+	}
+
+	_, err := toml.Decode(blob, &contacts)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, c := range contacts.Contacts {
+		fmt.Printf("%#v\n", c.Address)
+	}
+
+	// Output:
+	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
+}
+```
+
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
+### More complex usage
+See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 00000000..4d38f3bf
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,602 @@
+package toml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of data in TOML format into a pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Unmarshal(data []byte, v interface{}) error {
+	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
+	return err
+}
+
+// Decode the TOML data into the pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile reads the contents of a file and decodes it with [Decode].
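+//
+// A minimal usage sketch (the path and the Config struct here are
+// illustrative assumptions, not part of this file):
+//
+//	var conf Config
+//	_, err := toml.DecodeFile("config.toml", &conf)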
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+//
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use [PrimitiveDecode] to "manually" decode these values.
+//
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
+//
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// The significand precision for float32 and float64 is 24 and 53 bits; this is
+// the range within which a natural number can be stored in a float without
+// loss of data.
+const (
+	maxSafeFloat32Int = 16777215                // 2^24-1
+	maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
+)
+
+// Decoder decodes TOML data.
+//
+// TOML tables correspond to Go structs or maps; they can be used
+// interchangeably, but structs offer better type safety.
+//
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
+//
+// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
+// local timezone.
+//
+// [time.Duration] types are treated as nanoseconds if the TOML value is an
+// integer, or they're parsed with time.ParseDuration() if they're strings.
+//
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
+//
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// email addresses.
+//
+// # Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case-insensitive
+// match to struct field names will be tried if an exact match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+var (
+	unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+	primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
+)
+
+// Decode TOML data into the pointer `v`.
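+//
+// A short sketch of the duration handling described above (the struct and the
+// TOML literal are illustrative assumptions):
+//
+//	var c struct {
+//		Timeout time.Duration `toml:"timeout"`
+//	}
+//	// "5s" goes through time.ParseDuration; an integer value would be
+//	// interpreted as nanoseconds instead.
+//	_, err := toml.Decode(`timeout = "5s"`, &c)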
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). 
+ + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return 
md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). 
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanSet() {
+			pv := v.Addr()
+			pvi := pv.Interface()
+			if _, ok := pvi.(encoding.TextUnmarshaler); ok {
+				return pv
+			}
+			if _, ok := pvi.(Unmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	rvi := rv.Interface()
+	if _, ok := rvi.(encoding.TextUnmarshaler); ok {
+		return true
+	}
+	if _, ok := rvi.(Unmarshaler); ok {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
new file mode 100644
index 00000000..086d0b68
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -0,0 +1,19 @@
+//go:build go1.16
+// +build go1.16
+
+package toml
+
+import (
+	"io/fs"
+)
+
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 00000000..b9e30971
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,29 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]struct{})}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 00000000..81a7c0fe
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,11 @@
+// Package toml implements decoding and encoding of TOML files.
+//
+// This package supports TOML v1.0.0, as specified at https://toml.io
+//
+// There is also support for delaying decoding with the Primitive type, and
+// querying the set of keys in a TOML document with the MetaData type.
+//
+// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+// and can be used to verify if a TOML document is valid. It can also be used to
+// print the type of each key.
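+//
+// A minimal sketch of key querying with MetaData (the struct and the TOML
+// literal are illustrative assumptions):
+//
+//	var conf struct{ Age int }
+//	md, err := toml.Decode("age = 25", &conf)
+//	if err == nil {
+//		md.Keys()      // All keys, in order of appearance.
+//		md.Undecoded() // Keys that were not mapped onto conf.
+//	}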
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 00000000..9cd25d75
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,759 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var dblQuotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+var (
+	marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	timeType    = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for [Decode].
+//
+// time.Time is encoded as an RFC 3339 string, and time.Duration as its string
+// representation.
+//
+// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// The toml struct tag can be used to provide the key name; if omitted the
+// struct field name will be used. If the "omitempty" option is present the
+// following values will be skipped:
+//
+//   - arrays, slices, maps, and strings with len of 0
+//   - structs with all zero values
+//   - bool false
+//
+// If omitzero is given all int and float types with a value of 0 will be
+// skipped.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// String to use for a single indentation level; default is two spaces.
+ Indent string + + w *bufio.Writer + hasWritten bool // written any output to w yet? +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
+ continue + } + + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
+		if tt == nil {
+			encPanic(errArrayNilElement)
+		}
+
+		if ret && !typeEqual(tomlHash, tt) {
+			ret = false
+		}
+	}
+	return ret
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Struct:
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Bool:
+		return !rv.Bool()
+	case reflect.Ptr:
+		return rv.IsNil()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//	key = <any value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//	┌───────────────────┐
+//	│      ┌───┐  ┌────┐│
+//	v      v   v  v    vv
+//	key = {k = 1, k2 = 2}
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	/// Marshaler used on top-level document; call eElement() to just call
+	/// Marshal{TOML,Text}.
+	if len(key) == 0 {
+		enc.eElement(val)
+		return
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	_, err := fmt.Fprintf(enc.w, format, v...)
+	if err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+// Resolve any level of pointers to the actual value (e.g. **string → string).
+func eindirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
+		if isMarshaler(v) {
+			return v
+		}
+		if v.CanAddr() { /// Special case for marshalers; see #358.
+			if pv := v.Addr(); isMarshaler(pv) {
+				return pv
+			}
+		}
+		return v
+	}
+
+	if v.IsNil() {
+		return v
+	}
+
+	return eindirect(v.Elem())
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
new file mode 100644
index 00000000..efd68865
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -0,0 +1,279 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseError is returned when there is an error parsing the TOML syntax such as
+// invalid syntax, duplicate keys, etc.
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using [ErrorWithPosition]:
+//
+//	toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//	At line 4, column 2-7:
+//
+//	      2 | fruit = []
+//	      3 |
+//	      4 | [[fruit]] # Not allowed
+//	            ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	      1 | x = [{ key = 42 #
+//	                          ^
+//
+//	Error help:
+//
+//	  Inline tables must always be on a single line:
+//
+//	      table = {key = 42, second = 43}
+//
+//	  It is invalid to split them over multiple lines like so:
+//
+//	      # INVALID
+//	      table = {
+//	          key = 42,
+//	          second = 43
+//	      }
+//
+//	  Use regular tables for this:
+//
+//	      [table]
+//	      key = 42
+//	      second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+
+	// Line the error occurred on.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+ + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. + col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. 
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+    table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+    # INVALID
+    table = {
+        key = 42,
+        second = 43
+    }
+
+Use regular tables for this:
+
+    [table]
+    key = 42
+    second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+    # INVALID
+    string = "Hello,
+    world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+    string = """Hello,
+    world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be a
+bug in the program that uses too small of an integer.
+
+The maximum and minimum values are:
+
+    size   │ lowest         │ highest
+    ───────┼────────────────┼───────────────
+    int8   │ -128           │ 127
+    int16  │ -32,768        │ 32,767
+    int32  │ -2,147,483,648 │ 2,147,483,647
+    int64  │ -9.2 × 10¹⁸    │ 9.2 × 10¹⁸
+    uint8  │ 0              │ 255
+    uint16 │ 0              │ 65,535
+    uint32 │ 0              │ 4,294,967,295
+    uint64 │ 0              │ 1.8 × 10¹⁹
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageDuration = `
+A duration must be written as "number<unit>", without any spaces. Valid units
+are:
+
+    ns      nanoseconds (billionth of a second)
+    us, µs  microseconds (millionth of a second)
+    ms      milliseconds (thousandth of a second)
+    s       seconds
+    m       minutes
+    h       hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 00000000..022f15bc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping; without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec, as the exact
+// behaviour is left up to implementations.
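+//
+// For example, a sketch of forcing local datetimes to be interpreted as UTC
+// by reassigning the variable before decoding (illustrative only):
+//
+//	LocalDatetime = time.FixedZone("datetime-local", 0)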
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..3545a6ad --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1283 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string, tomlNext bool) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. 
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTableStart lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table.
+// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r, lx.tomlNext) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexQuotedName lexes one quoted part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the stack is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore ''' + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. 
+ lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over """ again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n.
+ fallthrough + case '\\': + return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. 
+func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumberStart consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHexadecimal(r) { + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false'. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 00000000..2e78b24e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,121 @@ +package toml + +import ( + 
"strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]interface{} + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. 
+type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c, false) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..9c191536 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,811 @@ +package toml + +import ( + "fmt" + "os" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]interface{}), + lx: lex(data, tomlNext), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. 
+ vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. 
+ p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = types + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ, it.pos) + hash[p.currentKey] = val + + /// Restore context. 
+ p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHexadecimal is a superset of all the permissible characters + // surrounding an underscore. + accept = isHexadecimal(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { + p.setValue(key, val) + p.setType(key, typ, pos) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, accounting for +// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. 
+// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } + } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 + } +} + +func (p *parser) replaceEscapes(it item, str string) string { + replaced := make([]rune, 0, len(str)) + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", s[r]) + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 00000000..254ca82e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. 
+ nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 00000000..4e90d773 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. 
+type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 00000000..c66769f6 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +.*.sw? +/coverage.txt \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/.whitesource b/vendor/github.com/VividCortex/ewma/.whitesource new file mode 100644 index 00000000..d7eebc0c --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.whitesource @@ -0,0 +1,3 @@ +{ + "settingsInheritedFrom": "VividCortex/whitesource-config@master" +} \ No newline at end of file diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 00000000..a78d643e --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 00000000..87b4a3c7 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,145 @@ +# EWMA + +[![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) +![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) +[![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. 
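+
+The same update rule, written as a minimal Go sketch (our illustration of the
+pseudocode above, not part of this package's API; the `ewmaUpdate` name is
+invented for the example):
+
+```go
+// ewmaUpdate folds one new sample into a running exponentially
+// weighted moving average. avg holds the previous average (seeded
+// from the first sample) and alpha is the decay factor in (0, 1).
+func ewmaUpdate(avg, sample, alpha float64) float64 {
+	return sample*alpha + avg*(1-alpha)
+}
+```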
+ +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main + +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. 
+ Pull requests for new features will be rejected,
+so we recommend forking the repository and making changes in your fork for your use case.
+
+## License
+
+This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
+It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
diff --git a/vendor/github.com/VividCortex/ewma/codecov.yml b/vendor/github.com/VividCortex/ewma/codecov.yml
new file mode 100644
index 00000000..0d36d903
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/codecov.yml
@@ -0,0 +1,6 @@
+coverage:
+  status:
+    project:
+      default:
+        threshold: 15%
+    patch: off
diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go
new file mode 100644
index 00000000..44d5d53e
--- /dev/null
+++ b/vendor/github.com/VividCortex/ewma/ewma.go
@@ -0,0 +1,126 @@
+// Package ewma implements exponentially weighted moving averages.
+package ewma
+
+// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
+// Please see the LICENSE file for applicable license terms.
+
+const (
+	// By default, we average over a one-minute period, which means the average
+	// age of the metrics in the period is 30 seconds.
+	AVG_METRIC_AGE float64 = 30.0
+
+	// The formula for computing the decay factor from the average age comes
+	// from "Production and Operations Analysis" by Steven Nahmias.
+	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)
+
+	// For best results, the moving average should not be initialized to the
+	// samples it sees immediately. The book "Production and Operations
+	// Analysis" by Steven Nahmias suggests initializing the moving average to
+	// the mean of the first 10 samples. Until the VariableEwma has seen this
+	// many samples, it is not "ready" to be queried for the value of the
+	// moving average. This adds some memory cost.
+	WARMUP_SAMPLES uint8 = 10
+)
+
+// MovingAverage is the interface that computes a moving average over a time-
+// series stream of numbers. The average may be over a window or exponentially
+// decaying.
+type MovingAverage interface {
+	Add(float64)
+	Value() float64
+	Set(float64)
+}
+
+// NewMovingAverage constructs a MovingAverage that computes an average with the
+// desired characteristics in the moving window or exponential decay. If no
+// age is given, it constructs a default exponentially weighted implementation
+// that consumes minimal memory. The age is related to the decay factor alpha
+// by the formula given for the DECAY constant. It signifies the average age
+// of the samples as time goes to infinity.
+func NewMovingAverage(age ...float64) MovingAverage {
+	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
+		return new(SimpleEWMA)
+	}
+	return &VariableEWMA{
+		decay: 2 / (age[0] + 1),
+	}
+}
+
+// A SimpleEWMA represents the exponentially weighted moving average of a
+// series of numbers. It WILL have different behavior than the VariableEWMA
+// for multiple reasons. It has no warm-up period and it uses a constant
+// decay. These properties let it use less memory. It will also behave
+// differently when it's equal to zero, which is assumed to mean
+// uninitialized, so if a value is likely to actually become zero over time,
+// then any non-zero value will cause a sharp jump instead of a small change.
+// However, note that this takes a long time, and the value may just
+// decay to a stable value that's close to zero, but which won't be mistaken
+// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
+type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. +func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/acarl005/stripansi/LICENSE b/vendor/github.com/acarl005/stripansi/LICENSE new file mode 100644 index 00000000..00abe0db --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Andrew Carlson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/acarl005/stripansi/README.md b/vendor/github.com/acarl005/stripansi/README.md new file mode 100644 index 00000000..8bdb1f50 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/README.md @@ -0,0 +1,30 @@ +Strip ANSI +========== + +This Go package removes ANSI escape codes from strings. 
+ +Ideally, we would prevent these from appearing in any text we want to process. +However, sometimes this can't be helped, and we need to be able to deal with that noise. +This will use a regexp to remove those unwanted escape codes. + + +## Install + +```sh +$ go get -u github.com/acarl005/stripansi +``` + +## Usage + +```go +import ( + "fmt" + "github.com/acarl005/stripansi" +) + +func main() { + msg := "\x1b[38;5;140m foo\x1b[0m bar" + cleanMsg := stripansi.Strip(msg) + fmt.Println(cleanMsg) // " foo bar" +} +``` diff --git a/vendor/github.com/acarl005/stripansi/stripansi.go b/vendor/github.com/acarl005/stripansi/stripansi.go new file mode 100644 index 00000000..235732a7 --- /dev/null +++ b/vendor/github.com/acarl005/stripansi/stripansi.go @@ -0,0 +1,13 @@ +package stripansi + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +func Strip(str string) string { + return re.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore new file mode 100644 index 00000000..8d69a941 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.gitignore @@ -0,0 +1,15 @@ +bin/ +.idea/ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml new file mode 100644 index 00000000..bb83c667 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -0,0 +1,12 @@ +language: go +dist: xenial +go: + - '1.10' + - '1.11' + - '1.12' + - '1.13' + - 'tip' + +script: + - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..4b462b0d --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +This project adheres to [The Code Manifesto](http://codemanifesto.com) +as its guidelines for contributor interactions. + +## The Code Manifesto + +We want to work in an ecosystem that empowers developers to reach their +potential — one that encourages growth and effective collaboration. A space +that is safe for all. + +A space such as this benefits everyone that participates in it. It encourages +new developers to enter our field. It is through discussion and collaboration +that we grow, and through growth that we improve. + +In the effort to create such a place, we hold to these values: + +1. **Discrimination limits us.** This includes discrimination on the basis of + race, gender, sexual orientation, gender identity, age, nationality, + technology and any other arbitrary exclusion of a group of people. +2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort + levels. Remember that, and if brought to your attention, heed it. +3. **We are our biggest assets.** None of us were born masters of our trade. + Each of us has been helped along the way. Return that favor, when and where + you can. +4. **We are resources for the future.** As an extension of #3, share what you + know. 
Make yourself a resource to help those that come after you.
+5. **Respect defines us.** Treat others as you wish to be treated. Make your
+   discussions, criticisms and debates from a position of respectfulness. Ask
+   yourself, is it true? Is it necessary? Is it constructive? Anything less is
+   unacceptable.
+6. **Reactions require grace.** Angry responses are valid, but abusive language
+   and vindictive actions are toxic. When something happens that offends you,
+   handle it assertively, but be respectful. Escalate reasonably, and try to
+   allow the offender an opportunity to explain themselves, and possibly
+   correct the issue.
+7. **Opinions are just that: opinions.** Each and every one of us, due to our
+   background and upbringing, has varying opinions. That is perfectly
+   acceptable. Remember this: if you respect your own opinions, you should
+   respect the opinions of others.
+8. **To err is human.** You might not intend it, but mistakes do happen and
+   contribute to building experience. Tolerate honest mistakes, and don't
+   hesitate to apologize if you make one yourself.
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 00000000..7ed268a1
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you have a contribution for the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers!
[[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 00000000..cacba910 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2020 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 00000000..2c3fc35e --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,622 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) +[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). + +#### Installation +Make sure that Go is installed on your computer. 
+Type the following command in your terminal:
+
+    go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+    go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to spell out the long `govalidator` name, you can alias the import:
+```go
+import (
+	valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between the `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+	govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+	Name  string ``
+	Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs this is the object being validated. This makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
+	// ...
+}
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
+	// ...
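+	// (the validator body goes here; a complete, runnable example of a
+	// custom byte-array validator appears in the "Custom validation
+	// functions" section below)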
+}) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func HasLowerCase(str string) bool +func HasUpperCase(str string) bool +func HasWhitespace(str string) bool +func HasWhitespaceOnly(str string) bool +func InRange(value interface{}, left interface{}, right interface{}) bool +func InRangeFloat32(value, left, right float32) bool +func InRangeFloat64(value, left, right float64) bool +func InRangeInt(value, left, right interface{}) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCRC32(str string) bool +func IsCRC32b(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsExistingEmail(email string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHash(str string, algorithm string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO4217(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsIn(str string, params ...string) bool +func IsInRaw(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMD4(str string) bool +func IsMD5(str string) bool +func IsMagnetURI(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNotNull(str string) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRegex(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsRipeMD128(str string) bool +func IsRipeMD160(str string) bool +func IsRsaPub(str string, params ...string) bool +func IsRsaPublicKey(str string, keylen int) bool +func IsSHA1(str string) bool +func IsSHA256(str string) bool +func 
IsSHA384(str string) bool +func IsSHA512(str string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTiger128(str string) bool +func IsTiger160(str string) bool +func IsTiger192(str string) bool +func IsTime(str string, format string) bool +func IsType(v interface{}, params ...string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsULID(str string) bool +func IsUnixTime(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func MaxStringLength(str string, params ...string) bool +func MinStringLength(str string, params ...string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func PrependPathToErrors(err error, path string) error +func Range(str string, params ...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func SetNilPtrAllowedByRequired(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) +func ToFloat(str string) (float64, error) +func ToInt(value interface{}) (res int64, err error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func TruncatingErrorf(str string, args ...interface{}) error +func UnderscoreToCamelCase(s string) string +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type ISO693Entry +type InterfaceParamValidator +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### IsType +```go +println(govalidator.IsType("Bob", "string")) +println(govalidator.IsType(1, "int")) +i := 1 +println(govalidator.IsType(&i, "*int")) +``` + +IsType can be used through the tag `type` which is essential for map validation: +```go +type User struct { + Name string `valid:"type(string)"` + Age int `valid:"type(int)"` + Meta interface{} `valid:"type(string)"` +} +result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) +if err != nil { + println("error: " + 
err.Error()) +} +println(result) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 +} +_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +``` +```go +data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 +} +_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +_ = govalidator.Count(data, fn) // result = 5 +``` +###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) +If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: +```go +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) +``` +For completely custom validators (interface-based), see below. + +Here is a list of available validators for struct fields (validator - used function): +```go +"email": IsEmail, +"url": IsURL, +"dialstring": IsDialString, +"requrl": IsRequestURL, +"requri": IsRequestURI, +"alpha": IsAlpha, +"utfletter": IsUTFLetter, +"alphanum": IsAlphanumeric, +"utfletternum": IsUTFLetterNumeric, +"numeric": IsNumeric, +"utfnumeric": IsUTFNumeric, +"utfdigit": IsUTFDigit, +"hexadecimal": IsHexadecimal, +"hexcolor": IsHexcolor, +"rgbcolor": IsRGBcolor, +"lowercase": IsLowerCase, +"uppercase": IsUpperCase, +"int": IsInt, +"float": IsFloat, +"null": IsNull, +"uuid": IsUUID, +"uuidv3": IsUUIDv3, +"uuidv4": IsUUIDv4, +"uuidv5": IsUUIDv5, +"creditcard": IsCreditCard, +"isbn10": IsISBN10, +"isbn13": IsISBN13, +"json": IsJSON, +"multibyte": IsMultibyte, +"ascii": IsASCII, +"printableascii": IsPrintableASCII, +"fullwidth": IsFullWidth, +"halfwidth": IsHalfWidth, +"variablewidth": IsVariableWidth, +"base64": IsBase64, +"datauri": IsDataURI, +"ip": IsIP, +"port": IsPort, +"ipv4": IsIPv4, +"ipv6": IsIPv6, +"dns": IsDNSName, +"host": IsHost, +"mac": IsMAC, +"latitude": IsLatitude, +"longitude": IsLongitude, +"ssn": IsSSN, +"semver": IsSemver, +"rfc3339": IsRFC3339, +"rfc3339WithoutZone": IsRFC3339WithoutZone, +"ISO3166Alpha2": IsISO3166Alpha2, +"ISO3166Alpha3": IsISO3166Alpha3, +"ulid": IsULID, +``` +Validators with parameters + +```go +"range(min|max)": Range, +"length(min|max)": ByteLength, +"runelength(min|max)": RuneLength, +"stringlength(min|max)": StringLength, +"matches(pattern)": StringMatches, +"in(string1|string2|...|stringN)": IsIn, +"rsapub(keylength)" : IsRsaPub, +"minstringlength(int): MinStringLength, +"maxstringlength(int): MaxStringLength, +``` +Validators with parameters for any type + +```go +"type(type)": IsType, +``` + +And here is small example of usage: +```go +type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + Message2 string `valid:"animal(dog)"` + AuthorIP 
string `valid:"ipv4"` + Date string `valid:"-"` +} +post := &Post{ + Title: "My Example Post", + Message: "duck", + Message2: "dog", + AuthorIP: "123.234.54.3", +} + +// Add your own struct validation tags +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) + +// Add your own struct validation tags with parameter +govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool { + species := params[0] + return str == species +}) +govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$") + +result, err := govalidator.ValidateStruct(post) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338) +If you want to validate maps, you can use the map to be validated and a validation map that contain the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}` + +So here is small example of usage: +```go +var mapTemplate = map[string]interface{}{ + "name":"required,alpha", + "family":"required,alpha", + "email":"required,email", + "cell-phone":"numeric", + "address":map[string]interface{}{ + "line1":"required,alphanum", + "line2":"alphanum", + "postal-code":"numeric", + }, +} + +var inputMap = map[string]interface{}{ + "name":"Bob", + "family":"Smith", + "email":"foo@bar.baz", + "address":map[string]interface{}{ + "line1":"", + "line2":"", + "postal-code":"", + }, +} + +result, err := govalidator.ValidateMap(inputMap, mapTemplate) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` + +###### WhiteList +```go +// Remove all characters from string ignoring characters between "a" and "z" +println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") +``` + +###### Custom validation functions +Custom validation using your own domain specific validators is also available - here's an example of how to use it: +```go +import "github.com/asaskevich/govalidator" + +type CustomByteArray [6]byte // custom types are supported and can be validated + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // you can type switch on the context interface being validated + case StructWithCustomByteArray: + // you can check and validate against some other field in the context, + // return early or not validate against the context at all – your choice + case SomeOtherType: + // ... + default: + // expecting some other type? Throw/panic here or continue + } + + switch v := i.(type) { // type switch on the struct field being validated + case CustomByteArray: + for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes + if e != 0 { + return true + } + } + } + return false +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { + switch v := context.(type) { // this validates a field against the value in another field, i.e. 
dependent validation
+	case StructWithCustomByteArray:
+		return len(v.ID) >= v.CustomMinLength
+	}
+	return false
+})
+```
+
+###### Loop over Error()
+By default .Error() returns all errors as a single string. To access each error you can do this:
+```go
+	if err != nil {
+		errs := err.(govalidator.Errors).Errors()
+		for _, e := range errs {
+			fmt.Println(e.Error())
+		}
+	}
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+	Id        int64  `json:"id"`
+	FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution for the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] + + + + + + + + + + + + + + + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go new file mode 100644 index 00000000..3e1da7cb --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/arrays.go @@ -0,0 +1,87 @@ +package govalidator + +// Iterator is the function that accepts element of slice/array and its index +type Iterator func(interface{}, int) + +// ResultIterator is the function that accepts element of slice/array and its index and returns any result +type ResultIterator func(interface{}, int) interface{} + +// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean +type ConditionIterator func(interface{}, int) bool + +// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values +type ReduceIterator func(interface{}, interface{}) interface{} + +// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. +func Some(array []interface{}, iterator ConditionIterator) bool { + res := false + for index, data := range array { + res = res || iterator(data, index) + } + return res +} + +// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. +func Every(array []interface{}, iterator ConditionIterator) bool { + res := true + for index, data := range array { + res = res && iterator(data, index) + } + return res +} + +// Reduce boils down a list of values into a single value by ReduceIterator +func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { + for _, data := range array { + initialValue = iterator(initialValue, data) + } + return initialValue +} + +// Each iterates over the slice and apply Iterator to every item +func Each(array []interface{}, iterator Iterator) { + for index, data := range array { + iterator(data, index) + } +} + +// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. +func Map(array []interface{}, iterator ResultIterator) []interface{} { + var result = make([]interface{}, len(array)) + for index, data := range array { + result[index] = iterator(data, index) + } + return result +} + +// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. +func Find(array []interface{}, iterator ConditionIterator) interface{} { + for index, data := range array { + if iterator(data, index) { + return data + } + } + return nil +} + +// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. +func Filter(array []interface{}, iterator ConditionIterator) []interface{} { + var result = make([]interface{}, 0) + for index, data := range array { + if iterator(data, index) { + result = append(result, data) + } + } + return result +} + +// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
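+// It is equivalent in effect to len(Filter(array, iterator)).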
+func Count(array []interface{}, iterator ConditionIterator) int { + count := 0 + for index, data := range array { + if iterator(data, index) { + count = count + 1 + } + } + return count +} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go new file mode 100644 index 00000000..d68e990f --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/converter.go @@ -0,0 +1,81 @@ +package govalidator + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// ToString convert the input to a string. +func ToString(obj interface{}) string { + res := fmt.Sprintf("%v", obj) + return res +} + +// ToJSON convert the input to a valid JSON string +func ToJSON(obj interface{}) (string, error) { + res, err := json.Marshal(obj) + if err != nil { + res = []byte("") + } + return string(res), err +} + +// ToFloat convert the input string to a float, or 0.0 if the input is not a float. +func ToFloat(value interface{}) (res float64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = float64(val.Int()) + case uint, uint8, uint16, uint32, uint64: + res = float64(val.Uint()) + case float32, float64: + res = val.Float() + case string: + res, err = strconv.ParseFloat(val.String(), 64) + if err != nil { + res = 0 + } + default: + err = fmt.Errorf("ToInt: unknown interface type %T", value) + res = 0 + } + + return +} + +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. +func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case float32, float64: + res = int64(val.Float()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("ToInt: invalid numeric format %g", value) + res = 0 + } + default: + err = fmt.Errorf("ToInt: unknown interface type %T", value) + res = 0 + } + + return +} + +// ToBoolean convert the input string to a boolean. +func ToBoolean(str string) (bool, error) { + return strconv.ParseBool(str) +} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go new file mode 100644 index 00000000..55dce62d --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/doc.go @@ -0,0 +1,3 @@ +package govalidator + +// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go new file mode 100644 index 00000000..1da2336f --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -0,0 +1,47 @@ +package govalidator + +import ( + "sort" + "strings" +) + +// Errors is an array of multiple errors and conforms to the error interface. +type Errors []error + +// Errors returns itself. +func (es Errors) Errors() []error { + return es +} + +func (es Errors) Error() string { + var errs []string + for _, e := range es { + errs = append(errs, e.Error()) + } + sort.Strings(errs) + return strings.Join(errs, ";") +} + +// Error encapsulates a name, an error and whether there's a custom error message or not. 
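+// Name is the field that failed validation, Err is the underlying error,
+// and Path locates the field inside nested structs (it is joined with Name
+// in Error() when present).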
+type Error struct { + Name string + Err error + CustomErrorMessageExists bool + + // Validator indicates the name of the validator that failed + Validator string + Path []string +} + +func (e Error) Error() string { + if e.CustomErrorMessageExists { + return e.Err.Error() + } + + errName := e.Name + if len(e.Path) > 0 { + errName = strings.Join(append(e.Path, e.Name), ".") + } + + return errName + ": " + e.Err.Error() +} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go new file mode 100644 index 00000000..5041d9e8 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/numerics.go @@ -0,0 +1,100 @@ +package govalidator + +import ( + "math" +) + +// Abs returns absolute value of number +func Abs(value float64) float64 { + return math.Abs(value) +} + +// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise +func Sign(value float64) float64 { + if value > 0 { + return 1 + } else if value < 0 { + return -1 + } else { + return 0 + } +} + +// IsNegative returns true if value < 0 +func IsNegative(value float64) bool { + return value < 0 +} + +// IsPositive returns true if value > 0 +func IsPositive(value float64) bool { + return value > 0 +} + +// IsNonNegative returns true if value >= 0 +func IsNonNegative(value float64) bool { + return value >= 0 +} + +// IsNonPositive returns true if value <= 0 +func IsNonPositive(value float64) bool { + return value <= 0 +} + +// InRangeInt returns true if value lies between left and right border +func InRangeInt(value, left, right interface{}) bool { + value64, _ := ToInt(value) + left64, _ := ToInt(left) + right64, _ := ToInt(right) + if left64 > right64 { + left64, right64 = right64, left64 + } + return value64 >= left64 && value64 <= right64 +} + +// InRangeFloat32 returns true if value lies between left and right border +func InRangeFloat32(value, left, right float32) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRangeFloat64 returns true if value lies between left and right border +func InRangeFloat64(value, left, right float64) bool { + if left > right { + left, right = right, left + } + return value >= left && value <= right +} + +// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. +// All types must the same type. 
+// InRange returns true if value lies between the left and right borders; it is generic and handles int, float32, float64 and string.
+// All arguments must be the same type.
+// Returns false if value doesn't lie in range, or if the values are incompatible or not comparable
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+	switch value.(type) {
+	case int:
+		intValue, _ := ToInt(value)
+		intLeft, _ := ToInt(left)
+		intRight, _ := ToInt(right)
+		return InRangeInt(intValue, intLeft, intRight)
+	case float32, float64:
+		floatValue, _ := ToFloat(value)
+		floatLeft, _ := ToFloat(left)
+		floatRight, _ := ToFloat(right)
+		return InRangeFloat64(floatValue, floatLeft, floatRight)
+	case string:
+		return value.(string) >= left.(string) && value.(string) <= right.(string)
+	default:
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) && IsPositive(value)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 00000000..bafc3765
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,113 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+	Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
+	ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+	ISBN13 string = "^(?:[0-9]{13})$"
+	UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+	UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+	UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+	UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+	Alpha string = "^[a-zA-Z]+$"
+	Alphanumeric string = "^[a-zA-Z0-9]+$"
+	Numeric string = "^[0-9]+$"
+	Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+	Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+	Hexadecimal string = "^[0-9a-fA-F]+$"
+	Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+	RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
"^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$â—-]+\\[a-z0-9_.$â—-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" + IMSI string = "^\\d{14,15}$" + E164 string = `^\+?[1-9]\d{1,14}$` +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = 
regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxARWinPath = regexp.MustCompile(WinARPath) + rxARUnixPath = regexp.MustCompile(UnixARPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) + rxIMSI = regexp.MustCompile(IMSI) + rxE164 = regexp.MustCompile(E164) +) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 00000000..c573abb5 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,656 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accept additional parameters. +type ParamValidator func(str string, params ...string) bool + +// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value +type InterfaceParamValidator func(in interface{}, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. 
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions that accept a variable number of parameters for an interface value
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+	"type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+	"type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of functions that accept a variable number of parameters
+var ParamTagMap = map[string]ParamValidator{
+	"length": ByteLength,
+	"range": Range,
+	"runelength": RuneLength,
+	"stringlength": StringLength,
+	"matches": StringMatches,
+	"in": IsInRaw,
+	"rsapub": IsRsaPub,
+	"minstringlength": MinStringLength,
+	"maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+	"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+	"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+	"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+	"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+	"in": regexp.MustCompile(`^in\((.*)\)`),
+	"matches": regexp.MustCompile(`^matches\((.+)\)$`),
+	"rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+	"minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
+	"maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+	validators map[string]CustomTypeValidator
+
+	sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+	tm.RLock()
+	defer tm.RUnlock()
+	v, ok := tm.validators[name]
+	return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+	tm.Lock()
+	defer tm.Unlock()
+	tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
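+
+// Example (editor's sketch, not part of upstream govalidator): registering a
+// custom validator in CustomTypeTagMap above and referring to it from a
+// struct tag; the "nonEmptyBody" name and the page struct are hypothetical:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/asaskevich/govalidator"
+//	)
+//
+//	type page struct {
+//		Body string `valid:"nonEmptyBody"`
+//	}
+//
+//	func main() {
+//		govalidator.CustomTypeTagMap.Set("nonEmptyBody", func(i interface{}, o interface{}) bool {
+//			s, ok := i.(string)
+//			return ok && len(s) > 0
+//		})
+//		ok, err := govalidator.ValidateStruct(page{Body: ""})
+//		fmt.Println(ok, err) // false, with a validation error for Body
+//	}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.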
+var TagMap = map[string]Validator{
+	"email": IsEmail,
+	"url": IsURL,
+	"dialstring": IsDialString,
+	"requrl": IsRequestURL,
+	"requri": IsRequestURI,
+	"alpha": IsAlpha,
+	"utfletter": IsUTFLetter,
+	"alphanum": IsAlphanumeric,
+	"utfletternum": IsUTFLetterNumeric,
+	"numeric": IsNumeric,
+	"utfnumeric": IsUTFNumeric,
+	"utfdigit": IsUTFDigit,
+	"hexadecimal": IsHexadecimal,
+	"hexcolor": IsHexcolor,
+	"rgbcolor": IsRGBcolor,
+	"lowercase": IsLowerCase,
+	"uppercase": IsUpperCase,
+	"int": IsInt,
+	"float": IsFloat,
+	"null": IsNull,
+	"notnull": IsNotNull,
+	"uuid": IsUUID,
+	"uuidv3": IsUUIDv3,
+	"uuidv4": IsUUIDv4,
+	"uuidv5": IsUUIDv5,
+	"creditcard": IsCreditCard,
+	"isbn10": IsISBN10,
+	"isbn13": IsISBN13,
+	"json": IsJSON,
+	"multibyte": IsMultibyte,
+	"ascii": IsASCII,
+	"printableascii": IsPrintableASCII,
+	"fullwidth": IsFullWidth,
+	"halfwidth": IsHalfWidth,
+	"variablewidth": IsVariableWidth,
+	"base64": IsBase64,
+	"datauri": IsDataURI,
+	"ip": IsIP,
+	"port": IsPort,
+	"ipv4": IsIPv4,
+	"ipv6": IsIPv6,
+	"dns": IsDNSName,
+	"host": IsHost,
+	"mac": IsMAC,
+	"latitude": IsLatitude,
+	"longitude": IsLongitude,
+	"ssn": IsSSN,
+	"semver": IsSemver,
+	"rfc3339": IsRFC3339,
+	"rfc3339WithoutZone": IsRFC3339WithoutZone,
+	"ISO3166Alpha2": IsISO3166Alpha2,
+	"ISO3166Alpha3": IsISO3166Alpha3,
+	"ISO4217": IsISO4217,
+	"IMEI": IsIMEI,
+	"ulid": IsULID,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+	EnglishShortName string
+	FrenchShortName string
+	Alpha2Code string
+	Alpha3Code string
+	Numeric string
+}
+
+//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+	{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+	{"Albania", "Albanie (l')", "AL", "ALB", "008"},
+	{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+	{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+	{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+	{"Andorra", "Andorre (l')", "AD", "AND", "020"},
+	{"Angola", "Angola (l')", "AO", "AGO", "024"},
+	{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+	{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+	{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+	{"Australia", "Australie (l')", "AU", "AUS", "036"},
+	{"Austria", "Autriche (l')", "AT", "AUT", "040"},
+	{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+	{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+	{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+	{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+	{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+	{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+	{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+	{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+	{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+	{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+	{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+	{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+	{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+	{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+	{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+	{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+	{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+	{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+	{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
"BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les ÃŽles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'ÃŽle)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les ÃŽles)/ Keeling (les ÃŽles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les ÃŽles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les ÃŽles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les ÃŽles)/Malouines (les ÃŽles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les ÃŽles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Ã…land Islands", "Ã…land(les ÃŽles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", 
"GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-ÃŽles MacDonald (l'ÃŽle)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", 
"SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'ÃŽle)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les ÃŽles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "ÃŽles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (ÃŽles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", 
"729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'ÃŽle Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les ÃŽles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "ÃŽle de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les ÃŽles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", 
"SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", + "VEF", "VES", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: 
"Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", 
English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "BokmÃ¥l, Norwegian; Norwegian BokmÃ¥l"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: 
"swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 00000000..f4c30f82 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains checks if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches checks if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. 
+func Trim(str, chars string) string {
+	return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+	pattern := "[^" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+	pattern := "[" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32 and the character 127 (mostly control characters).
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+	chars := ""
+	if keepNewLines {
+		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+	} else {
+		chars = "\x00-\x1F\x7F"
+	}
+	return BlackList(str, chars)
+}
+
+// ReplacePattern replaces a regular expression pattern in the string
+func ReplacePattern(str, pattern, replace string) string {
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+	if len(segment) == 0 {
+		return inrune
+	}
+	if len(inrune) != 0 {
+		inrune = append(inrune, '_')
+	}
+	inrune = append(inrune, segment...)
+	return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+	var output []rune
+	var segment []rune
+	for _, r := range str {
+
+		// do not treat numbers as separate segments
+		if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+			output = addSegment(output, segment)
+			segment = nil
+		}
+		segment = append(segment, unicode.ToLower(r))
+	}
+	output = addSegment(output, segment)
+	return string(output)
+}
+
+// Reverse returns the reversed string
+func Reverse(s string) string {
+	r := []rune(s)
+	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+		r[i], r[j] = r[j], r[i]
+	}
+	return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines
+func GetLines(s string) []string {
+	return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string
+func GetLine(s string, index int) (string, error) {
+	lines := GetLines(s)
+	if index < 0 || index >= len(lines) {
+		return "", errors.New("line index out of bounds")
+	}
+	return lines[index], nil
+}
+
+// RemoveTags removes all tags from an HTML string
+func RemoveTags(s string) string {
+	return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names
+func SafeFileName(str string) string {
+	name := strings.ToLower(str)
+	name = path.Clean(path.Base(name))
+	name = strings.Trim(name, " ")
+	separators, err := regexp.Compile(`[ &_=+:]`)
+	if err == nil {
+		name = separators.ReplaceAllString(name, "-")
+	}
+	legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+	if err == nil {
+		name = legal.ReplaceAllString(name, "")
+	}
+	for strings.Contains(name, "--") {
+		name = strings.Replace(name, "--", "-", -1)
+	}
+	return name
+}
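+
+// Example (editor's sketch, not part of upstream govalidator): the case
+// conversion and string helpers above are pure functions on their input:
+//
+//	govalidator.UnderscoreToCamelCase("my_func") // "MyFunc"
+//	govalidator.CamelCaseToUnderscore("MyFunc")  // "my_func"
+//	govalidator.Reverse("govalidator")           // "rotadilavog"
+
+// NormalizeEmail canonicalizes an email address.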
+// Both the local part and the hostname are lowercased. Normalization follows special rules for
+// known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all
+// @googlemail.com addresses are normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+	if !IsEmail(str) {
+		return "", fmt.Errorf("%s is not an email", str)
+	}
+	parts := strings.Split(str, "@")
+	parts[0] = strings.ToLower(parts[0])
+	parts[1] = strings.ToLower(parts[1])
+	if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+		parts[1] = "gmail.com"
+		parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+	}
+	return strings.Join(parts, "@"), nil
+}
+
+// Truncate truncates a string to the closest length without breaking words.
+func Truncate(str string, length int, ending string) string {
+	var aftstr, befstr string
+	if len(str) > length {
+		words := strings.Fields(str)
+		before, present := 0, 0
+		for i := range words {
+			befstr = aftstr
+			before = present
+			aftstr = aftstr + words[i] + " "
+			present = len(aftstr)
+			if present > length && i != 0 {
+				if (length - before) < (present - length) {
+					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+				}
+				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+			}
+		}
+	}
+
+	return str
+}
+
+// PadLeft pads the left side of a string if its size is less than the indicated pad length
+func PadLeft(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of a string if its size is less than the indicated pad length
+func PadRight(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of a string if its size is less than the indicated pad length
+func PadBoth(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads a string on the left, the right or both sides.
+// Note that the padding string can be unicode and more than one character
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+	// When the padded length is less than the current string size
+	if padLen < utf8.RuneCountInString(str) {
+		return str
+	}
+
+	padLen -= utf8.RuneCountInString(str)
+
+	targetLen := padLen
+
+	targetLenLeft := targetLen
+	targetLenRight := targetLen
+	if padLeft && padRight {
+		targetLenLeft = padLen / 2
+		targetLenRight = padLen - targetLenLeft
+	}
+
+	strToRepeatLen := utf8.RuneCountInString(padStr)
+
+	repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+	repeatedString := strings.Repeat(padStr, repeatTimes)
+
+	leftSide := ""
+	if padLeft {
+		leftSide = repeatedString[0:targetLenLeft]
+	}
+
+	rightSide := ""
+	if padRight {
+		rightSide = repeatedString[0:targetLenRight]
+	}
+
+	return leftSide + str + rightSide
+}
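+
+// Example (editor's sketch, not part of upstream govalidator): the padding
+// helpers above never truncate; a string already at least padLen runes long
+// is returned unchanged:
+//
+//	govalidator.PadLeft("123", "0", 6)  // "000123"
+//	govalidator.PadRight("ab", "-", 5)  // "ab---"
+//	govalidator.PadBoth("ab", "*", 6)   // "**ab**"
+
+// TruncatingErrorf removes extra arguments from fmt.Errorf when they are not consumed by %s verbs in the format string
+func TruncatingErrorf(str string, args ...interface{}) error {
+	n := strings.Count(str, "%s")
+	return fmt.Errorf(str, args[:n]...)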
+}
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 00000000..c9c4fac0
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1768 @@
+// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+var (
+	fieldsRequiredByDefault bool
+	nilPtrAllowedByRequired = false
+	notNumberRegexp = regexp.MustCompile("[^0-9]+")
+	whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
+	paramsRegexp = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+const rfc3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+//     type exampleStruct struct {
+//         Name  string ``
+//         Email string `valid:"email"`
+//     }
+// This, however, will only fail when Email is empty or an invalid email address:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email"`
+//     }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+//     type exampleStruct3 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email,optional"`
+//     }
+func SetFieldsRequiredByDefault(value bool) {
+	fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+//     type exampleStruct struct {
+//         Name *string `valid:"required"`
+//     }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+	nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+	// TODO uppercase letters are not supported
+	return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email address with an existing domain
+func IsExistingEmail(email string) bool {
+	if len(email) < 6 || len(email) > 254 {
+		return false
+	}
+	at := strings.LastIndex(email, "@")
+	if at <= 0 || at > len(email)-3 {
+		return false
+	}
+	user := email[:at]
+	host := email[at+1:]
+	if len(user) > 64 {
+		return false
+	}
+	switch host {
+	case "localhost", "example.com":
+		return true
+	}
+	if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+		return false
+	}
+	if _, err := net.LookupMX(host); err != nil {
+		if _, err := net.LookupIP(host); err != nil {
+			return false
+		}
+	}
+
+	return true
+}
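+
+// Example (editor's sketch, not part of upstream govalidator): IsEmail is a
+// purely syntactic check, while IsExistingEmail also requires a resolvable
+// domain (localhost and example.com are special-cased above):
+//
+//	govalidator.IsEmail("user@example.com")         // true
+//	govalidator.IsEmail("not-an-email")             // false
+//	govalidator.IsExistingEmail("user@example.com") // true (special-cased host)
+
+// IsURL checks if the string is a URL.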
+func IsURL(str string) bool {
+	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+		return false
+	}
+	strTemp := str
+	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+		// support no indicated urlscheme but with colon for port number
+		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+		strTemp = "http://" + str
+	}
+	u, err := url.Parse(strTemp)
+	if err != nil {
+		return false
+	}
+	if strings.HasPrefix(u.Host, ".") {
+		return false
+	}
+	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+		return false
+	}
+	return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+	url, err := url.ParseRequestURI(rawurl)
+	if err != nil {
+		return false // couldn't even parse the rawurl
+	}
+	if len(url.Scheme) == 0 {
+		return false // no scheme found
+	}
+	return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+	_, err := url.ParseRequestURI(rawurl)
+	return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+
+	for _, c := range str {
+		if !unicode.IsLetter(c) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	for _, c := range str {
+		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters && numbers are ok
+			return false
+		}
+	}
+	return true
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsNumber(c) { // numbers && minus sign are ok
+			return false
+		}
+	}
+	return true
+}
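+
+// Example (editor's sketch, not part of upstream govalidator): IsNumeric
+// accepts ASCII digits only, while IsUTFNumeric accepts any unicode number
+// and tolerates a single leading sign:
+//
+//	govalidator.IsNumeric("123")     // true
+//	govalidator.IsNumeric("Ⅸ")       // false
+//	govalidator.IsUTFNumeric("Ⅸ")    // true
+//	govalidator.IsUTFNumeric("-123") // true
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.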
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsDigit(c) { // digits && minus sign are ok
+			return false
+		}
+	}
+	return true
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+	return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+	return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+	return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase letter. Empty string is valid.
+func HasLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least 1 uppercase letter. Empty string is valid.
+func HasUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+	return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that is divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+	f, _ := ToFloat(str)
+	p := int64(f)
+	q, _ := ToInt(num)
+	if q == 0 {
+		return false
+	}
+	return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null.
+func IsNull(str string) bool {
+	return len(str) == 0
+}
+
+// IsNotNull checks if the string is not null.
+func IsNotNull(str string) bool {
+	return !IsNull(str)
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace.
+func HasWhitespaceOnly(str string) bool {
+	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace.
+func HasWhitespace(str string) bool {
+	return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+	return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+	return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+	return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+	return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// Byte to index table for O(1) lookups when unmarshaling. +// We use 0xFF as sentinel value for invalid indexes. +var ulidDec = [...]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, + 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, + 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, + 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +} + +// EncodedSize is the length of a text encoded ULID. +const ulidEncodedSize = 26 + +// IsULID checks if the string is a ULID. +// +// Implementation got from: +// https://github.com/oklog/ulid (Apache-2.0 License) +// +func IsULID(str string) bool { + // Check if a base32 encoded ULID is the right length. + if len(str) != ulidEncodedSize { + return false + } + + // Check if all the characters in a base32 encoded ULID are part of the + // expected base32 character set. + if ulidDec[str[0]] == 0xFF || + ulidDec[str[1]] == 0xFF || + ulidDec[str[2]] == 0xFF || + ulidDec[str[3]] == 0xFF || + ulidDec[str[4]] == 0xFF || + ulidDec[str[5]] == 0xFF || + ulidDec[str[6]] == 0xFF || + ulidDec[str[7]] == 0xFF || + ulidDec[str[8]] == 0xFF || + ulidDec[str[9]] == 0xFF || + ulidDec[str[10]] == 0xFF || + ulidDec[str[11]] == 0xFF || + ulidDec[str[12]] == 0xFF || + ulidDec[str[13]] == 0xFF || + ulidDec[str[14]] == 0xFF || + ulidDec[str[15]] == 0xFF || + ulidDec[str[16]] == 0xFF || + ulidDec[str[17]] == 0xFF || + ulidDec[str[18]] == 0xFF || + ulidDec[str[19]] == 0xFF || + ulidDec[str[20]] == 0xFF || + ulidDec[str[21]] == 0xFF || + ulidDec[str[22]] == 0xFF || + ulidDec[str[23]] == 0xFF || + ulidDec[str[24]] == 0xFF || + ulidDec[str[25]] == 0xFF { + return false + } + + // Check if the first character in a base32 encoded ULID will overflow. This + // happens because the base32 representation encodes 130 bits, while the + // ULID is only 128 bits. + // + // See https://github.com/oklog/ulid/issues/9 for details. + if str[0] > '7' { + return false + } + return true +} + +// IsCreditCard checks if the string is a credit card. 
+func IsCreditCard(str string) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	if !rxCreditCard.MatchString(sanitized) {
+		return false
+	}
+
+	number, _ := ToInt(sanitized)
+	number, lastDigit := number/10, number%10
+
+	var sum int64
+	for i := 0; number > 0; i++ {
+		digit := number % 10
+
+		if i%2 == 0 {
+			digit *= 2
+			if digit > 9 {
+				digit -= 9
+			}
+		}
+
+		sum += digit
+		number = number / 10
+	}
+
+	return (sum+lastDigit)%10 == 0
+}
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+	return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+	return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, it checks both variants.
+func IsISBN(str string, version int) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	var checksum int32
+	var i int32
+	if version == 10 {
+		if !rxISBN10.MatchString(sanitized) {
+			return false
+		}
+		for i = 0; i < 9; i++ {
+			checksum += (i + 1) * int32(sanitized[i]-'0')
+		}
+		if sanitized[9] == 'X' {
+			checksum += 10 * 10
+		} else {
+			checksum += 10 * int32(sanitized[9]-'0')
+		}
+		return checksum%11 == 0
+	} else if version == 13 {
+		if !rxISBN13.MatchString(sanitized) {
+			return false
+		}
+		factor := []int32{1, 3}
+		for i = 0; i < 12; i++ {
+			checksum += factor[i%2] * int32(sanitized[i]-'0')
+		}
+		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+	}
+	return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+	var js json.RawMessage
+	return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+	return rxBase64.MatchString(str)
+}
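The Luhn pass in IsCreditCard above works right-to-left over the integer form of the number: it doubles every other digit, folds doubled values above 9 back into a single digit, and requires the total (including the check digit) to be a multiple of 10. For reference, a standalone sketch of the same checksum rule; the helper name luhnValid and the test numbers are illustrative, not part of the vendored package.

	package main

	import "fmt"

	// luhnValid applies the Luhn rule to an ASCII digit string: starting
	// from the second-rightmost digit, double every other digit, subtract 9
	// when a doubled digit exceeds 9, and require sum % 10 == 0.
	func luhnValid(digits string) bool {
		sum := 0
		double := false
		for i := len(digits) - 1; i >= 0; i-- {
			d := int(digits[i] - '0')
			if double {
				d *= 2
				if d > 9 {
					d -= 9
				}
			}
			sum += d
			double = !double
		}
		return sum%10 == 0
	}

	func main() {
		fmt.Println(luhnValid("4111111111111111")) // true: a classic test PAN
		fmt.Println(luhnValid("4111111111111112")) // false: broken check digit
	}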
+
+// IsFilePath checks if a string is a Win or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+	if rxWinPath.MatchString(str) {
+		// check windows path limit, see:
+		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+		if len(str[3:]) > 32767 {
+			return false, Win
+		}
+		return true, Win
+	} else if rxUnixPath.MatchString(str) {
+		return true, Unix
+	}
+	return false, Unknown
+}
+
+// IsWinFilePath checks both relative & absolute paths in Windows.
+func IsWinFilePath(str string) bool {
+	if rxARWinPath.MatchString(str) {
+		// check windows path limit, see:
+		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+		if len(str[3:]) > 32767 {
+			return false
+		}
+		return true
+	}
+	return false
+}
+
+// IsUnixFilePath checks both relative & absolute paths in Unix.
+func IsUnixFilePath(str string) bool {
+	return rxARUnixPath.MatchString(str)
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
+func IsDataURI(str string) bool {
+	dataURI := strings.Split(str, ",")
+	if !rxDataURI.MatchString(dataURI[0]) {
+		return false
+	}
+	return IsBase64(dataURI[1])
+}
+
+// IsMagnetURI checks if a string is a valid magnet URI.
+func IsMagnetURI(str string) bool {
+	return rxMagnetURI.MatchString(str)
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
+func IsISO3166Alpha2(str string) bool {
+	for _, entry := range ISO3166List {
+		if str == entry.Alpha2Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
+func IsISO3166Alpha3(str string) bool {
+	for _, entry := range ISO3166List {
+		if str == entry.Alpha3Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO693Alpha2 checks if a string is a valid two-letter language code.
+func IsISO693Alpha2(str string) bool {
+	for _, entry := range ISO693List {
+		if str == entry.Alpha2Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO693Alpha3b checks if a string is a valid three-letter language code.
+func IsISO693Alpha3b(str string) bool {
+	for _, entry := range ISO693List {
+		if str == entry.Alpha3bCode {
+			return true
+		}
+	}
+	return false
+}
+
+// IsDNSName will validate the given string as a DNS name.
+func IsDNSName(str string) bool {
+	if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+		// constraints already violated
+		return false
+	}
+	return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of the given algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+	var len string
+	algo := strings.ToLower(algorithm)
+
+	if algo == "crc32" || algo == "crc32b" {
+		len = "8"
+	} else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
+		len = "32"
+	} else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
+		len = "40"
+	} else if algo == "tiger192" {
+		len = "48"
+	} else if algo == "sha3-224" {
+		len = "56"
+	} else if algo == "sha256" || algo == "sha3-256" {
+		len = "64"
+	} else if algo == "sha384" || algo == "sha3-384" {
+		len = "96"
+	} else if algo == "sha512" || algo == "sha3-512" {
+		len = "128"
+	} else {
+		return false
+	}
+
+	return Matches(str, "^[a-f0-9]{"+len+"}$")
+}
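IsHash above only checks that the string is lowercase hex of the length the named algorithm produces. A minimal usage sketch, assuming the package is imported by its upstream path:

	package main

	import (
		"crypto/sha256"
		"fmt"

		"github.com/asaskevich/govalidator"
	)

	func main() {
		// A SHA-256 digest hex-encodes to 64 characters, the length
		// IsHash expects for the "sha256" algorithm.
		digest := fmt.Sprintf("%x", sha256.Sum256([]byte("hello")))
		fmt.Println(govalidator.IsHash(digest, "sha256")) // true
		fmt.Println(govalidator.IsHash(digest, "md5"))    // false: wrong length
	}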
+
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`.
+func IsSHA3224(str string) bool {
+	return IsHash(str, "sha3-224")
+}
+
+// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`.
+func IsSHA3256(str string) bool {
+	return IsHash(str, "sha3-256")
+}
+
+// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`.
+func IsSHA3384(str string) bool {
+	return IsHash(str, "sha3-384")
+}
+
+// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`.
+func IsSHA3512(str string) bool {
+	return IsHash(str, "sha3-512")
+}
+
+// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`.
+func IsSHA512(str string) bool {
+	return IsHash(str, "sha512")
+}
+
+// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`.
+func IsSHA384(str string) bool {
+	return IsHash(str, "sha384")
+}
+
+// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`.
+func IsSHA256(str string) bool {
+	return IsHash(str, "sha256")
+}
+
+// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`.
+func IsTiger192(str string) bool {
+	return IsHash(str, "tiger192")
+}
+
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`.
+func IsTiger160(str string) bool {
+	return IsHash(str, "tiger160")
+}
+
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`.
+func IsRipeMD160(str string) bool {
+	return IsHash(str, "ripemd160")
+}
+
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`.
+func IsSHA1(str string) bool {
+	return IsHash(str, "sha1")
+}
+
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`.
+func IsTiger128(str string) bool {
+	return IsHash(str, "tiger128")
+}
+
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`.
+func IsRipeMD128(str string) bool {
+	return IsHash(str, "ripemd128")
+}
+
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`.
+func IsCRC32(str string) bool {
+	return IsHash(str, "crc32")
+}
+
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`.
+func IsCRC32b(str string) bool {
+	return IsHash(str, "crc32b")
+}
+
+// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`.
+func IsMD5(str string) bool {
+	return IsHash(str, "md5")
+}
+
+// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`.
+func IsMD4(str string) bool {
+	return IsHash(str, "md4")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions.
+func IsDialString(str string) bool {
+	if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+		return true
+	}
+
+	return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`.
+func IsIP(str string) bool {
+	return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port.
+func IsPort(str string) bool {
+	if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+		return true
+	}
+	return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6).
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name.
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is a valid IMEI.
+func IsIMEI(str string) bool {
+	return rxIMEI.MatchString(str)
+}
+
+// IsIMSI checks if a string is a valid IMSI.
+func IsIMSI(str string) bool {
+	if !rxIMSI.MatchString(str) {
+		return false
+	}
+
+	mcc, err := strconv.ParseInt(str[0:3], 10, 32)
+	if err != nil {
+		return false
+	}
+
+	// Known mobile country codes (MCCs); a match in any empty case falls
+	// out of the switch to the final return true.
+	switch mcc {
+	case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
+	case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
+	case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
+	case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
+	case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
+	case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
+	case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
+	case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
+	case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
+	case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
+	case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
+	case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
+	case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
+	case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
+	case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
+	case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
+	case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
+	case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
+	case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
+	case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
+	case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
+	case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
+	case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
+	case 738, 740, 742, 744, 746, 748, 750, 995:
+		return true
+	default:
+		return false
+	}
+	return true
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length.
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+ pubkey, ok := key.(*rsa.PublicKey) + if !ok { + return false + } + bitlen := len(pubkey.N.Bytes()) * 8 + return bitlen == int(keylen) +} + +// IsRegex checks if a give string is a valid regex with RE2 syntax or not +func IsRegex(str string) bool { + if _, err := regexp.Compile(str); err == nil { + return true + } + return false +} + +func toJSONName(tag string) string { + if tag == "" { + return "" + } + + // JSON name always comes first. If there's no options then split[0] is + // JSON name, if JSON name is not set, then split[0] is an empty string. + split := strings.SplitN(tag, ",", 2) + + name := split[0] + + // However it is possible that the field is skipped when + // (de-)serializing from/to JSON, in which case assume that there is no + // tag name to use + if name == "-" { + return "" + } + return name +} + +func prependPathToErrors(err error, path string) error { + switch err2 := err.(type) { + case Error: + err2.Path = append([]string{path}, err2.Path...) + return err2 + case Errors: + errors := err2.Errors() + for i, err3 := range errors { + errors[i] = prependPathToErrors(err3, path) + } + return err2 + } + return err +} + +// ValidateArray performs validation according to condition iterator that validates every element of the array +func ValidateArray(array []interface{}, iterator ConditionIterator) bool { + return Every(array, iterator) +} + +// ValidateMap use validation map for fields. +// result will be equal to `false` if there are any errors. +// s is the map containing the data to be validated. +// m is the validation map in the form: +// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} +func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + var errs Errors + var index int + val := reflect.ValueOf(s) + for key, value := range s { + presentResult := true + validator, ok := m[key] + if !ok { + presentResult = false + var err error + err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + valueField := reflect.ValueOf(value) + mapResult := true + typeResult := true + structResult := true + resultField := true + switch subValidator := validator.(type) { + case map[string]interface{}: + var err error + if v, ok := value.(map[string]interface{}); !ok { + mapResult = false + err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } else { + mapResult, err = ValidateMap(v, subValidator) + if err != nil { + mapResult = false + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + } + case string: + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + subValidator != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + } + resultField, err = typeCheck(valueField, reflect.StructField{ + Name: key, + PkgPath: "", + Type: val.Type(), + Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), + Offset: 0, + Index: []int{index}, + Anonymous: false, + }, val, nil) + if err != nil { + errs = append(errs, err) + } + case nil: + // already handlerd when checked before + default: + typeResult = 
false + err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) + err = prependPathToErrors(err, key) + errs = append(errs, err) + } + result = result && presentResult && typeResult && resultField && structResult && mapResult + index++ + } + // checks required keys + requiredResult := true + for key, value := range m { + if schema, ok := value.(string); ok { + tags := parseTagIntoMap(schema) + if required, ok := tags["required"]; ok { + if _, ok := s[key]; !ok { + requiredResult = false + if required.customErrorMessage != "" { + err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} + } else { + err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} + } + errs = append(errs, err) + } + } + } + } + + if len(errs) > 0 { + err = errs + } + return result && requiredResult, err +} + +// ValidateStruct use tags for fields. +// result will be equal to `false` if there are any errors. +// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) +func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = prependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// ValidateStructAsync performs async validation of the struct and returns results through the channels +func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer close(res) + defer close(errors) + + isValid, isFailed := ValidateStruct(s) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// ValidateMapAsync performs async validation of the map and returns results through the channels +func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer close(res) + defer close(errors) 
+ + isValid, isFailed := ValidateMap(s, m) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver checks if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsType checks if interface is of some type +func IsType(v interface{}, params ...string) bool { + if len(params) == 1 { + typ := params[0] + return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) + } + return false +} + +// IsTime checks if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsUnixTime checks if string is valid unix timestamp value +func IsUnixTime(str string) bool { + if _, err := strconv.Atoi(str); err == nil { + return true + } + return false +} + +// IsRFC3339 checks if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, rfc3339WithoutZone) +} + +// IsISO4217 checks if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength checks string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength checks string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub checks whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. 
+func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength checks string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// MinStringLength checks string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength checks string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + +// Range checks string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +// IsInRaw checks if string is in list of allowed values +func IsInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) + } + + return false +} + +// IsIn checks if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // checks if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, checks only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var 
customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // checks for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option checks the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // checks whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // checks for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then checks its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return true, nil + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +// isEmptyValue checks whether value empty or not +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e := e.(type) { + case Error: + m[e.Name] = e.Err.Error() + case Errors: + for _, item := range e.Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } + +func IsE164(str string) bool { + return rxE164.MatchString(str) +} diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 00000000..bc5f7b08 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race -v ./... diff --git a/vendor/github.com/containers/common/LICENSE b/vendor/github.com/containers/common/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/containers/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
new file mode 100644
index 00000000..d47e47e1
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -0,0 +1,131 @@
+package retry
+
+import (
+	"context"
+	"io"
+	"math"
+	"net"
+	"net/url"
+	"syscall"
+	"time"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	errcodev2 "github.com/docker/distribution/registry/api/v2"
+	"github.com/hashicorp/go-multierror"
+	"github.com/sirupsen/logrus"
+)
+
+// Options defines the options for retrying an operation.
+type Options struct {
+	MaxRetry         int           // The number of times to possibly retry.
+	Delay            time.Duration // The delay to use between retries, if set.
+	IsErrorRetryable func(error) bool
+}
+
+// RetryOptions is deprecated, use Options.
+type RetryOptions = Options // nolint:revive
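A minimal sketch of how a caller might drive this retry API, assuming the vendored import path; the op closure and the attempt counts are illustrative only.

	package main

	import (
		"context"
		"errors"
		"fmt"
		"time"

		"github.com/containers/common/pkg/retry"
	)

	func main() {
		attempts := 0
		op := func() error {
			attempts++
			if attempts < 3 {
				return errors.New("transient failure") // pretend the registry hiccuped
			}
			return nil
		}
		err := retry.IfNecessary(context.Background(), op, &retry.Options{
			MaxRetry: 5,
			Delay:    10 * time.Millisecond, // fixed delay; leave zero for exponential backoff
			// Treat every error as retryable for this demo; by default the
			// package's own IsErrorRetryable heuristic is consulted.
			IsErrorRetryable: func(error) bool { return true },
		})
		fmt.Println(attempts, err) // 3 <nil>
	}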
+
+// RetryIfNecessary is deprecated; use IfNecessary instead.
+func RetryIfNecessary(ctx context.Context, operation func() error, options *Options) error { // nolint:revive
+	return IfNecessary(ctx, operation, options)
+}
+
+// IfNecessary retries the operation with exponential backoff, governed by the retry Options.
+func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
+	var isRetryable func(error) bool
+	if options.IsErrorRetryable != nil {
+		isRetryable = options.IsErrorRetryable
+	} else {
+		isRetryable = IsErrorRetryable
+	}
+	err := operation()
+	for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
+		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
+		if options.Delay != 0 {
+			delay = options.Delay
+		}
+		logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err)
+		select {
+		case <-time.After(delay):
+			break
+		case <-ctx.Done():
+			return err
+		}
+		err = operation()
+	}
+	return err
+}
+
+// IsErrorRetryable makes a HEURISTIC determination whether it is worth retrying upon encountering an error.
+// That heuristic is NOT STABLE and it CAN CHANGE AT ANY TIME.
+// Callers that have a hard requirement for specific treatment of a class of errors should make their own check
+// instead of relying on this function maintaining its past behavior.
+func IsErrorRetryable(err error) bool {
+	switch err {
+	case nil:
+		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return false
+	default: // continue
+	}
+
+	type unwrapper interface {
+		Unwrap() error
+	}
+
+	switch e := err.(type) {
+	case errcode.Error:
+		switch e.Code {
+		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied,
+			errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
+			return false
+		}
+		return true
+	case *net.OpError:
+		return IsErrorRetryable(e.Err)
+	case *url.Error: // This includes errors returned by the net/http client.
+ if e.Err == io.EOF { // Happens when a server accepts an HTTP connection and sends EOF + return true + } + return IsErrorRetryable(e.Err) + case syscall.Errno: + return isErrnoRetryable(e) + case errcode.Errors: + // if this error is a group of errors, process them all in turn + for i := range e { + if !IsErrorRetryable(e[i]) { + return false + } + } + return true + case *multierror.Error: + // if this error is a group of errors, process them all in turn + for i := range e.Errors { + if !IsErrorRetryable(e.Errors[i]) { + return false + } + } + return true + case net.Error: + if e.Timeout() { + return true + } + if unwrappable, ok := e.(unwrapper); ok { + err = unwrappable.Unwrap() + return IsErrorRetryable(err) + } + case unwrapper: // Test this last, because various error types might implement .Unwrap() + err = e.Unwrap() + return IsErrorRetryable(err) + } + + return false +} + +func isErrnoRetryable(e error) bool { + switch e { + case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.EBUSY, syscall.ENETDOWN, syscall.ENETUNREACH, syscall.ENETRESET, syscall.ECONNABORTED, syscall.ECONNRESET, syscall.ETIMEDOUT, syscall.EHOSTDOWN, syscall.EHOSTUNREACH: + return true + } + return isErrnoERESTART(e) +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_linux.go b/vendor/github.com/containers/common/pkg/retry/retry_linux.go new file mode 100644 index 00000000..b7942f7f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_linux.go @@ -0,0 +1,9 @@ +package retry + +import ( + "syscall" +) + +func isErrnoERESTART(e error) bool { + return e == syscall.ERESTART +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go new file mode 100644 index 00000000..901e28a5 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package retry + +func isErrnoERESTART(e error) bool { + return false +} diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE new file mode 100644 index 00000000..95356353 --- /dev/null +++ b/vendor/github.com/containers/image/v5/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go new file mode 100644 index 00000000..8d5580d7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -0,0 +1,187 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps (de/re/)compressing it if canModifyBlob, +// and returns a complete blobInfo of the copied blob. +func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers; + // that pipeline is built by updating stream. + // === Input: srcReader + stream := sourceStream{ + reader: srcReader, + info: srcInfo, + } + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. 
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.options.Progress channel, if required. + if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.options.Progress, + ic.c.options.ProgressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest.
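+ // At this point stream.reader is the head of the reader chain assembled above, + // roughly: srcReader -> digest verification -> progress bar -> (decryption) -> + // (tee to the DiffID-computing writer) -> (de/re-compression) -> (encryption) -> + // (progress reporting); which stages are present depends on the options taken above.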
+ options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) + } + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. + if getOriginalLayerCopyWriter != nil { + logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") + _, err := io.Copy(io.Discard, originalLayerReader) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err) + } + } + + if digestingReader.validationFailed { // Coverage: This should never happen. + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) + } + if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest { + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest) + } + if digestingReader.validationSucceeded { + if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil { + return types.BlobInfo{}, err + } + } + + return uploadedInfo, nil +} + +// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built. +// This allows handlers of individual aspects to build the copy pipeline without _too much_ +// specific cooperation by the caller. +// +// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline +// without specific knowledge of various aspects in copyBlobFromStream; that may come one day. +type sourceStream struct { + reader io.Reader + info types.BlobInfo // corresponding to the data available in reader. +} + +// errorAnnotationReader wraps the io.Reader passed to PutBlob, to annotate errors that happen during the read. +// Those errors surface as PutBlob errors, so without the annotation we would misleadingly attribute them to the copy destination. +type errorAnnotationReader struct { + reader io.Reader +} + +// Read annotates any error that happened during the read +func (r errorAnnotationReader) Read(b []byte) (n int, err error) { + n, err = r.reader.Read(b) + if err != nil && err != io.EOF { + return n, fmt.Errorf("happened during read: %w", err) + } + return n, err +} + +// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob, which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo { + // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + return types.BlobInfo{ + Digest: uploadedBlob.Digest, + Size: uploadedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto. + CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream. + CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream. + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream. + } +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go new file mode 100644 index 00000000..6ba70f0b --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -0,0 +1,342 @@ +package copy + +import ( + "errors" + "fmt" + "io" + + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" +) + +var ( + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. + defaultCompressionFormat = &compression.Gzip + + // compressionBufferSize is the buffer size used to compress a blob + compressionBufferSize = 1048576 + + // expectedCompressionFormats is used to check if a blob with a specified media type is compressed + // using the algorithm that the media type says it should be compressed with + expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + } +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
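+ // (DetectCompressionFormat sniffs the leading bytes of the stream to identify the + // algorithm, and returns a replacement io.Reader that replays those bytes followed by + // the rest of the input; that is why stream.reader is swapped below rather than reused.)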
+ format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + stream.reader = reader + + res := bpDetectCompressionStepData{ + isCompressed: decompressor != nil, + format: format, + decompressor: decompressor, + } + if res.isCompressed { + res.srcCompressorName = format.Name() + } else { + res.srcCompressorName = internalblobinfocache.Uncompressed + } + + if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name()) + } + return res, nil +} + +// bpCompressionStepData contains data that the copy pipeline needs about the compression step. +type bpCompressionStepData struct { + operation types.LayerCompression // Operation to use for updating the blob metadata. + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorName string // Compressor name to record in the blob info cache for the source blob. + uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any. +} + +// blobPipelineCompressionStep updates *stream to compress and/or decompress it. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData, +// and must eventually call close. +func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
+func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + logrus.Debugf("Using original blob without modification for encrypted blob") + // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorName: internalblobinfocache.UnknownCompression, + uploadedCompressorName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.compressionFormat != nil { + uploadedAlgorithm = ic.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: uploadedAlgorithm.Name(), + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. + Digest: "", + Size: -1, + } + succeeded = true + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: ic.compressionFormat.Name(), + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. 
+func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: internalblobinfocache.Uncompressed, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. +func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. + var algorithm *compressiontypes.Algorithm + if layerCompressionChangeSupported && detected.isCompressed { + algorithm = &detected.format + } else { + algorithm = nil + } + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: algorithm, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: detected.srcCompressorName, + } +} + +// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary. +func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) { + *operation = d.operation + // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. + *algorithm = d.uploadedAlgorithm + if *annotations == nil { + *annotations = map[string]string{} + } + maps.Copy(*annotations, d.uploadedAnnotations) +} + +// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo. +// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. +func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, + encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptionStep.encrypting && !decryptionStep.decrypting { + // If d.operation != types.PreserveOriginal, we now have two reliable digest values: + // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader + // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob + // (because stream.info.Digest == "", this must have been computed afresh). + switch d.operation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it. + case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) + } + } + if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) + } + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) + } + return nil +} + +// close closes objects that carry state throughout the compression/decompression operation. +func (d *bpCompressionStepData) close() { + for _, c := range d.closers { + c.Close() + } +} + +// doCompression reads all input from src and writes its compressed equivalent to dest. +func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { + compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) + if err != nil { + return err + } + + buf := make([]byte, compressionBufferSize) + + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() + if err != nil { + compressor.Close() + return err + } + + return compressor.Close() +} + +// compressGoroutine reads all input from src and writes its compressed equivalent to dest. +func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { + err := errors.New("Internal error: unexpected panic in compressGoroutine") + defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. + _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil + }() + + err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel) +} + +// compressedStream returns a stream with the contents of the input reader compressed using format, and a metadata map. +// The caller must close the returned reader. +// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
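+// +// A sketch of the intended calling pattern (illustrative only, not upstream code): +// +// rc, annotations := ic.compressedStream(src, compression.Gzip) +// defer rc.Close() +// _, err := io.Copy(dst, rc) +// // annotations is only fully populated once the copy above has completed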
+func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) { + pipeReader, pipeWriter := io.Pipe() + annotations := map[string]string{} + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. + go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter + return pipeReader, annotations +} diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go new file mode 100644 index 00000000..ad1453fc --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -0,0 +1,394 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/containers/image/v5/docker/reference" + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache" + compression "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/signer" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" + "golang.org/x/sync/semaphore" + "golang.org/x/term" +) + +var ( + // ErrDecryptParamsMissing is returned if necessary decryption parameters are missing. + ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present") + + // maxParallelDownloads is used to limit the maximum number of parallel + // downloads. Let's follow Firefox by limiting it to 6. + maxParallelDownloads = uint(6) +) + +const ( + // CopySystemImage is the default value which, when set in + // Options.ImageListSelection, indicates that the caller expects only one + // image to be copied, so if the source reference refers to a list of + // images, one that matches the current system will be selected. + CopySystemImage ImageListSelection = iota + // CopyAllImages is a value which, when set in Options.ImageListSelection, + // indicates that the caller expects to copy multiple images, and if + // the source reference refers to a list, that the list and every image + // to which it refers will be copied. If the source reference refers + // to a list and the target reference can not accept lists, an error + // should be returned. + CopyAllImages + // CopySpecificImages is a value which, when set in + // Options.ImageListSelection, indicates that the caller expects the + // source reference to be either a single image or a list of images, + // and if the source reference is a list, wants only specific instances + // from it copied (or none of them, if the list of instances to copy is + // empty), along with the list itself. If the target reference can + // only accept one image (i.e., it cannot accept lists), an error + // should be returned.
+ CopySpecificImages +) + +// ImageListSelection is one of CopySystemImage, CopyAllImages, or +// CopySpecificImages, to control whether, when the source reference is a list, +// copy.Image() copies only an image which matches the current runtime +// environment, or all images which match the supplied reference, or only +// specific images from the source reference. +type ImageListSelection int + +// Options allows supplying non-default configuration modifying the behavior of CopyImage. +type Options struct { + RemoveSignatures bool // Remove any pre-existing signatures. Signers and SignBy… will still add a new signature. + // Signers to use to add signatures during the copy. + // Callers are still responsible for closing these Signer objects; they can be reused for multiple copy.Image operations in a row. + Signers []*signer.Signer + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. + SignBySigstorePrivateKeyFile string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path. + SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`. + SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination + + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext + ProgressInterval time.Duration // time to wait between reports to signal the progress channel + Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + + // Preserve digests, and fail if we cannot. + PreserveDigests bool + // Manifest MIME type of the image set by the user. "" is the default and means the manifest MIME type is autodetected. + ForceManifestMIMEType string + ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list + Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue, + // prefers the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time) + // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers). + // This only affects CopySystemImage. + PreferGzipInstances types.OptionalBool + + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. + // The encryption options are derived from the constructed EncryptConfig object. + OciEncryptConfig *encconfig.EncryptConfig + // OciEncryptLayers represents the list of layers to encrypt. + // If nil, don't encrypt any layers. + // If non-nil and len==0, denotes encrypt all layers. + // integers in the slice represent 0-indexed layer indices, with support for negative + // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. + OciEncryptLayers *[]int + // OciDecryptConfig contains the config that can be used to decrypt an image, if it is + // encrypted and this field is non-nil.
If nil, it does not attempt to decrypt an image. + OciDecryptConfig *encconfig.DecryptConfig + + // A weighted semaphore to limit the number of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored. + ConcurrentBlobCopiesSemaphore *semaphore.Weighted + + // MaxParallelDownloads indicates the maximum number of layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set. + MaxParallelDownloads uint + + // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already + // exists (and is equivalent), making the eventual (no-op) copy more performant for this case. Enabling the option + // is slightly pessimistic if the destination image doesn't exist, or is not equivalent. + OptimizeDestinationImageAlreadyExists bool + + // Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type + // to not indicate "nondistributable". + DownloadForeignLayers bool + + // Contains a slice of OptionCompressionVariant; copy will ensure that for each platform + // in the manifest list, a variant with the requested compression will exist. + // Invalid when copying a non-multi-architecture image. That will probably + // change in the future. + EnsureCompressionVariantsExist []OptionCompressionVariant + // ForceCompressionFormat ensures that the compression algorithm set in + // DestinationCtx.CompressionFormat is used exclusively, and blobs of other + // compression algorithms are not reused. + ForceCompressionFormat bool +} + +// OptionCompressionVariant allows the end-user to supply information about +// the selected compression algorithm and compression level. Refer to +// EnsureCompressionVariantsExist to know more about its usage. +type OptionCompressionVariant struct { + Algorithm compression.Algorithm + Level *int // Only used when we are creating a new image instance using the specified algorithm, not when the image already contains such an instance +} + +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +// The owner must call close() when done. +type copier struct { + policyContext *signature.PolicyContext + dest private.ImageDestination + rawSource private.ImageSource + options *Options // never nil + + reportWriter io.Writer + progressOutput io.Writer + + unparsedToplevel *image.UnparsedImage // for rawSource + blobInfoCache internalblobinfocache.BlobInfoCache2 + concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the number of concurrently copied blobs + signers []*signer.Signer // Signers to use to create new signatures for the image + signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. +} + +// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions +func shouldRequireCompressionFormatMatch(options *Options) (bool, error) { + if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) { + return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format") + } + return options.ForceCompressionFormat, nil +} + +// Image copies an image from srcRef to destRef, using policyContext to validate +// source image admissibility.
It returns the manifest which was written to +// the new copy of the image. +func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (copiedManifest []byte, retErr error) { + if options == nil { + options = &Options{} + } + + if err := validateImageListSelection(options.ImageListSelection); err != nil { + return nil, err + } + + reportWriter := io.Discard + + if options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + if err != nil { + return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err) + } + dest := imagedestination.FromPublic(publicDest) + defer func() { + if err := dest.Close(); err != nil { + if retErr != nil { + retErr = fmt.Errorf(" (dest: %v): %w", err, retErr) + } else { + retErr = fmt.Errorf(" (dest: %v)", err) + } + } + }() + + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + if err != nil { + return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err) + } + rawSource := imagesource.FromPublic(publicRawSource) + defer func() { + if err := rawSource.Close(); err != nil { + if retErr != nil { + retErr = fmt.Errorf(" (src: %v): %w", err, retErr) + } else { + retErr = fmt.Errorf(" (src: %v)", err) + } + } + }() + + // If reportWriter is not a TTY (e.g., when piping to a file), do not + // print the progress bars to avoid long and hard to parse output. + // Instead use printCopyInfo() to print single line "Copying ..." messages. + progressOutput := reportWriter + if !isTTY(reportWriter) { + progressOutput = io.Discard + } + + c := &copier{ + policyContext: policyContext, + dest: dest, + rawSource: rawSource, + options: options, + + reportWriter: reportWriter, + progressOutput: progressOutput, + + unparsedToplevel: image.UnparsedInstance(rawSource, nil), + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. + blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), + } + defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
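+ // (Either the caller-supplied ConcurrentBlobCopiesSemaphore is used as-is, or a fresh + // weighted semaphore sized by MaxParallelDownloads is created; when the transports are + // not thread-safe the copy is serialized via a weight-1 semaphore, and a caller-supplied + // semaphore is still honored by holding one unit for the duration of the whole copy.)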
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = c.options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := c.options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } + } else { + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if c.options.ConcurrentBlobCopiesSemaphore != nil { + if err := c.options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer c.options.ConcurrentBlobCopiesSemaphore.Release(1) + } + } + + if err := c.setupSigners(); err != nil { + return nil, err + } + + multiImage, err := isMultiImage(ctx, c.unparsedToplevel) + if err != nil { + return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err) + } + + if !multiImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // The simple case: just copy a single image. + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, err + } + copiedManifest = single.manifest + } else if c.options.ImageListSelection == CopySystemImage { + if len(options.EnsureCompressionVariantsExist) > 0 { + return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") + } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that + // matches the current system to copy, and copy it. + mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) + } + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) + } + instanceDigest, err := manifestList.ChooseInstanceByCompression(c.options.SourceCtx, c.options.PreferGzipInstances) // try to pick one that matches c.options.SourceCtx + if err != nil { + return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) + if err != nil { + return nil, fmt.Errorf("copying system image from manifest list: %w", err) + } + copiedManifest = single.manifest + } else { /* c.options.ImageListSelection == CopyAllImages or c.options.ImageListSelection == CopySpecificImages, */ + // If we were asked to copy multiple images and can't, that's an error. 
+ if !supportsMultipleImages(c.dest) { + return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + } + // Copy some or all of the images. + switch c.options.ImageListSelection { + case CopyAllImages: + logrus.Debugf("Source is a manifest list; copying all instances") + case CopySpecificImages: + logrus.Debugf("Source is a manifest list; copying some instances") + } + if copiedManifest, err = c.copyMultipleImages(ctx); err != nil { + return nil, err + } + } + + if err := c.dest.Commit(ctx, c.unparsedToplevel); err != nil { + return nil, fmt.Errorf("committing the finished image: %w", err) + } + + return copiedManifest, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...any) { + fmt.Fprintf(c.reportWriter, format, a...) +} + +// close tears down state owned by copier. +func (c *copier) close() { + for i, s := range c.signersToClose { + if err := s.Close(); err != nil { + logrus.Warnf("Error closing per-copy signer %d: %v", i+1, err) + } + } +} + +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + +// Checks if the destination supports accepting multiple images by checking if it can support +// manifest types that are lists of other manifests. +func supportsMultipleImages(dest types.ImageDestination) bool { + mtypes := dest.SupportedManifestMIMETypes() + if len(mtypes) == 0 { + // Anything goes! + return true + } + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return term.IsTerminal(int(f.Fd())) + } + return false +} diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go new file mode 100644 index 00000000..901d1082 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "fmt" + "hash" + "io" + + digest "github.com/opencontainers/go-digest" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). 
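+// +// An illustrative use (a sketch; body is any io.Reader, expected a digest.Digest): +// +// dr, err := newDigestingReader(body, expected) +// // handle err (invalid or unsupported digest) +// if _, err := io.Copy(io.Discard, dr); err != nil { +// // non-EOF errors include a digest mismatch detected at EOF +// }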
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+	var digester digest.Digester
+	if err := expectedDigest.Validate(); err != nil {
+		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+	}
+	digestAlgorithm := expectedDigest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+	}
+	digester = digestAlgorithm.Digester()
+
+	return &digestingReader{
+		source:           source,
+		digester:         digester,
+		hash:             digester.Hash(),
+		expectedDigest:   expectedDigest,
+		validationFailed: false,
+	}, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+	n, err := d.source.Read(p)
+	if n > 0 {
+		if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
+			// Coverage: This should not happen, the hash.Hash interface requires
+			// d.hash.Write to never return an error, and the io.Writer interface
+			// requires n2 == len(input) if no error is returned.
+			return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
+		}
+	}
+	if err == io.EOF {
+		actualDigest := d.digester.Digest()
+		if actualDigest != d.expectedDigest {
+			d.validationFailed = true
+			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+		}
+		d.validationSucceeded = true
+	}
+	return n, err
+}
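For illustration, the digest-verify-while-streaming idea behind digestingReader can be reproduced with github.com/opencontainers/go-digest and io.TeeReader; this is a sketch with made-up payload data, not code from the patch:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := "hello layer"
	expected := digest.FromString(payload) // sha256 digest of the payload

	// Tee everything we read into a digester, then compare at EOF,
	// which is essentially what digestingReader.Read does incrementally.
	digester := expected.Algorithm().Digester()
	r := io.TeeReader(strings.NewReader(payload), digester.Hash())
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	if digester.Digest() == expected {
		fmt.Println("digest verified:", expected)
	} else {
		fmt.Println("digest mismatch")
	}
}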
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 00000000..b406b0c3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,134 @@
+package copy
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	"github.com/containers/ocicrypt"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+	return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+	layers := i.LayerInfos()
+	return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
+		return isOciEncrypted(l.MediaType)
+	})
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+	decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+	if !isOciEncrypted(stream.info.MediaType) || ic.c.options.OciDecryptConfig == nil {
+		return &bpDecryptionStepData{
+			decrypting: false,
+		}, nil
+	}
+
+	if ic.cannotModifyManifestReason != "" {
+		return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+	}
+
+	desc := imgspecv1.Descriptor{
+		Annotations: stream.info.Annotations,
+	}
+	reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+	if err != nil {
+		return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+	}
+
+	stream.reader = reader
+	stream.info.Digest = decryptedDigest
+	stream.info.Size = -1
+	maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+		return strings.HasPrefix(k, "org.opencontainers.image.enc")
+	})
+	return &bpDecryptionStepData{
+		decrypting: true,
+	}, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+	if d.decrypting {
+		*operation = types.Decrypt
+	}
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+	encrypting bool // We are actually encrypting the stream
+	finalizer  ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+	decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+	if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.options.OciEncryptConfig == nil {
+		return &bpEncryptionStepData{
+			encrypting: false,
+		}, nil
+	}
+
+	if ic.cannotModifyManifestReason != "" {
+		return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+	}
+
+	var annotations map[string]string
+	if !decryptionStep.decrypting {
+		annotations = srcInfo.Annotations
+	}
+	desc := imgspecv1.Descriptor{
+		MediaType:   srcInfo.MediaType,
+		Digest:      srcInfo.Digest,
+		Size:        srcInfo.Size,
+		Annotations: annotations,
+	}
+	reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.options.OciEncryptConfig, stream.reader, desc)
+	if err != nil {
+		return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+	}
+
+	stream.reader = reader
+	stream.info.Digest = ""
+	stream.info.Size = -1
+	return &bpEncryptionStepData{
+		encrypting: true,
+		finalizer:  finalizer,
+	}, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
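As a brief aside before the finalizer wiring below: the encryption plumbing above keys entirely off the "+encrypted" media-type suffix. A tiny standalone sketch of that convention, with made-up descriptor media types:

package main

import (
	"fmt"
	"strings"
)

// Matches the "+encrypted" suffix convention that isOciEncrypted above relies on.
func isOciEncrypted(mediatype string) bool {
	return strings.HasSuffix(mediatype, "+encrypted")
}

func main() {
	layers := []string{
		"application/vnd.oci.image.layer.v1.tar+gzip",
		"application/vnd.oci.image.layer.v1.tar+gzip+encrypted",
	}
	for _, mt := range layers {
		fmt.Printf("%s encrypted=%t\n", mt, isOciEncrypted(mt))
	}
}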
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error { + if !d.encrypting { + return nil + } + + encryptAnnotations, err := d.finalizer() + if err != nil { + return fmt.Errorf("Unable to finalize encryption: %w", err) + } + *operation = types.Encrypt + if *annotations == nil { + *annotations = map[string]string{} + } + maps.Copy(*annotations, encryptAnnotations) + return nil +} diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go new file mode 100644 index 00000000..6f01cf5c --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -0,0 +1,224 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. +var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption. +var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} + +// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. +type orderedSet struct { + list []string + included *set.Set[string] +} + +// newOrderedSet creates a correctly initialized orderedSet. +// [Sometimes it would be really nice if Golang had constructors…] +func newOrderedSet() *orderedSet { + return &orderedSet{ + list: []string{}, + included: set.New[string](), + } +} + +// append adds s to the end of os, only if it is not included already. +func (os *orderedSet) append(s string) { + if !os.included.Contains(s) { + os.list = append(os.list, s) + os.included.Add(s) + } +} + +// determineManifestConversionInputs contains the inputs for determineManifestConversion. +type determineManifestConversionInputs struct { + srcMIMEType string // MIME type of the input manifest + + destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes() + + forceManifestMIMEType string // User’s choice of forced manifest MIME type + requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can +} + +// manifestConversionPlan contains the decisions made by determineManifestConversion. +type manifestConversionPlan struct { + // The preferred manifest MIME type (whether we are converting to it or using it unmodified). + // We compute this only to show it in error messages; without having to add this context + // in an error message, we would be happy enough to know only that no conversion is needed. + preferredMIMEType string + preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step. 
+ otherMIMETypeCandidates []string // Other possible alternatives, in order +} + +// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in. +func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) { + srcType := in.srcMIMEType + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes + if in.forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} + } + + if len(destSupportedManifestMIMETypes) == 0 { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { + return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil + } + destSupportedManifestMIMETypes = ociEncryptionMIMETypes + } + supportedByDest := set.New[string]() + for _, t := range destSupportedManifestMIMETypes { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { + supportedByDest.Add(t) + } + } + if supportedByDest.Empty() { + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes + return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") + } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved. + if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption. + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") + } + // destSupportedManifestMIMETypes has three possible origins: + if in.forceManifestMIMEType != "" { // 1. forceManifestType specified + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes + // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption + return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // 3. destination does not support encryption. + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + strings.Join(destSupportedManifestMIMETypes, ", ")) + } + + // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. + // So, build a list of types to try in order of decreasing preference. + // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, + // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. 
+	// In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+	// and never attempt the other one.
+	prioritizedTypes := newOrderedSet()
+
+	// First of all, prefer to keep the original manifest unmodified.
+	if supportedByDest.Contains(srcType) {
+		prioritizedTypes.append(srcType)
+	}
+	if in.cannotModifyManifestReason != "" {
+		// We could also drop this check and have the caller
+		// make the choice; it is already doing that to an extent, to improve error
+		// messages. But it is nice to hide the “if we can't modify, do no conversion”
+		// special case in here; the caller can then worry (or not) only about a good UI.
+		logrus.Debugf("We can't modify the manifest, hoping for the best...")
+		return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
+			preferredMIMEType:       srcType,
+			otherMIMETypeCandidates: []string{},
+		}, nil
+	}
+
+	// Then use our list of preferred types.
+	for _, t := range preferredManifestMIMETypes {
+		if supportedByDest.Contains(t) {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	// Finally, try anything else the destination supports.
+	for _, t := range destSupportedManifestMIMETypes {
+		if supportedByDest.Contains(t) {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
+		return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
+	}
+	res := manifestConversionPlan{
+		preferredMIMEType:       prioritizedTypes.list[0],
+		otherMIMETypeCandidates: prioritizedTypes.list[1:],
+	}
+	res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
+	if !res.preferredMIMETypeNeedsConversion {
+		logrus.Debugf("... will first try using the original manifest unmodified")
+	}
+	return res, nil
+}
+
+// isMultiImage returns true if img is a list of images
+func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
+	_, mt, err := img.Manifest(ctx)
+	if err != nil {
+		return false, err
+	}
+	return manifest.MIMETypeIsMultiImage(mt), nil
+}
+
+// determineListConversion takes the current MIME type of a list of manifests,
+// the list of MIME types supported for a given destination, and a possible
+// forced value, and returns the MIME type to which we should convert the list
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
+	// If there's no list of supported types, then anything we support is expected to be supported.
+	if len(destSupportedMIMETypes) == 0 {
+		destSupportedMIMETypes = manifest.SupportedListMIMETypes
+	}
+	// If we're forcing it, replace the list of supported types with the forced value.
+	if forcedListMIMEType != "" {
+		destSupportedMIMETypes = []string{forcedListMIMEType}
+	}
+
+	prioritizedTypes := newOrderedSet()
+	// The first priority is the current type, if it's in the list, since that lets us avoid a
+	// conversion that isn't strictly necessary.
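The candidate ordering above boils down to an insertion-ordered set: source type first, then preferred types, then everything else, with duplicates ignored. A minimal standard-library-only sketch of the same structure and usage (the MIME type strings are placeholders):

package main

import "fmt"

// orderedSet keeps first-insertion order and ignores duplicates,
// mirroring the orderedSet/prioritizedTypes logic above.
type orderedSet struct {
	list     []string
	included map[string]bool
}

func newOrderedSet() *orderedSet {
	return &orderedSet{included: map[string]bool{}}
}

func (os *orderedSet) append(s string) {
	if !os.included[s] {
		os.list = append(os.list, s)
		os.included[s] = true
	}
}

func main() {
	prioritized := newOrderedSet()
	// Source type first, then preferred types, then everything else; the
	// duplicate "src/type" is silently dropped.
	for _, t := range []string{"src/type", "preferred/a", "src/type", "other/b"} {
		prioritized.append(t)
	}
	fmt.Println("preferred:", prioritized.list[0], "fallbacks:", prioritized.list[1:])
}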
+ if slices.Contains(destSupportedMIMETypes, currentListMIMEType) { + prioritizedTypes.append(currentListMIMEType) + } + // Pick out the other list types that we support. + for _, t := range destSupportedMIMETypes { + if manifest.MIMETypeIsMultiImage(t) { + prioritizedTypes.append(t) + } + } + + logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) + if len(prioritizedTypes.list) == 0 { + return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + } + selectedType := prioritizedTypes.list[0] + otherSupportedTypes := prioritizedTypes.list[1:] + if selectedType != currentListMIMEType { + logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes) + } else { + logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes) + } + // Done. + return selectedType, otherSupportedTypes, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go new file mode 100644 index 00000000..30f6da25 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -0,0 +1,351 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +type instanceCopyKind int + +const ( + instanceCopyCopy instanceCopyKind = iota + instanceCopyClone +) + +type instanceCopy struct { + op instanceCopyKind + sourceDigest digest.Digest + + // Fields which can be used by callers when operation + // is `instanceCopyCopy` + copyForceCompressionFormat bool + + // Fields which can be used by callers when operation + // is `instanceCopyClone` + cloneCompressionVariant OptionCompressionVariant + clonePlatform *imgspecv1.Platform + cloneAnnotations map[string]string +} + +// internal type only to make imgspecv1.Platform comparable +type platformComparable struct { + architecture string + os string + osVersion string + osFeatures string + variant string +} + +// Converts imgspecv1.Platform to a comparable format. +func platformV1ToPlatformComparable(platform *imgspecv1.Platform) platformComparable { + if platform == nil { + return platformComparable{} + } + osFeatures := slices.Clone(platform.OSFeatures) + sort.Strings(osFeatures) + return platformComparable{architecture: platform.Architecture, + os: platform.OS, + // This is strictly speaking ambiguous, fields of OSFeatures can contain a ','. Probably good enough for now. 
+		osFeatures: strings.Join(osFeatures, ","),
+		osVersion:  platform.OSVersion,
+		variant:    platform.Variant,
+	}
+}
+
+// platformCompressionMap prepares a mapping of platformComparable -> CompressionAlgorithmNames for given digests
+func platformCompressionMap(list internalManifest.List, instanceDigests []digest.Digest) (map[platformComparable]*set.Set[string], error) {
+	res := make(map[platformComparable]*set.Set[string])
+	for _, instanceDigest := range instanceDigests {
+		instanceDetails, err := list.Instance(instanceDigest)
+		if err != nil {
+			return nil, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
+		}
+		platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
+		platformSet, ok := res[platform]
+		if !ok {
+			platformSet = set.New[string]()
+			res[platform] = platformSet
+		}
+		platformSet.AddSlice(instanceDetails.ReadOnly.CompressionAlgorithmNames)
+	}
+	return res, nil
+}
+
+func validateCompressionVariantExists(input []OptionCompressionVariant) error {
+	for _, option := range input {
+		_, err := compression.AlgorithmByName(option.Algorithm.Name())
+		if err != nil {
+			return fmt.Errorf("invalid algorithm %q in option.EnsureCompressionVariantsExist: %w", option.Algorithm.Name(), err)
+		}
+	}
+	return nil
+}
+
+// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
+func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.Digest, options *Options) ([]instanceCopy, error) {
+	res := []instanceCopy{}
+	if options.ImageListSelection == CopySpecificImages && len(options.EnsureCompressionVariantsExist) > 0 {
+		// The list can already contain a compressed instance for a compression selected in `EnsureCompressionVariantsExist`.
+		// It’s unclear what it means when `CopySpecificImages` includes an instance in options.Instances,
+		// EnsureCompressionVariantsExist asks for an instance with some compression,
+		// an instance with that compression already exists, but is not included in options.Instances.
+		// We might define the semantics and implement this in the future.
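The platformComparable trick above makes imgspecv1.Platform usable as a map key by flattening its one slice field into a sorted, comma-joined string. A reduced sketch of the same idea, with a toy platform type standing in for imgspecv1.Platform:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// toyPlatform stands in for imgspecv1.Platform in this sketch.
type toyPlatform struct {
	OS, Arch   string
	OSFeatures []string
}

// key has no slice fields, so it is comparable and can index a map,
// mirroring platformV1ToPlatformComparable above.
type key struct {
	os, arch, osFeatures string
}

func toKey(p toyPlatform) key {
	fs := append([]string(nil), p.OSFeatures...)
	sort.Strings(fs) // normalize ordering so equal feature sets compare equal
	return key{os: p.OS, arch: p.Arch, osFeatures: strings.Join(fs, ",")}
}

func main() {
	seen := map[key][]string{}
	p := toyPlatform{OS: "linux", Arch: "amd64", OSFeatures: []string{"b", "a"}}
	seen[toKey(p)] = append(seen[toKey(p)], "gzip", "zstd")
	fmt.Println(seen)
}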
+ return res, fmt.Errorf("EnsureCompressionVariantsExist is not implemented for CopySpecificImages") + } + err := validateCompressionVariantExists(options.EnsureCompressionVariantsExist) + if err != nil { + return res, err + } + compressionsByPlatform, err := platformCompressionMap(list, instanceDigests) + if err != nil { + return nil, err + } + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + continue + } + instanceDetails, err := list.Instance(instanceDigest) + if err != nil { + return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err) + } + forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } + res = append(res, instanceCopy{ + op: instanceCopyCopy, + sourceDigest: instanceDigest, + copyForceCompressionFormat: forceCompressionFormat, + }) + platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform) + compressionList := compressionsByPlatform[platform] + for _, compressionVariant := range options.EnsureCompressionVariantsExist { + if !compressionList.Contains(compressionVariant.Algorithm.Name()) { + res = append(res, instanceCopy{ + op: instanceCopyClone, + sourceDigest: instanceDigest, + cloneCompressionVariant: compressionVariant, + clonePlatform: instanceDetails.ReadOnly.Platform, + cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations), + }) + // add current compression to the list so that we don’t create duplicate clones + compressionList.Add(compressionVariant.Algorithm.Name()) + } + } + } + return res, nil +} + +// copyMultipleImages copies some or all of an image list's instances, using +// c.policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. + manifestList, manifestType, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest list: %w", err) + } + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) + } + updatedList := originalList.CloneInternal() + + sigs, err := c.sourceSignatures(ctx, c.unparsedToplevel, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copySingleImage. 
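As an aside on the digested-reference handling above: whether a destination name pins a digest is discovered through a plain type assertion. A hedged sketch using github.com/containers/image/v5/docker/reference; the image name is an example and the digest is just the SHA-256 of the empty string:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	name := "docker.io/library/busybox@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	named, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		panic(err)
	}
	// Same type assertion as in copyMultipleImages above.
	if digested, ok := named.(reference.Digested); ok {
		fmt.Println("destination pins digest:", digested.Digest())
	} else {
		fmt.Println("destination is not digested")
	}
}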
+ cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := c.options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. + instanceDigests := updatedList.Instances() + instanceEdits := []internalManifest.ListEdit{} + instanceCopyList, err := prepareInstanceCopies(updatedList, instanceDigests, c.options) + if err != nil { + return nil, fmt.Errorf("preparing instances for copy: %w", err) + } + c.Printf("Copying %d images generated from %d images in list\n", len(instanceCopyList), len(instanceDigests)) + for i, instance := range instanceCopyList { + // Update instances to be edited by their `ListOperation` and + // populate necessary fields. + switch instance.op { + case instanceCopyCopy: + logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat}) + if err != nil { + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. 
+ instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpUpdate, + UpdateOldDigest: instance.sourceDigest, + UpdateDigest: updated.manifestDigest, + UpdateSize: int64(len(updated.manifest)), + UpdateCompressionAlgorithms: updated.compressionAlgorithms, + UpdateMediaType: updated.manifestMIMEType}) + case instanceCopyClone: + logrus.Debugf("Replicating instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) + c.Printf("Replicating image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{ + requireCompressionFormatMatch: true, + compressionFormat: &instance.cloneCompressionVariant.Algorithm, + compressionLevel: instance.cloneCompressionVariant.Level}) + if err != nil { + return nil, fmt.Errorf("replicating image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) + } + // Record the result of a possible conversion here. + instanceEdits = append(instanceEdits, internalManifest.ListEdit{ + ListOperation: internalManifest.ListOpAdd, + AddDigest: updated.manifestDigest, + AddSize: int64(len(updated.manifest)), + AddMediaType: updated.manifestMIMEType, + AddPlatform: instance.clonePlatform, + AddAnnotations: instance.cloneAnnotations, + AddCompressionAlgorithms: updated.compressionAlgorithms, + }) + default: + return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op) + } + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.EditInstances(instanceEdits); err != nil { + return nil, fmt.Errorf("updating manifest list: %w", err) + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + var attemptedList internalManifest.ListPublic = updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. + attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) + } + + // If we can't just use the original value, but we have to change it, flag an error. 
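The upload loop that follows implements a simple try-in-preference-order pattern: attempt the selected list type, accumulate failures, and stop at the first success. A stripped-down sketch of just that control flow; the put function is a stand-in for c.dest.PutManifest:

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the destination's PutManifest; only one type "succeeds" here.
	put := func(mimeType string) error {
		if mimeType != "application/vnd.oci.image.index.v1+json" {
			return errors.New("rejected by registry")
		}
		return nil
	}

	candidates := []string{
		"application/vnd.docker.distribution.manifest.list.v2+json",
		"application/vnd.oci.image.index.v1+json",
	}
	var errs []string
	for _, t := range candidates {
		if err := put(t); err != nil {
			errs = append(errs, fmt.Sprintf("%s(%v)", t, err))
			continue
		}
		errs = nil // success: clear accumulated failures, as the code above does
		fmt.Println("uploaded as", t)
		break
	}
	if errs != nil {
		fmt.Println("all formats failed:", strings.Join(errs, ", "))
	}
}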
+ if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + newSigs, err := c.createSignatures(ctx, manifestList, c.options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(sigs, newSigs...) + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) + } + + return manifestList, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 00000000..ce078234 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,160 @@ +package copy + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) +} + +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) +} + +// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. +type progressBar struct { + *mpb.Bar + originalSize int64 // or -1 if unknown +} + +// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter +// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo() +// to print a single line instead. +// +// NOTE: Every progress bar created within a progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. 
That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// by convention, we don't leave progress bars in partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// onComplete will replace prefix once the bar/spinner has completed
+	onComplete = prefix + " " + onComplete
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.Any(customPartialBlobDecorFunc),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+					decor.Name(" | "),
+					decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+				),
+			)
+		}
+	} else {
+		bar = pool.New(0,
+			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+			mpb.BarFillerClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.OnComplete(decor.Name(prefix), onComplete),
+			),
+			mpb.AppendDecorators(
+				decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
+			),
+		)
+	}
+	return &progressBar{
+		Bar:          bar,
+		originalSize: info.Size,
+	}
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+	if c.progressOutput == io.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by possibly advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+	if bar.originalSize > 0 {
+		// We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+		// after a progress bar is created with a definite total.
+		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+	} else {
+		// -1 = unknown size
+		// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
+		// as unknown size, in particular they are not configured to be marked as
+		// complete on bar.Current() reaching bar.total (because that would happen already
+		// when creating the progress bar).
+		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
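For readers unfamiliar with the mpb API used throughout this file, a minimal standalone sketch of a pool with one sized bar; the width, total, and increments are arbitrary, and every bar completes so Wait does not hang (see the NOTE above):

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40))
	bar := pool.AddBar(100,
		mpb.PrependDecorators(decor.Name("Copying blob deadbeef ")),
		mpb.AppendDecorators(decor.CountersKibiByte("%.1f / %.1f")),
	)
	for i := 0; i < 10; i++ {
		bar.IncrBy(10) // pretend we copied another chunk
		time.Sleep(50 * time.Millisecond)
	}
	pool.Wait()
}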
+ bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete. + } +} + +// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar +// with the number of received bytes. +type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *progressBar // A progress bar updated with the number of bytes read so far +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.bar.IncrInt64(total) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_channel.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go new file mode 100644 index 00000000..d5e9e09b --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -0,0 +1,79 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/v5/types" +) + +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. +type progressReader struct { + source io.Reader + channel chan<- types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastUpdate time.Time + offset uint64 + offsetUpdate uint64 +} + +// newProgressReader creates a new progress reader for: +// `source`: The source when internally reading bytes +// `channel`: The reporter channel to which the progress will be sent +// `interval`: The update interval to indicate how often the progress should update +// `artifact`: The blob metadata which is currently being progressed +func newProgressReader( + source io.Reader, + channel chan<- types.ProgressProperties, + interval time.Duration, + artifact types.BlobInfo, +) *progressReader { + // The progress reader constructor informs the progress channel + // that a new artifact will be read + channel <- types.ProgressProperties{ + Event: types.ProgressEventNewArtifact, + Artifact: artifact, + } + return &progressReader{ + source: source, + channel: channel, + interval: interval, + artifact: artifact, + lastUpdate: time.Now(), + offset: 0, + offsetUpdate: 0, + } +} + +// reportDone indicates to the internal channel that the progress has been +// finished +func (r *progressReader) reportDone() { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventDone, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } +} + +// Read continuously reads bytes into the progress reader and reports the +// status via the internal channel +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + r.offsetUpdate += uint64(n) + + // Fire the progress reader in the provided interval + if time.Since(r.lastUpdate) > r.interval { + r.channel <- types.ProgressProperties{ + Event: types.ProgressEventRead, + Artifact: r.artifact, + Offset: r.offset, + OffsetUpdate: r.offsetUpdate, + } + r.lastUpdate = 
time.Now() + r.offsetUpdate = 0 + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go new file mode 100644 index 00000000..0ec54ded --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -0,0 +1,115 @@ +package copy + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + internalsig "github.com/containers/image/v5/internal/signature" + internalSigner "github.com/containers/image/v5/internal/signer" + "github.com/containers/image/v5/signature/sigstore" + "github.com/containers/image/v5/signature/simplesigning" + "github.com/containers/image/v5/transports" +) + +// setupSigners initializes c.signers. +func (c *copier) setupSigners() error { + c.signers = append(c.signers, c.options.Signers...) + // c.signersToClose is intentionally not updated with c.options.Signers. + + // We immediately append created signers to c.signers, and we rely on c.close() to clean them up; so we don’t need + // to clean up any created signers on failure. + + if c.options.SignBy != "" { + opts := []simplesigning.Option{ + simplesigning.WithKeyFingerprint(c.options.SignBy), + } + if c.options.SignPassphrase != "" { + opts = append(opts, simplesigning.WithPassphrase(c.options.SignPassphrase)) + } + signer, err := simplesigning.NewSigner(opts...) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + if c.options.SignBySigstorePrivateKeyFile != "" { + signer, err := sigstore.NewSigner( + sigstore.WithPrivateKeyFile(c.options.SignBySigstorePrivateKeyFile, c.options.SignSigstorePrivateKeyPassphrase), + ) + if err != nil { + return err + } + c.signers = append(c.signers, signer) + c.signersToClose = append(c.signersToClose, signer) + } + + return nil +} + +// sourceSignatures returns signatures from unparsedSource, +// and verifies that they can be used (to avoid copying a large image when we +// can tell in advance that it would ultimately fail) +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, + gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { + var sigs []internalsig.Signature + if c.options.RemoveSignatures { + sigs = []internalsig.Signature{} + } else { + c.Printf("%s\n", gettingSignaturesMessage) + s, err := unparsed.UntrustedSignatures(ctx) + if err != nil { + return nil, fmt.Errorf("reading signatures: %w", err) + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("%s\n", checkingDestMessage) + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err) + } + } + return sigs, nil +} + +// createSignatures creates signatures for manifest and an optional identity. +func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity reference.Named) ([]internalsig.Signature, error) { + if len(c.signers) == 0 { + // We must exit early here, otherwise copies with no Docker reference wouldn’t be possible. 
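setupSigners above wires up optional GPG and sigstore signers from the copy options. A hedged sketch of constructing just the simple-signing signer with the options shown above; the fingerprint is a placeholder, and actual signing may still fail later if no matching key exists in the GPG keyring:

package main

import (
	"fmt"

	"github.com/containers/image/v5/signature/simplesigning"
)

func main() {
	// Placeholder fingerprint; a real one identifies a key in the GPG keyring.
	opts := []simplesigning.Option{
		simplesigning.WithKeyFingerprint("0123456789ABCDEF"),
	}
	signer, err := simplesigning.NewSigner(opts...)
	if err != nil {
		fmt.Println("creating signer:", err)
		return
	}
	defer signer.Close() // mirrors the c.signersToClose cleanup above
	fmt.Println("signer ready")
}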
+ return nil, nil + } + + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + } + + res := make([]internalsig.Signature, 0, len(c.signers)) + for signerIndex, signer := range c.signers { + msg := internalSigner.ProgressMessage(signer) + if len(c.signers) == 1 { + c.Printf("Creating signature: %s\n", msg) + } else { + c.Printf("Creating signature %d: %s\n", signerIndex+1, msg) + } + newSig, err := internalSigner.SignImageManifest(ctx, signer, manifest, identity) + if err != nil { + if len(c.signers) == 1 { + return nil, fmt.Errorf("creating signature: %w", err) + } else { + return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err) + } + } + res = append(res, newSig) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go new file mode 100644 index 00000000..37b1bfe9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -0,0 +1,904 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb/v8" + "golang.org/x/exp/slices" +) + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + requireCompressionFormatMatch bool +} + +type copySingleImageOptions struct { + requireCompressionFormatMatch bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int +} + +// copySingleImageResult carries data produced by copySingleImage +type copySingleImageResult struct { + manifest []byte + manifestMIMEType string + manifestDigest digest.Digest + compressionAlgorithms []compressiontypes.Algorithm +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using c.policyContext to validate +// source image admissibility. 
+func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest, opts copySingleImageOptions) (copySingleImageResult, error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return copySingleImageResult{}, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + } + if multiImage { + return copySingleImageResult{}, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := c.policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return copySingleImageResult{}, fmt.Errorf("Source image rejected: %w", err) + } + src, err := image.FromUnparsedImage(ctx, c.options.SourceCtx, unparsedImage) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + manifestList, _, err := c.unparsedToplevel.Manifest(ctx) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("reading manifest from source image: %w", err) + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return copySingleImageResult{}, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return copySingleImageResult{}, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + return copySingleImageResult{}, err + } + + sigs, err := c.sourceSignatures(ctx, src, + "Getting image source signatures", + "Checking if image destination supports signatures") + if err != nil { + return copySingleImageResult{}, err + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. 
+ cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if c.options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + cannotModifyManifestReason: cannotModifyManifestReason, + requireCompressionFormatMatch: opts.requireCompressionFormatMatch, + } + if opts.compressionFormat != nil { + ic.compressionFormat = opts.compressionFormat + ic.compressionLevel = opts.compressionLevel + } else if c.options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + ic.compressionFormat = c.options.DestinationCtx.CompressionFormat + ic.compressionLevel = c.options.DestinationCtx.CompressionLevel + } + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return copySingleImageResult{}, err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil + + manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: c.options.ForceManifestMIMEType, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) + if err != nil { + return copySingleImageResult{}, err + } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + // If enabled, fetch and compare the destination's manifest. 
And as an optimization skip updating the destination iff equal
+	if c.options.OptimizeDestinationImageAlreadyExists {
+		shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signature updates only and skipping the image's layers/manifest copy if possible
+		noPendingManifestUpdates := ic.noPendingManifestUpdates()
+
+		logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
+		if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
+			matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
+			if err != nil {
+				logrus.Warnf("Failed to compare destination image manifest: %v", err)
+				return copySingleImageResult{}, err
+			}
+
+			if matchedResult != nil {
+				c.Printf("Skipping: image already present at destination\n")
+				return *matchedResult, nil
+			}
+		}
+	}
+
+	compressionAlgos, err := ic.copyLayers(ctx)
+	if err != nil {
+		return copySingleImageResult{}, err
+	}
+
+	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+	// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
+	// we're altering how they're compressed. If the process succeeds, fine…
+	manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
+	wipResult := copySingleImageResult{
+		manifest:         manifestBytes,
+		manifestMIMEType: manifestConversionPlan.preferredMIMEType,
+		manifestDigest:   manifestDigest,
+	}
+	if err != nil {
+		logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
+		// … if it fails, and the failure is either because the manifest is rejected by the registry, or
+		// because we failed to create a manifest of the specified type because the specific manifest type
+		// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+		// have other options available that could still succeed.
+		var manifestTypeRejectedError types.ManifestTypeRejectedError
+		var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
+		isManifestRejected := errors.As(err, &manifestTypeRejectedError)
+		isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
+		if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+			// We don’t have other options.
+			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+			// Don’t bother the user with MIME types if we have no choice.
+			return copySingleImageResult{}, err
+		}
+		// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
+		// So if we are here, we will definitely be trying to convert the manifest.
+		// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
+		// so let’s bail out early and with a better error message.
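The error handling above hinges on errors.As to detect the two retryable failure classes before falling back to other manifest types. A generic sketch of that pattern with a custom error type; rejectedError here is illustrative, not the real types.ManifestTypeRejectedError:

package main

import (
	"errors"
	"fmt"
)

// rejectedError stands in for types.ManifestTypeRejectedError in this sketch.
type rejectedError struct{ mimeType string }

func (e rejectedError) Error() string { return "registry rejected " + e.mimeType }

func main() {
	err := fmt.Errorf("writing manifest: %w", rejectedError{mimeType: "application/x"})

	var rejected rejectedError
	if errors.As(err, &rejected) {
		// Retryable: the registry refused this MIME type, so try the next candidate.
		fmt.Println("falling back, was:", rejected.mimeType)
	} else {
		fmt.Println("fatal:", err)
	}
}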
+ if ic.cannotModifyManifestReason != "" { + return copySingleImageResult{}, fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + wipResult = copySingleImageResult{ + manifest: attemptedManifest, + manifestMIMEType: manifestMIMEType, + manifestDigest: attemptedManifestDigest, + } + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return copySingleImageResult{}, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + if targetInstance != nil { + targetInstance = &wipResult.manifestDigest + } + + newSigs, err := c.createSignatures(ctx, wipResult.manifest, c.options.SignIdentity) + if err != nil { + return copySingleImageResult{}, err + } + sigs = append(sigs, newSigs...) + + if len(sigs) > 0 { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return copySingleImageResult{}, fmt.Errorf("writing signatures: %w", err) + } + } + wipResult.compressionAlgorithms = compressionAlgos + res := wipResult // We are done + return res, nil +} + +// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. +func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + c, err := src.OCIConfig(ctx) + if err != nil { + return fmt.Errorf("parsing image configuration: %w", err) + } + wantedPlatforms, err := platform.WantedPlatforms(sys) + if err != nil { + return fmt.Errorf("getting current platform information %#v: %w", sys, err) + } + + options := newOrderedSet() + match := false + for _, wantedPlatform := range wantedPlatforms { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). + if platform.MatchesPlatform(c.Platform, wantedPlatform) { + match = true + break + } + options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant)) + } + if !match { + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", + c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) + } + } + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. 
+	}
+	destRef := ic.c.dest.Reference().DockerReference()
+	if destRef == nil {
+		return nil // Destination does not care about Docker references
+	}
+	if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
+		return nil // No reference embedded in the manifest, or it matches destRef already.
+	}
+
+	if ic.cannotModifyManifestReason != "" {
+		return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+			transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
+	}
+	ic.manifestUpdates.EmbeddedDockerReference = destRef
+	return nil
+}
+
+func (ic *imageCopier) noPendingManifestUpdates() bool {
+	return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+}
+
+// compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the
+// (possibly remote) destination). If they are equal, it returns a full copySingleImageResult; otherwise it returns nil.
+func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) {
+	srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob)
+	if err != nil {
+		return nil, fmt.Errorf("calculating manifest digest: %w", err)
+	}
+
+	destImageSource, err := ic.c.dest.Reference().NewImageSource(ctx, ic.c.options.DestinationCtx)
+	if err != nil {
+		logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
+		return nil, nil
+	}
+	defer destImageSource.Close()
+
+	destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
+	if err != nil {
+		logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
+		return nil, nil
+	}
+
+	destManifestDigest, err := manifest.Digest(destManifest)
+	if err != nil {
+		return nil, fmt.Errorf("calculating manifest digest: %w", err)
+	}
+
+	logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
+	if srcManifestDigest != destManifestDigest {
+		return nil, nil
+	}
+
+	compressionAlgos := set.New[string]()
+	for _, srcInfo := range ic.src.LayerInfos() {
+		if compression := compressionAlgorithmFromMIMEType(srcInfo); compression != nil { // compressionAlgorithmFromMIMEType can return nil for uncompressed layers
+			compressionAlgos.Add(compression.Name())
+		}
+	}
+
+	algos, err := algorithmsByNames(compressionAlgos.Values())
+	if err != nil {
+		return nil, err
+	}
+
+	// Destination and source manifests, types and digests should all be equivalent
+	return &copySingleImageResult{
+		manifest:              destManifest,
+		manifestMIMEType:      destManifestType,
+		manifestDigest:        srcManifestDigest,
+		compressionAlgorithms: algos,
+	}, nil
+}
+
+// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
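The implementation below fans the per-layer work out to goroutines while a weighted semaphore bounds concurrency and a WaitGroup gates completion; each goroutine writes only its own slot of a pre-sized results slice, so no mutex is needed. As a minimal, self-contained sketch of that pattern (hypothetical names such as copyOne; assuming golang.org/x/sync/semaphore):

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

// copyOne is a stand-in for the real per-layer copy.
func copyOne(layer string) error { return nil }

func main() {
	ctx := context.Background()
	layers := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}

	sem := semaphore.NewWeighted(2) // at most 2 copies in flight
	var wg sync.WaitGroup
	results := make([]error, len(layers)) // slot i is written only by goroutine i

	for i, l := range layers {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // can only fail with ctx.Err()
		}
		wg.Add(1)
		go func(i int, l string) {
			defer sem.Release(1)
			defer wg.Done()
			results[i] = copyOne(l)
		}(i, l)
	}
	wg.Wait()
	fmt.Println(results)
}
```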
+func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return nil, err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if ic.cannotModifyManifestReason != "" { + return nil, fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // The manifest is used to extract the information whether a given + // layer is empty. + man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) + if err != nil { + return nil, err + } + manifestLayerInfos := man.LayerInfos() + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if !ic.c.options.DownloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. + if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() + var encryptAll bool + if ic.c.options.OciEncryptLayers != nil { + encryptAll = len(*ic.c.options.OciEncryptLayers) == 0 + totalLayers := len(srcInfos) + for _, l := range *ic.c.options.OciEncryptLayers { + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. + default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } + } + + if encryptAll { + for i := 0; i < len(srcInfos); i++ { + layersToEncrypt.Add(i) + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) + if err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. 
+ return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return nil, err + } + + compressionAlgos := set.New[string]() + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + for i, cld := range data { + if cld.err != nil { + return nil, cld.err + } + if cld.destInfo.CompressionAlgorithm != nil { + compressionAlgos.Add(cld.destInfo.CompressionAlgorithm.Name()) + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + algos, err := algorithmsByNames(compressionAlgos.Values()) + if err != nil { + return nil, err + } + return algos, nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. +func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + var pendingImage types.Image = ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. + // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. 
+ return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", fmt.Errorf("reading manifest: %w", err) + } + + if err := ic.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", fmt.Errorf("writing manifest: %w", err) + } + return man, manifestDigest, nil +} + +// copyConfig copies config.json, if any, from src to dest. +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { + srcInfo := src.ConfigInfo() + if srcInfo.Digest != "" { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying config: %w", err) + } + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + + destInfo, err := func() (types.BlobInfo, error) { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") + defer bar.Abort(false) + ic.c.printCopyInfo("config", srcInfo) + + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) + } + + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) + if err != nil { + return types.BlobInfo{}, err + } + + bar.mark100PercentComplete() + return destInfo, nil + }() + if err != nil { + return err + } + if destInfo.Digest != srcInfo.Digest { + return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) + } + } + return nil +} + +// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. +// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. +type diffIDResult struct { + digest digest.Digest + err error +} + +func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm { + // This MIME type → compression mapping belongs in manifest-specific code in our manifest + // package (but we should preferably replace/change UpdatedImage instead of productizing + // this workaround). 
+	switch srcInfo.MediaType {
+	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+		return &compression.Gzip
+	case imgspecv1.MediaTypeImageLayerZstd:
+		return &compression.Zstd
+	}
+	return nil
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused, but srcRef can be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+	// If the srcInfo doesn't contain compression information, try to compute it from the
+	// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+	// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+	// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+	// which uses the compression information to compute the updated MediaType values.
+	// (Sadly UpdatedImage() is documented to not update MediaTypes from
+	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+	if srcInfo.CompressionAlgorithm == nil {
+		srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
+	}
+
+	ic.c.printCopyInfo("blob", srcInfo)
+
+	diffIDIsNeeded := false
+	var cachedDiffID digest.Digest = ""
+	if ic.diffIDsAreNeeded {
+		cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+		diffIDIsNeeded = cachedDiffID == ""
+	}
+	// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+	// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+	// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+	encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.options.OciDecryptConfig != nil)
+	canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+	// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+	if canAvoidProcessingCompleteLayer {
+		canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+		logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+			srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+		canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
+		// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
+		// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
+		// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
+		// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
+		// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
+		// the ImageDestination interface lets us pass in.
+		var requiredCompression *compressiontypes.Algorithm
+		var originalCompression *compressiontypes.Algorithm
+		if ic.requireCompressionFormatMatch {
+			requiredCompression = ic.compressionFormat
+			originalCompression = srcInfo.CompressionAlgorithm
+		}
+		reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+			Cache:               ic.c.blobInfoCache,
+			CanSubstitute:       canSubstitute,
+			EmptyLayer:          emptyLayer,
+			LayerIndex:          &layerIndex,
+			SrcRef:              srcRef,
+			RequiredCompression: requiredCompression,
+			OriginalCompression: originalCompression,
+		})
+		if err != nil {
+			return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
+		}
+		if reused {
+			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			func() { // A scope for defer
+				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
+				defer bar.Abort(false)
+				bar.mark100PercentComplete()
+			}()
+
+			// Throw an event that the layer has been skipped
+			if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
+				ic.c.options.Progress <- types.ProgressProperties{
+					Event:    types.ProgressEventSkipped,
+					Artifact: srcInfo,
+				}
+			}
+
+			return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
+		}
+	}
+
+	// A partial pull is managed by the destination storage, which decides what portions
+	// of the source file are not known yet and must be fetched.
+	// Attempt a partial pull only when the source supports retrieving a blob partially and
+	// the destination supports putting one.
+	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
+		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
+			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+			hideProgressBar := true
+			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+ bar.Abort(hideProgressBar) + }() + + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, + } + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + if err == nil { + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() + hideProgressBar = false + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob) + } + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{} + }(); reused { + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + defer bar.Abort(false) + + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } + diffID = diffIDResult.digest + } + } + + bar.mark100PercentComplete() + return blobInfo, diffID, nil + }() +} + +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. 
+		Annotations:          inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+		MediaType:            inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+		CompressionOperation: reusedBlob.CompressionOperation,
+		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
+	}
+	// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+	// if the blob was substituted; otherwise, fill it in based
+	// on what we know from the srcInfos we were given.
+	if reusedBlob.Digest == inputInfo.Digest {
+		res.CompressionOperation = inputInfo.CompressionOperation
+		res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+	}
+	return res
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+		}()
+
+		getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+
+	blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses it using decompressor if necessary, and sends its digest, and status, if any, to dest.
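This goroutine is one half of a pipe-based pattern: copyLayerFromStream above tees the layer bytes into an io.Pipe, and the goroutine digests the (optionally decompressed) pipe output, reporting on a buffered channel so the send cannot block once the caller has bailed out. A standalone sketch of the idea (stand-in data; assuming github.com/opencontainers/go-digest):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	src := strings.NewReader("uncompressed layer bytes") // stand-in for a blob stream
	pr, pw := io.Pipe()

	results := make(chan digest.Digest, 1) // buffered: the send never blocks
	go func() {
		d, err := digest.Canonical.FromReader(pr) // a real decompressor would wrap pr here
		pr.CloseWithError(err)
		results <- d // the real code sends a (digest, err) pair instead
	}()

	// “Copy to the destination” while teeing every byte into the digester.
	if _, err := io.Copy(io.Discard, io.TeeReader(src, pw)); err != nil {
		pw.CloseWithError(err)
		return
	}
	pw.Close()
	fmt.Println("DiffID:", <-results)
}
```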
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + defer s.Close() + stream = s + } + + return digest.Canonical.FromReader(stream) +} + +// algorithmsByNames returns slice of Algorithms from slice of Algorithm Names +func algorithmsByNames(names []string) ([]compressiontypes.Algorithm, error) { + result := []compressiontypes.Algorithm{} + for _, name := range names { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, err + } + result = append(result, algo) + } + return result, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go new file mode 100644 index 00000000..32ae1ae8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -0,0 +1,55 @@ +package explicitfilepath + +import ( + "fmt" + "os" + "path/filepath" +) + +// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. +// To do so, all elements of the input path must exist; as a special case, the final component may be +// a non-existent name (but not a symlink pointing to a non-existent name) +// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. +func ResolvePathToFullyExplicit(path string) (string, error) { + switch _, err := os.Lstat(path); { + case err == nil: + return resolveExistingPathToFullyExplicit(path) + case os.IsNotExist(err): + parent, file := filepath.Split(path) + resolvedParent, err := resolveExistingPathToFullyExplicit(parent) + if err != nil { + return "", err + } + if file == "." || file == ".." { + // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. + // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. + // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components + // in the resulting path, and especially not at the end. + return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path) + } + resolvedPath := filepath.Join(resolvedParent, file) + // As a sanity check, ensure that there are no "." or ".." components. + cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. 
+ return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go new file mode 100644 index 00000000..632ee7c4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -0,0 +1,82 @@ +package archive + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + writer *Writer // Should be closed if closeWriter + closeWriter bool +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + + var writer *Writer + var closeWriter bool + if ref.writer != nil { + writer = ref.writer + closeWriter = false + } else { + w, err := NewWriter(sys, ref.path) + if err != nil { + return nil, err + } + writer = w + closeWriter = true + } + tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + writer: writer, + closeWriter: closeWriter, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + if d.closeWriter { + return d.writer.Close() + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	d.writer.imageCommitted()
+	if d.closeWriter {
+		// We could do this only in .Close(), but failures in .Close() are much more likely to be
+		// ignored by callers that use defer. So, in single-image destinations, try to complete
+		// the archive here.
+		// But if Commit() is never called, let .Close() clean up.
+		err := d.writer.Close()
+		d.closeWriter = false
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
new file mode 100644
index 00000000..875a1525
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -0,0 +1,121 @@
+package archive
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+)
+
+// Reader manages a single Docker archive, allows listing its contents and accessing
+// individual images with less overhead than creating image references individually
+// (because the archive is, if necessary, copied or decompressed only once).
+type Reader struct {
+	path    string // The original, user-specified path; not the maintained temporary file, if any
+	archive *tarfile.Reader
+}
+
+// NewReader returns a Reader for path.
+// The caller should call .Close() on the returned object.
+func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
+	archive, err := tarfile.NewReaderFromFile(sys, path)
+	if err != nil {
+		return nil, err
+	}
+	return &Reader{
+		path:    path,
+		archive: archive,
+	}, nil
+}
+
+// Close deletes temporary files associated with the Reader, if any.
+func (r *Reader) Close() error {
+	return r.archive.Close()
+}
+
+// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
+// and a variant of imageReference that points at the same image within the reader.
+// The caller should call .Close() on the returned Reader.
+func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
+	standalone, ok := ref.(archiveReference)
+	if !ok {
+		return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+	}
+	if standalone.archiveReader != nil {
+		return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+	}
+	reader, err := NewReader(sys, standalone.path)
+	if err != nil {
+		return nil, nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			reader.Close()
+		}
+	}()
+	readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	succeeded = true
+	return reader, readerRef, nil
+}
+
+// List returns a set of references for images in the Reader,
+// grouped by the image the references point to.
+// The references are valid only until the Reader is closed.
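A hedged usage sketch for Reader and the List method defined just below (the archive path is illustrative, and error handling is shortened):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	// NewReader copies/decompresses the archive at most once for all lookups.
	reader, err := archive.NewReader(nil, "/tmp/images.tar") // illustrative path
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	images, err := reader.List() // one inner slice per image, one reference per tag
	if err != nil {
		panic(err)
	}
	for i, refs := range images {
		for _, ref := range refs {
			fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
		}
	}
}
```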
+func (r *Reader) List() ([][]types.ImageReference, error) {
+	res := [][]types.ImageReference{}
+	for imageIndex, image := range r.archive.Manifest {
+		refs := []types.ImageReference{}
+		for _, tag := range image.RepoTags {
+			parsedTag, err := reference.ParseNormalizedNamed(tag)
+			if err != nil {
+				return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
+			}
+			nt, ok := parsedTag.(reference.NamedTagged)
+			if !ok {
+				return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+			}
+			ref, err := newReference(r.path, nt, -1, r.archive, nil)
+			if err != nil {
+				return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
+			}
+			refs = append(refs, ref)
+		}
+		if len(refs) == 0 {
+			ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
+			if err != nil {
+				return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
+			}
+			refs = append(refs, ref)
+		}
+		res = append(res, refs)
+	}
+	return res, nil
+}
+
+// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
+// (i.e. exposing the short names before normalization).
+// The function reports an error if ref does not identify a single image.
+// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
+// if ref contains a source index, or neither a NamedTagged nor a source index, all tags
+// matching the image are returned.
+// Almost all users should use List() or ImageReference.DockerReference() instead.
+func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
+	archiveRef, ok := ref.(archiveReference)
+	if !ok {
+		return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+	}
+	manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
+	if err != nil {
+		return nil, err
+	}
+	if tagIndex != -1 {
+		return []string{manifestItem.RepoTags[tagIndex]}, nil
+	}
+	return manifestItem.RepoTags, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go
new file mode 100644
index 00000000..c4ab9a8c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/src.go
@@ -0,0 +1,41 @@
+package archive
+
+import (
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+type archiveImageSource struct {
+	*tarfile.Source // Implements most of types.ImageSource
+	ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
+	var archive *tarfile.Reader
+	var closeArchive bool
+	if ref.archiveReader != nil {
+		archive = ref.archiveReader
+		closeArchive = false
+	} else {
+		a, err := tarfile.NewReaderFromFile(sys, ref.path)
+		if err != nil {
+			return nil, err
+		}
+		archive = a
+		closeArchive = true
+	}
+	src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex)
+	return &archiveImageSource{
+		Source: src,
+		ref:    ref,
+	}, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
new file mode 100644
index 00000000..39e92cac
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -0,0 +1,206 @@
+package archive
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	ctrImage "github.com/containers/image/v5/internal/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+	return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// See the explanation in archiveReference.PolicyConfigurationIdentity.
+	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+	path string
+	// May be nil to read the only image in an archive, or to create an untagged image.
+	ref reference.NamedTagged
+	// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
+	// Must not be set if ref is set.
+	sourceIndex int
+	// If not nil, must have been created from path (but archiveReader.path may point at a temporary
+	// file, not necessarily path precisely).
+	archiveReader *tarfile.Reader
+	// If not nil, must have been created for path
+	writer *Writer
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
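Before the implementation, a small sketch of the reference shapes ParseReference accepts (paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
)

func main() {
	for _, s := range []string{
		"/tmp/images.tar",                // whole archive; readable only if it contains a single image
		"/tmp/images.tar:busybox:latest", // a (normalized) tagged name
		"/tmp/images.tar:@0",             // zero-based source index
	} {
		ref, err := archive.ParseReference(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", ref.StringWithinTransport())
	}
}
```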
+func ParseReference(refString string) (types.ImageReference, error) {
+	if refString == "" {
+		return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+	}
+
+	path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
+	var nt reference.NamedTagged
+	sourceIndex := -1
+
+	if gotTagOrIndex {
+		// A :tag or :@index was specified.
+		if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
+			i, err := strconv.Atoi(tagOrIndex[1:])
+			if err != nil {
+				return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
+			}
+			if i < 0 {
+				return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
+			}
+			sourceIndex = i
+		} else {
+			ref, err := reference.ParseNormalizedNamed(tagOrIndex)
+			if err != nil {
+				return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
+			}
+			ref = reference.TagNameOnly(ref)
+			refTagged, isTagged := ref.(reference.NamedTagged)
+			if !isTagged { // If ref contains a digest, TagNameOnly does not change it
+				return nil, fmt.Errorf("reference does not include a tag: %s", ref.String())
+			}
+			nt = refTagged
+		}
+	}
+
+	return newReference(path, nt, sourceIndex, nil, nil)
+}
+
+// NewReference returns a Docker archive reference for a path and an optional reference.
+func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
+	return newReference(path, ref, -1, nil, nil)
+}
+
+// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
+func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(path, nil, sourceIndex, nil, nil)
+}
+
+// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
+// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
+func newReference(path string, ref reference.NamedTagged, sourceIndex int,
+	archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
+	if strings.Contains(path, ":") {
+		return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+	}
+	if ref != nil && sourceIndex != -1 {
+		return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+	}
+	if _, isDigest := ref.(reference.Canonical); isDigest {
+		return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+	}
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
+	}
+	return archiveReference{
+		path:          path,
+		ref:           ref,
+		sourceIndex:   sourceIndex,
+		archiveReader: archiveReader,
+		writer:        writer,
+	}, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
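As the WARNING above says, StringWithinTransport omits the transport prefix; transports.ImageName is the form to show users. A short round-trip sketch (illustrative path):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	ref, err := archive.NewIndexReference("/tmp/images.tar", 1) // illustrative path
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // "/tmp/images.tar:@1"
	fmt.Println(transports.ImageName(ref))   // "docker-archive:/tmp/images.tar:@1"
}
```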
+func (ref archiveReference) StringWithinTransport() string { + switch { + case ref.ref != nil: + return fmt.Sprintf("%s:%s", ref.path, ref.ref.String()) + case ref.sourceIndex != -1: + return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex) + default: + return ref.path + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref archiveReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref archiveReference) PolicyConfigurationIdentity() string { + // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. + return "" +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // TODO + return []string{} +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return ctrImage.FromReference(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Not really supported, for safety reasons. 
+ return errors.New("Deleting images not implemented for docker-archive: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go new file mode 100644 index 00000000..11f797c0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go @@ -0,0 +1,103 @@ +package archive + +import ( + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// Writer manages a single in-progress Docker archive and allows adding images to it. +type Writer struct { + path string // The original, user-specified path; not the maintained temporary file, if any + regularFile bool // path refers to a regular file (e.g. not a pipe) + archive *tarfile.Writer + writer io.Closer + + // The following state can only be accessed with the mutex held. + mutex sync.Mutex + hadCommit bool // At least one successful commit has happened +} + +// NewWriter returns a Writer for path. +// The caller should call .Close() on the returned object. +func NewWriter(sys *types.SystemContext, path string) (*Writer, error) { + // path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) + fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, fmt.Errorf("opening file %q: %w", path, err) + } + succeeded := false + defer func() { + if !succeeded { + fh.Close() + } + }() + + fhStat, err := fh.Stat() + if err != nil { + return nil, fmt.Errorf("statting file %q: %w", path, err) + } + regularFile := fhStat.Mode().IsRegular() + if regularFile && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") + } + + archive := tarfile.NewWriter(fh) + + succeeded = true + return &Writer{ + path: path, + regularFile: regularFile, + archive: archive, + writer: fh, + hadCommit: false, + }, nil +} + +// imageCommitted notifies the Writer that at least one image was successfully committed to the stream. +func (w *Writer) imageCommitted() { + w.mutex.Lock() + defer w.mutex.Unlock() + w.hadCommit = true +} + +// Close writes all outstanding data about images to the archive, and +// releases state associated with the Writer, if any. +// No more images can be added after this is called. +func (w *Writer) Close() error { + err := w.archive.Close() + if err2 := w.writer.Close(); err2 != nil && err == nil { + err = err2 + } + if err == nil && w.regularFile && !w.hadCommit { + // Writing to the destination never had a success; delete the destination if we created it. + // This is done primarily because we don’t implement adding another image to a pre-existing image, so if we + // left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without + // the caller manually deleting the partial archive. So, delete it instead. + // + // Archives with at least one successfully created image are left around; they might still be valuable. 
+ // + // Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it. + // Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success. + if err2 := os.Remove(w.path); err2 != nil { + err = err2 + } + } + return err +} + +// NewReference returns an ImageReference that allows adding an image to Writer, +// with an optional reference. +func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) { + return newReference(w.path, destinationRef, -1, nil, w) +} diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go new file mode 100644 index 00000000..7d66ef6b --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -0,0 +1,253 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "strconv" + "strings" + "syscall" + "time" + + "github.com/sirupsen/logrus" +) + +const ( + // bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry + bodyReaderMinimumProgress = 1 * 1024 * 1024 + // bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry + bodyReaderMSSinceLastRetry = 60 * 1_000 +) + +// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob, +// which can transparently resume some (very limited) kinds of aborted connections. +type bodyReader struct { + ctx context.Context + c *dockerClient + path string // path to pass to makeRequest to retry + logURL *url.URL // a string to use in error messages + firstConnectionTime time.Time + + body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. + lastRetryOffset int64 // -1 if N/A + lastRetryTime time.Time // time.Time{} if N/A + offset int64 // Current offset within the blob + lastSuccessTime time.Time // time.Time{} if N/A +} + +// newBodyReader creates a bodyReader for request path in c. +// firstBody is an already correctly opened body for the blob, returning the full blob from the start. +// If reading from firstBody fails, bodyReader may heuristically decide to resume. +func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) { + logURL, err := c.resolveRequestURL(path) + if err != nil { + return nil, err + } + res := &bodyReader{ + ctx: ctx, + c: c, + path: path, + logURL: logURL, + firstConnectionTime: time.Now(), + + body: firstBody, + lastRetryOffset: -1, + lastRetryTime: time.Time{}, + offset: 0, + lastSuccessTime: time.Time{}, + } + return res, nil +} + +// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number. +func parseDecimalInString(s string, start int) (int64, int, error) { + i := start + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + if i == start { + return -1, -1, errors.New("missing decimal number") + } + v, err := strconv.ParseInt(s[start:i], 10, 64) + if err != nil { + return -1, -1, fmt.Errorf("parsing number: %w", err) + } + return v, i, nil +} + +// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it. 
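parseDecimalInString (above) and parseExpectedChar (below) are composed by parseContentRange to parse `bytes <first>-<last>/<complete-length>`, where the complete length may be `*` (unknown). A simplified standalone sketch of that grammar (it omits the stricter checks, e.g. rejecting trailing garbage, that the real parser performs):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parse(hdr string) (first, last, complete int64, err error) {
	rest, ok := strings.CutPrefix(hdr, "bytes ")
	if !ok {
		return -1, -1, -1, fmt.Errorf("missing %q prefix", "bytes ")
	}
	rangePart, lenPart, ok := strings.Cut(rest, "/")
	if !ok {
		return -1, -1, -1, fmt.Errorf("missing '/'")
	}
	f, l, ok := strings.Cut(rangePart, "-")
	if !ok {
		return -1, -1, -1, fmt.Errorf("missing '-'")
	}
	if first, err = strconv.ParseInt(f, 10, 64); err != nil {
		return -1, -1, -1, err
	}
	if last, err = strconv.ParseInt(l, 10, 64); err != nil {
		return -1, -1, -1, err
	}
	if lenPart == "*" { // unknown length, mapped to -1 as in the real parser
		return first, last, -1, nil
	}
	if complete, err = strconv.ParseInt(lenPart, 10, 64); err != nil {
		return -1, -1, -1, err
	}
	return first, last, complete, nil
}

func main() {
	fmt.Println(parse("bytes 100-199/1000")) // 100 199 1000 <nil>
	fmt.Println(parse("bytes 100-199/*"))    // 100 199 -1 <nil>
}
```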
+func parseExpectedChar(s string, pos int, expected byte) (int, error) { + if pos == len(s) || s[pos] != expected { + return -1, fmt.Errorf("missing expected %q", expected) + } + return pos + 1, nil +} + +// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1. +func parseContentRange(res *http.Response) (int64, int64, int64, error) { + hdrs := res.Header.Values("Content-Range") + switch len(hdrs) { + case 0: + return -1, -1, -1, errors.New("missing Content-Range: header") + case 1: + break + default: + return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs)) + } + hdr := hdrs[0] + expectedPrefix := "bytes " + if !strings.HasPrefix(hdr, expectedPrefix) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix) + } + first, pos, err := parseDecimalInString(hdr, len(expectedPrefix)) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '-') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + last, pos, err := parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err) + } + pos, err = parseExpectedChar(hdr, pos, '/') + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) + } + completeLength := int64(-1) + if pos < len(hdr) && hdr[pos] == '*' { + pos++ + } else { + completeLength, pos, err = parseDecimalInString(hdr, pos) + if err != nil { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err) + } + } + if pos < len(hdr) { + return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr) + } + return first, last, completeLength, nil +} + +// Read implements io.ReadCloser +func (br *bodyReader) Read(p []byte) (int, error) { + if br.body == nil { + return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted()) + } + n, err := br.body.Read(p) + br.offset += int64(n) + switch { + case err == nil || err == io.EOF: + br.lastSuccessTime = time.Now() + return n, err // Unlike the default: case, don’t log anything. + + case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET): + originalErr := err + redactedURL := br.logURL.Redacted() + if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil { + return n, err + } + + if err := br.body.Close(); err != nil { + logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise + } + br.body = nil + time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede + + headers := map[string][]string{ + "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, + } + res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil) + if err != nil { + return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err) + } + consumedBody := false + defer func() { + if !consumedBody { + res.Body.Close() + } + }() + switch res.StatusCode { + case http.StatusPartialContent: // OK + // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed. 
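+ // (Illustrative, not upstream: resuming at offset 1048576 of a 4194304-byte blob
+ // expects "Content-Range: bytes 1048576-4194303/4194304", which parseContentRange
+ // returns as first=1048576, last=4194303, completeLength=4194304; a server that does
+ // not know the total length sends "bytes 1048576-4194303/*", i.e. completeLength == -1.)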
+ // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation. + first, last, completeLength, err := parseContentRange(res) + if err != nil { + return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err) + } + // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob. + if first != br.offset || (completeLength != -1 && last+1 != completeLength) { + return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength) + } + // Continue below + case http.StatusOK: + return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK) + default: + err := registryHTTPResponseToError(res) + return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err) + } + + logrus.Debugf("Successfully reconnected to %s", redactedURL) + consumedBody = true + br.body = res.Body + br.lastRetryOffset = br.offset + br.lastRetryTime = time.Time{} + return n, nil + + default: + logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err) + return n, err + } +} + +// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. +// If tm is time.Time{}, it returns math.NaN() +func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { + if tm == (time.Time{}) { + return math.NaN() + } + return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 +} + +// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil, +// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic) +func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error { + currentTime := time.Now() + msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime) + msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime) + msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime) + logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms", + redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess) + progress := br.offset - br.lastRetryOffset + if progress >= bodyReaderMinimumProgress { + logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) + return nil + } + if br.lastRetryTime == (time.Time{}) { + logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) + return nil + } + if msSinceLastRetry >= bodyReaderMSSinceLastRetry { + logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry) + return nil + } + logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry) + return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w", + br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr) +} + +// Close 
implements io.ReadCloser +func (br *bodyReader) Close() error { + if br.body == nil { + return nil + } + err := br.body.Close() + br.body = nil + return err +} diff --git a/vendor/github.com/containers/image/v5/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go new file mode 100644 index 00000000..728d32d1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/cache.go @@ -0,0 +1,23 @@ +package docker + +import ( + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// bicTransportScope returns a BICTransportScope appropriate for ref. +func bicTransportScope(ref dockerReference) types.BICTransportScope { + // Blobs can be reused across the whole registry. + return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} +} + +// newBICLocationReference returns a BICLocationReference appropriate for ref. +func newBICLocationReference(ref dockerReference) types.BICLocationReference { + // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). + return types.BICLocationReference{Opaque: ref.ref.Name()} +} + +// parseBICLocationReference returns a repository for encoded lr. +func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { + return reference.ParseNormalizedNamed(lr.Opaque) +} diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go new file mode 100644 index 00000000..0fe91524 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -0,0 +1,147 @@ +// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go +// Copyright 2022 github.com/distribution/distribution authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" +) + +// errNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var errNoErrorsInBody = errors.New("no error details found in HTTP response body") + +// unexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type unexpectedHTTPStatusError struct { + Status string +} + +func (e *unexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +} + +// unexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. 
+type unexpectedHTTPResponseError struct {
+ ParseErr error
+ StatusCode int
+ Response []byte
+}
+
+func (e *unexpectedHTTPResponseError) Error() string {
+ return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+ var errors errcode.Errors
+ body, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ // For backward compatibility, handle irregularly formatted
+ // messages that contain a "details" field.
+ var detailsErr struct {
+ Details string `json:"details"`
+ }
+ err = json.Unmarshal(body, &detailsErr)
+ if err == nil && detailsErr.Details != "" {
+ switch statusCode {
+ case http.StatusUnauthorized:
+ return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+ case http.StatusTooManyRequests:
+ return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+ default:
+ return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+ }
+ }
+
+ if err := json.Unmarshal(body, &errors); err != nil {
+ return &unexpectedHTTPResponseError{
+ ParseErr: err,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ if len(errors) == 0 {
+ // If there was no error specified in the body, return
+ // UnexpectedHTTPResponseError.
+ return &unexpectedHTTPResponseError{
+ ParseErr: errNoErrorsInBody,
+ StatusCode: statusCode,
+ Response: body,
+ }
+ }
+
+ return errors
+}
+
+func makeErrorList(err error) []error {
+ if errL, ok := err.(errcode.Errors); ok {
+ return []error(errL)
+ }
+ return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+ return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// handleErrorResponse returns an error parsed from the HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// unexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
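+// Illustrative, not upstream: a 401 response whose WWW-Authenticate header carries
+//   Bearer realm="https://auth.example.com/token",error="insufficient_scope"
+// (realm hypothetical) is mapped below to errcode.ErrorCodeDenied, merged with
+// whatever error detail the response body parses to.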
+func handleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range dockerChallenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &unexpectedHTTPStatusError{Status: resp.Status} +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go new file mode 100644 index 00000000..288dd1a9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -0,0 +1,1161 @@ +package docker + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/internal/useragent" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + "github.com/docker/go-connections/tlsconfig" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +const ( + dockerHostname = "docker.io" + dockerV1Hostname = "index.docker.io" + dockerRegistry = "registry-1.docker.io" + + resolvedPingV2URL = "%s://%s/v2/" + resolvedPingV1URL = "%s://%s/v1/_ping" + tagsPath = "/v2/%s/tags/list" + manifestPath = "/v2/%s/manifests/%s" + blobsPath = "/v2/%s/blobs/%s" + blobUploadPath = "/v2/%s/blobs/uploads/" + extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" + + minimumTokenLifetimeSeconds = 60 + + extensionSignatureSchemaVersion = 2 // extensionSignature.Version + extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type + + backoffNumIterations = 5 + backoffInitialDelay = 2 * time.Second + backoffMaxDelay = 60 * time.Second +) + +type certPath struct { + path string + absolute bool +} + +var ( + homeCertDir = filepath.FromSlash(".config/containers/certs.d") + perHostCertDirs = []certPath{ + {path: etcDir + "/containers/certs.d", absolute: true}, + {path: etcDir + "/docker/certs.d", absolute: true}, + } +) + +// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: +// 
signature represents a Docker image signature.
+type extensionSignature struct {
+ Version int `json:"schemaVersion"` // Version specifies the schema version
+ Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format
+ Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1"
+ Content []byte `json:"content"` // Content contains the signature
+}
+
+// signatureList represents a list of Docker image signatures.
+type extensionSignatureList struct {
+ Signatures []extensionSignature `json:"signatures"`
+}
+
+type bearerToken struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
+}
+
+// dockerClient is configuration for dealing with a single container registry.
+type dockerClient struct {
+ // The following members are set by newDockerClient and do not change afterwards.
+ sys *types.SystemContext
+ registry string
+ userAgent string
+
+ // tlsClientConfig is set up by newDockerClient and will be used and updated
+ // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
+ tlsClientConfig *tls.Config
+ // The following members are not set by newDockerClient and must be set by callers if needed.
+ auth types.DockerAuthConfig
+ registryToken string
+ signatureBase lookasideStorageBase
+ useSigstoreAttachments bool
+ scope authScope
+
+ // The following members are detected registry properties:
+ // They are set after a successful detectProperties(), and never change afterwards.
+ client *http.Client
+ scheme string
+ challenges []challenge
+ supportsSignatures bool
+
+ // Private state for setupRequestAuth (key: string, value: bearerToken)
+ tokenCache sync.Map
+ // Private state for detectProperties:
+ detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+ detectPropertiesError error // detectPropertiesError caches the initial error.
+ // Private state for logResponseWarnings
+ reportedWarningsLock sync.Mutex
+ reportedWarnings *set.Set[string]
+}
+
+type authScope struct {
+ resourceType string
+ remoteName string
+ actions string
+}
+
+// sendAuth determines whether we need authentication for v2 or v1 endpoint.
+type sendAuth int
+
+const (
+ // v2 endpoint with authentication.
+ v2Auth sendAuth = iota
+ // v1 endpoint with authentication.
+ // TODO: Get v1Auth working
+ // v1Auth
+ // no authentication, works for both v1 and v2.
+ noAuth
+)
+
+func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
+ token := new(bearerToken)
+ if err := json.Unmarshal(blob, &token); err != nil {
+ return nil, err
+ }
+ if token.Token == "" {
+ token.Token = token.AccessToken
+ }
+ if token.ExpiresIn < minimumTokenLifetimeSeconds {
+ token.ExpiresIn = minimumTokenLifetimeSeconds
+ logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+ }
+ if token.IssuedAt.IsZero() {
+ token.IssuedAt = time.Now().UTC()
+ }
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ return token, nil
+}
+
+// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
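+// Illustrative, not upstream (assuming etcDir resolves to "/etc"): for hostPort
+// "registry.example.com:5000" with no overrides in sys, this returns the first
+// existing directory among
+//   $HOME/.config/containers/certs.d/registry.example.com:5000
+//   /etc/containers/certs.d/registry.example.com:5000
+//   /etc/docker/certs.d/registry.example.com:5000
+// and falls back to the last candidate if none of them exists.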
+func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
+ if sys != nil && sys.DockerCertPath != "" {
+ return sys.DockerCertPath, nil
+ }
+ if sys != nil && sys.DockerPerHostCertDirPath != "" {
+ return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
+ }
+
+ var (
+ hostCertDir string
+ fullCertDirPath string
+ )
+
+ for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute {
+ hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path)
+ } else {
+ hostCertDir = perHostCertDir.path
+ }
+
+ fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+ _, err := os.Stat(fullCertDirPath)
+ if err == nil {
+ break
+ }
+ if os.IsNotExist(err) {
+ continue
+ }
+ if os.IsPermission(err) {
+ logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+ continue
+ }
+ return "", err
+ }
+ return fullCertDirPath, nil
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+// signatureBase is always set in the return value
+// The caller must call .Close() on the returned client when done.
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
+ auth, err := config.GetCredentialsForRef(sys, ref.ref)
+ if err != nil {
+ return nil, fmt.Errorf("getting username and password: %w", err)
+ }
+
+ sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
+ if err != nil {
+ return nil, err
+ }
+
+ registry := reference.Domain(ref.ref)
+ client, err := newDockerClient(sys, registry, ref.ref.Name())
+ if err != nil {
+ return nil, err
+ }
+ client.auth = auth
+ if sys != nil {
+ client.registryToken = sys.DockerBearerRegistryToken
+ }
+ client.signatureBase = sigBase
+ client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
+ client.scope.resourceType = "repository"
+ client.scope.actions = actions
+ client.scope.remoteName = reference.Path(ref.ref)
+ return client, nil
+}
+
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+// The caller must call .Close() on the returned client when done.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
+ hostName := registry
+ if registry == dockerHostname {
+ registry = dockerRegistry
+ }
+ tlsClientConfig := &tls.Config{
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ }
+
+ // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+ // because docker/docker does not read the certs.d subdirectory at all in that case.
We use the user-visible + // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because + // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is + // undocumented and may change if docker/docker changes. + certDir, err := dockerCertDir(sys, hostName) + if err != nil { + return nil, err + } + if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil { + return nil, err + } + + // Check if TLS verification shall be skipped (default=false) which can + // be specified in the sysregistriesv2 configuration. + skipVerify := false + reg, err := sysregistriesv2.FindRegistry(sys, reference) + if err != nil { + return nil, fmt.Errorf("loading registries: %w", err) + } + if reg != nil { + if reg.Blocked { + return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys)) + } + skipVerify = reg.Insecure + } + tlsClientConfig.InsecureSkipVerify = skipVerify + + userAgent := useragent.DefaultUserAgent + if sys != nil && sys.DockerRegistryUserAgent != "" { + userAgent = sys.DockerRegistryUserAgent + } + + return &dockerClient{ + sys: sys, + registry: registry, + userAgent: userAgent, + tlsClientConfig: tlsClientConfig, + reportedWarnings: set.New[string](), + }, nil +} + +// CheckAuth validates the credentials by attempting to log into the registry +// returns an error if an error occurred while making the http request or the status code received was 401 +func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { + client, err := newDockerClient(sys, registry, registry) + if err != nil { + return fmt.Errorf("creating new docker client: %w", err) + } + defer client.Close() + client.auth = types.DockerAuthConfig{ + Username: username, + Password: password, + } + + resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + err := registryHTTPResponseToError(resp) + if resp.StatusCode == http.StatusUnauthorized { + err = ErrUnauthorizedForCredentials{Err: err} + } + return err + } + return nil +} + +// SearchResult holds the information of each matching image +// It matches the output returned by the v1 endpoint +type SearchResult struct { + Name string `json:"name"` + Description string `json:"description"` + // StarCount states the number of stars the image has + StarCount int `json:"star_count"` + IsTrusted bool `json:"is_trusted"` + // IsAutomated states whether the image is an automated build + IsAutomated bool `json:"is_automated"` + // IsOfficial states whether the image is an official build + IsOfficial bool `json:"is_official"` +} + +// SearchRegistry queries a registry for images that contain "image" in their name +// The limit is the max number of results desired +// Note: The limit value doesn't work with all registries +// for example registry.access.redhat.com returns all the results without limiting it to the limit value +func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { + type V2Results struct { + // Repositories holds the results returned by the /v2/_catalog endpoint + Repositories []string `json:"repositories"` + } + type V1Results struct { + // Results holds the results returned by the /v1/search endpoint + Results []SearchResult 
`json:"results"` + } + v1Res := &V1Results{} + + // Get credentials from authfile for the underlying hostname + // We can't use GetCredentialsForRef here because we want to search the whole registry. + auth, err := config.GetCredentials(sys, registry) + if err != nil { + return nil, fmt.Errorf("getting username and password: %w", err) + } + + // The /v2/_catalog endpoint has been disabled for docker.io therefore + // the call made to that endpoint will fail. So using the v1 hostname + // for docker.io for simplicity of implementation and the fact that it + // returns search results. + hostname := registry + if registry == dockerHostname { + hostname = dockerV1Hostname + } + + client, err := newDockerClient(sys, hostname, registry) + if err != nil { + return nil, fmt.Errorf("creating new docker client: %w", err) + } + defer client.Close() + client.auth = auth + if sys != nil { + client.registryToken = sys.DockerBearerRegistryToken + } + + // Only try the v1 search endpoint if the search query is not empty. If it is + // empty skip to the v2 endpoint. + if image != "" { + // set up the query values for the v1 endpoint + u := url.URL{ + Path: "/v1/search", + } + q := u.Query() + q.Set("q", image) + q.Set("n", strconv.Itoa(limit)) + u.RawQuery = q.Encode() + + logrus.Debugf("trying to talk to v1 search endpoint") + resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil) + if err != nil { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, "")) + } else { + if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { + return nil, err + } + return v1Res.Results, nil + } + } + } + + logrus.Debugf("trying to talk to v2 search endpoint") + searchRes := []SearchResult{} + path := "/v2/_catalog" + for len(searchRes) < limit { + resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) + return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + err := registryHTTPResponseToError(resp) + logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err) + return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) + } + v2Res := &V2Results{} + if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { + return nil, err + } + + for _, repo := range v2Res.Repositories { + if len(searchRes) == limit { + break + } + if strings.Contains(repo, image) { + res := SearchResult{ + Name: repo, + } + // bugzilla.redhat.com/show_bug.cgi?id=1976283 + // If we have a full match, make sure it's listed as the first result. + // (Note there might be a full match we never see if we reach the result limit first.) + if repo == image { + searchRes = append([]SearchResult{res}, searchRes...) + } else { + searchRes = append(searchRes, res) + } + } + } + + link := resp.Header.Get("Link") + if link == "" { + break + } + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) + if err != nil { + return searchRes, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) 
+ path = linkURL.Path
+ if linkURL.RawQuery != "" {
+ path += "?"
+ path += linkURL.RawQuery
+ }
+ }
+ return searchRes, nil
+}
+
+// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+ if err := c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+
+ requestURL, err := c.resolveRequestURL(path)
+ if err != nil {
+ return nil, err
+ }
+ return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
+}
+
+// resolveRequestURL turns a path for c.makeRequest into a full URL.
+// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs.
+func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
+ urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ res, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// Checks if the auth headers in the response contain an indication of a failed
+// authorization because of an "insufficient_scope" error. If that's the case,
+// returns the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
+ if err == nil && res.StatusCode == http.StatusUnauthorized {
+ challenges := parseAuthHeader(res.Header)
+ for _, challenge := range challenges {
+ if challenge.Scheme == "bearer" {
+ if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
+ if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
+ if newScope, err := parseAuthScope(scope); err == nil {
+ return true, newScope
+ } else {
+ logrus.WithFields(logrus.Fields{
+ "error": err,
+ "scope": scope,
+ "challenge": challenge,
+ }).Error("Failed to parse the authentication scope from the given challenge")
+ }
+ }
+ }
+ }
+ }
+ }
+ return false, nil
+}
+
+// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
+// silently falling back to fallbackDelay if the header is missing or invalid.
+func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
+ after := res.Header.Get("Retry-After")
+ if after == "" {
+ return fallbackDelay
+ }
+ logrus.Debugf("Detected 'Retry-After' header %q", after)
+ // First, check if we have a numerical value.
+ if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+ return time.Duration(num) * time.Second
+ }
+ // Second, check if we have an HTTP date.
+ if t, err := http.ParseTime(after); err == nil {
+ // If the delta between the date and now is positive, use it.
+ delta := time.Until(t)
+ if delta > 0 {
+ return delta
+ }
+ logrus.Debugf("Retry-After date in the past, ignoring it")
+ return fallbackDelay
+ }
+ logrus.Debugf("Invalid Retry-After format, ignoring it")
+ return fallbackDelay
+}
+
+// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
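+// (Illustrative, not upstream: with backoffInitialDelay=2s, backoffMaxDelay=60s and
+// backoffNumIterations=5, a registry that keeps answering 429 without a Retry-After
+// header is retried after roughly 2s, 4s, 8s and 16s before the last response is
+// returned to the caller.)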
+// In case of an HTTP 429 status code in the response, it may automatically retry a few times. +// TODO(runcom): too many arguments here, use a struct +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + delay := backoffInitialDelay + attempts := 0 + for { + res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope) + attempts++ + + // By default we use pre-defined scopes per operation. In + // certain cases, this can fail when our authentication is + // insufficient, then we might be getting an error back with a + // Www-Authenticate Header indicating an insufficient scope. + // + // Check for that and update the client challenges to retry after + // requesting a new token + // + // We only try this on the first attempt, to not overload an + // already struggling server. + // We also cannot retry with a body (stream != nil) as stream + // was already read + if attempts == 1 && stream == nil && auth != noAuth { + if retry, newScope := needsRetryWithUpdatedScope(err, res); retry { + logrus.Debug("Detected insufficient_scope error, will retry request with updated scope") + // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently + // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support + // for more than one extra scope. + res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope) + extraScope = newScope + } + } + if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately + stream != nil || // We can't retry with a body (which is not restartable in the general case) + attempts == backoffNumIterations { + return res, err + } + // close response body before retry or context done + res.Body.Close() + + delay = parseRetryAfter(res, delay) + if delay > backoffMaxDelay { + delay = backoffMaxDelay + } + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds()) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // Nothing + } + delay *= 2 // If the registry does not specify a delay, back off exponentially. + } +} + +// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// streamLen, if not -1, specifies the length of the data expected on stream. +// makeRequest should generally be preferred. +// Note that no exponential back off is performed when receiving an http 429 status code. +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream) + if err != nil { + return nil, err + } + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
+ req.ContentLength = streamLen
+ }
+ req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
+ for n, h := range headers {
+ for _, hh := range h {
+ req.Header.Add(n, hh)
+ }
+ }
+ req.Header.Add("User-Agent", c.userAgent)
+ if auth == v2Auth {
+ if err := c.setupRequestAuth(req, extraScope); err != nil {
+ return nil, err
+ }
+ }
+ logrus.Debugf("%s %s", method, resolvedURL.Redacted())
+ res, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
+ c.logResponseWarnings(res, warnings)
+ }
+ return res, nil
+}
+
+// logResponseWarnings logs warningHeaders from res, if any.
+func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
+ c.reportedWarningsLock.Lock()
+ defer c.reportedWarningsLock.Unlock()
+
+ for _, header := range warningHeaders {
+ warningString := parseRegistryWarningHeader(header)
+ if warningString == "" {
+ logrus.Debugf("Ignored Warning: header from registry: %q", header)
+ } else {
+ if !c.reportedWarnings.Contains(warningString) {
+ c.reportedWarnings.Add(warningString)
+ // Note that reportedWarnings is based only on warningString, so that we don’t
+ // repeat the same warning for every request - but the warning includes the URL;
+ // so it may not be specific to that URL.
+ logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
+ } else {
+ logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
+ }
+ }
+ }
+}
+
+// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
+// values allowed by opencontainers/distribution-spec.
+// It returns the warning string if the header has the expected format, or "" otherwise.
+func parseRegistryWarningHeader(header string) string {
+ const expectedPrefix = `299 - "`
+ const expectedSuffix = `"`
+
+ // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
+ // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
+ if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
+ return ""
+ }
+ header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
+
+ // “Recipients that process the value of a quoted-string MUST handle a quoted-pair
+ // as if it were replaced by the octet following the backslash.”, so let’s do that…
+ res := strings.Builder{}
+ afterBackslash := false
+ for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
+ switch {
+ case c == 0x7F || (c < ' ' && c != '\t'):
+ return "" // Control characters are forbidden
+ case afterBackslash:
+ res.WriteByte(c)
+ afterBackslash = false
+ case c == '"':
+ // This terminates the warn-text, and either a warn-date (forbidden by
+ // distribution-spec) follows, or the input is completely invalid.
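+ // Illustrative, not upstream: a value like 299 - "a"b" reaches this case after
+ // the prefix/suffix strip leaves a"b, so the whole header is rejected.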
+ return "" + case c == '\\': + afterBackslash = true + default: + res.WriteByte(c) + } + } + if afterBackslash { + return "" + } + return res.String() +} + +// we're using the challenges from the /v2/ ping response and not the one from the destination +// URL in this request because: +// +// 1) docker does that as well +// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request +// +// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up +func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { + if len(c.challenges) == 0 { + return nil + } + schemeNames := make([]string, 0, len(c.challenges)) + for _, challenge := range c.challenges { + schemeNames = append(schemeNames, challenge.Scheme) + switch challenge.Scheme { + case "basic": + req.SetBasicAuth(c.auth.Username, c.auth.Password) + return nil + case "bearer": + registryToken := c.registryToken + if registryToken == "" { + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below + // uses the same separator when formatting a remote request (and because + // repository names that we create can't contain colons, and extraScope values + // coming from a server come from `parseAuthScope`, which also splits on colons). + cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions) + if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 { + return fmt.Errorf( + "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d", + cacheKey, + colonCount, + ) + } + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + var ( + t *bearerToken + err error + ) + if c.auth.IdentityToken != "" { + t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes) + } else { + t, err = c.getBearerToken(req.Context(), challenge, scopes) + } + if err != nil { + return err + } + + token = *t + c.tokenCache.Store(cacheKey, token) + } + registryToken = token.Token + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) + } + } + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil +} + +func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.New("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil) + if err != nil { + return nil, err + } + + // Make the form data required against the oauth2 authentication + // More details here: https://docs.docker.com/registry/spec/auth/oauth/ + params := authReq.URL.Query() + if service, ok := challenge.Parameters["service"]; ok && service != "" { + params.Add("service", service) + } + + for _, scope := range scopes { + if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) + } + } + params.Add("grant_type", "refresh_token") + 
params.Add("refresh_token", c.auth.IdentityToken) + params.Add("client_id", "containers/image") + + authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) + authReq.Header.Add("User-Agent", c.userAgent) + authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "Trying to obtain access token"); err != nil { + return nil, err + } + + tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, + scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.New("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) + if err != nil { + return nil, err + } + + params := authReq.URL.Query() + if c.auth.Username != "" { + params.Add("account", c.auth.Username) + } + + if service, ok := challenge.Parameters["service"]; ok && service != "" { + params.Add("service", service) + } + + for _, scope := range scopes { + if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { + params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) + } + } + + authReq.URL.RawQuery = params.Encode() + + if c.auth.Username != "" && c.auth.Password != "" { + authReq.SetBasicAuth(c.auth.Username, c.auth.Password) + } + authReq.Header.Add("User-Agent", c.userAgent) + + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) + res, err := c.client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { + return nil, err + } + tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. 
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { + // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly + // specified by the system context + if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { + c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue + } + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = c.tlsClientConfig + c.client = &http.Client{Transport: tr} + + ping := func(scheme string) error { + pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + if err != nil { + return err + } + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) + return err + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return registryHTTPResponseToError(resp) + } + c.challenges = parseAuthHeader(resp.Header) + c.scheme = scheme + c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" + return nil + } + err := ping("https") + if err != nil && c.tlsClientConfig.InsecureSkipVerify { + err = ping("http") + } + if err != nil { + err = fmt.Errorf("pinging container registry %s: %w", c.registry, err) + if c.sys != nil && c.sys.DockerDisableV1Ping { + return err + } + // best effort to understand if we're talking to a V1 registry + pingV1 := func(scheme string) bool { + pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + if err != nil { + return false + } + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) + return false + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return false + } + return true + } + isV1 := pingV1("https") + if !isV1 && c.tlsClientConfig.InsecureSkipVerify { + isV1 = pingV1("http") + } + if isV1 { + err = ErrV1NotSupported + } + } + return err +} + +// detectProperties detects various properties of the registry. +// See the dockerClient documentation for members which are affected by this. 
+func (c *dockerClient) detectProperties(ctx context.Context) error { + c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) + return c.detectPropertiesError +} + +func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) { + path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type")) + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res)) + } + + manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + for _, u := range urls { + blobURL, err := url.Parse(u) + if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { + continue // unsupported url. skip this url. + } + // NOTE: we must not authenticate on additional URLs as those + // can be abused to leak credentials or tokens. Please + // refer to CVE-2020-15157 for more information. + resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debug(err) + resp.Body.Close() + continue + } + break + } + } + if resp == nil && err == nil { + return nil, 0, nil // fallback to non-external blob + } + if err != nil { + return nil, 0, err + } + return resp.Body, getBlobSize(resp), nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
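+// Illustrative, not upstream: for ref "registry.example.com/foo/bar" and digest
+// sha256:<hex>, the request below goes to /v2/foo/bar/blobs/sha256:<hex>, and the
+// returned stream is wrapped in a bodyReader so an interrupted download can resume.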
+func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, s, err := c.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, s, nil
+ }
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ err := registryHTTPResponseToError(res)
+ res.Body.Close()
+ return nil, 0, fmt.Errorf("fetching blob: %w", err)
+ }
+ cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
+ blobSize := getBlobSize(res)
+
+ reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
+ if err != nil {
+ res.Body.Close()
+ return nil, 0, err
+ }
+ return reconnectingReader, blobSize, nil
+}
+
+// getOCIDescriptorContents returns the contents of a blob specified by descriptor in ref, which must fit within maxSize.
+func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
+ // Note that this copies all kinds of attachments: attestations, and whatever else is there,
+ // not just signatures. We leave the signature consumers to decide based on the MIME type.
+ reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
+ if err != nil {
+ return nil, err
+ }
+ defer reader.Close()
+ payload, err := iolimits.ReadAtMost(reader, maxSize)
+ if err != nil {
+ return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
+ }
+ return payload, nil
+}
+
+// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
+func isManifestUnknownError(err error) bool {
+ // docker/distribution, and as defined in the spec
+ var ec errcode.ErrorCoder
+ if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
+ return true
+ }
+ // registry.redhat.io as of October 2022
+ var e errcode.Error
+ if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
+ return true
+ }
+ // opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
+ // but specifies that the HTTP status must be 404.
+ var unexpected *unexpectedHTTPResponseError
+ if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
+ return true
+ }
+ return false
+}
+
+// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
+// digest in ref.
+// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
+ tag := sigstoreAttachmentTag(digest)
+ sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String())
+ manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag)
+ if err != nil {
+ // FIXME: Are we going to need better heuristics??
+ // This alone is probably a good enough reason for sigstore to be opt-in only,
+ // otherwise we would just break ordinary copies.
+ if isManifestUnknownError(err) { + logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err) + return nil, nil + } + logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err) + return nil, err + } + if mimeType != imgspecv1.MediaTypeImageManifest { + // FIXME: Try anyway?? + return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q", + sigstoreRef.String(), mimeType) + } + res, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { + return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err) + } + return res, nil +} + +// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, +// using the original data structures. +func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { + path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) + res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res)) + } + + body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) + if err != nil { + return nil, err + } + + var parsedBody extensionSignatureList + if err := json.Unmarshal(body, &parsedBody); err != nil { + return nil, fmt.Errorf("decoding signature list: %w", err) + } + return &parsedBody, nil +} + +// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest. +func sigstoreAttachmentTag(d digest.Digest) string { + return strings.Replace(d.String(), ":", "-", 1) + ".sig" +} + +// Close removes resources associated with an initialized dockerClient, if any. +func (c *dockerClient) Close() error { + if c.client != nil { + c.client.CloseIdleConnections() + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go new file mode 100644 index 00000000..42bbfd95 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -0,0 +1,163 @@ +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods +// which are specific to Docker. +type Image struct { + types.ImageCloser + src *dockerImageSource +} + +// newImage returns a new Image interface type after setting up +// a client to the registry hosting the given image. +// The caller must call .Close() on the returned Image. +func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { + s, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, s) + if err != nil { + return nil, err + } + return &Image{ImageCloser: img, src: s}, nil +} + +// SourceRefFullName returns a fully expanded name for the repository this image is in. 
+func (i *Image) SourceRefFullName() string { + return i.src.logicalRef.ref.Name() +} + +// GetRepositoryTags list all tags available in the repository. The tag +// provided inside the ImageReference will be ignored. (This is a +// backward-compatible shim method which calls the module-level +// GetRepositoryTags) +func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { + return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef) +} + +// GetRepositoryTags list all tags available in the repository. The tag +// provided inside the ImageReference will be ignored. +func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { + dr, ok := ref.(dockerReference) + if !ok { + return nil, errors.New("ref must be a dockerReference") + } + + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } + path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) + client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") + if err != nil { + return nil, fmt.Errorf("failed to create client: %w", err) + } + defer client.Close() + + tags := make([]string, 0) + + for { + res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res)) + } + + var tagsHolder struct { + Tags []string + } + if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { + return nil, err + } + tags = append(tags, tagsHolder.Tags...) + + link := res.Header.Get("Link") + if link == "" { + break + } + + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) + if err != nil { + return tags, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return tags, nil +} + +// GetDigest returns the image's digest +// Use this to optimize and avoid use of an ImageSource based on the returned digest; +// if you are going to use an ImageSource anyway, it’s more efficient to create it first +// and compute the digest from the value returned by GetManifest. 
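+// This is done with a single HEAD request; Docker Hub does not count HEAD manifest requests
+// against its pull-rate limits, unlike the GET requests an ImageSource would issue.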
+// NOTE: Implemented to avoid Docker Hub API limits, and mirror configuration may be +// ignored (but may be implemented in the future) +func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) { + dr, ok := ref.(dockerReference) + if !ok { + return "", errors.New("ref must be a dockerReference") + } + + tagOrDigest, err := dr.tagOrDigest() + if err != nil { + return "", err + } + + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return "", err + } + client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") + if err != nil { + return "", fmt.Errorf("failed to create client: %w", err) + } + defer client.Close() + + path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + + res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil) + if err != nil { + return "", err + } + + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res)) + } + + dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest")) + if err != nil { + return "", err + } + + return dig, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go new file mode 100644 index 00000000..0e7b154c --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -0,0 +1,888 @@ +package docker + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/internal/streamdigest" + "github.com/containers/image/v5/internal/uploadreader" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +type dockerImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + + ref dockerReference + c *dockerClient + // State + manifestDigest digest.Digest // or "" if not yet known. +} + +// newImageDestination creates a new ImageDestination for the specified image reference. 
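+// The client is created with a combined "pull,push" scope, since pushing also needs
+// read access for blob existence checks and cross-repository mounts.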
+func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } + c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push") + if err != nil { + return nil, err + } + mimeTypes := []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + imgspecv1.MediaTypeImageIndex, + manifest.DockerV2ListMediaType, + } + if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes { + mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) + } + + dest := &dockerImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: mimeTypes, + DesiredLayerCompression: types.Compress, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + + ref: ref, + c: c, + } + dest.Compat = impl.AddCompat(dest) + return dest, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dockerImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *dockerImageDestination) Close() error { + return d.c.Close() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.supportsSignatures: + return nil + case d.c.signatureBase != nil: + return nil + default: + return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
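+// The underlying wire protocol is the registry API's three-step upload: POST to
+// /v2/<name>/blobs/uploads/ to open a session, PATCH the data to the returned location,
+// then PUT with a ?digest= parameter to commit, as implemented below.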
+func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+	// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
+	// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
+	// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
+	if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
+		logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
+		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
+		if err != nil {
+			return private.UploadedBlob{}, err
+		}
+		defer cleanup()
+		stream = streamCopy
+	}
+
+	if inputInfo.Digest != "" {
+		// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+		// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
+		if err != nil {
+			return private.UploadedBlob{}, err
+		}
+		if haveBlob {
+			return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
+		}
+	}
+
+	// FIXME? Chunked upload, progress reporting, etc.
+	uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
+	logrus.Debugf("Uploading %s", uploadPath)
+	res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusAccepted {
+		logrus.Debugf("Error initiating layer upload, response %#v", *res)
+		return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
+	}
+	uploadLocation, err := res.Location()
+	if err != nil {
+		return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
+	}
+
+	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+	sizeCounter := &sizeCounter{}
+	stream = io.TeeReader(stream, sizeCounter)
+
+	uploadLocation, err = func() (*url.URL, error) { // A scope for defer
+		uploadReader := uploadreader.NewUploadReader(stream)
+		// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
+		// returns, so there isn’t a way for the error text to be provided to any of our callers.
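+		// (Terminate makes any subsequent Read on uploadReader fail, which in turn makes the
+		// HTTP transport stop consuming the stream if the upload is abandoned mid-request.)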
+ defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + if err != nil { + logrus.Debugf("Error uploading layer chunked %v", err) + return nil, err + } + defer res.Body.Close() + if !successStatus(res.StatusCode) { + return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res)) + } + uploadLocation, err := res.Location() + if err != nil { + return nil, fmt.Errorf("determining upload URL: %w", err) + } + return uploadLocation, nil + }() + if err != nil { + return private.UploadedBlob{}, err + } + blobDigest := digester.Digest() + + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) + + locationQuery := uploadLocation.Query() + locationQuery.Set("digest", blobDigest.String()) + uploadLocation.RawQuery = locationQuery.Encode() + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + if err != nil { + return private.UploadedBlob{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading layer, response %#v", *res) + return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) + } + + logrus.Debugf("Upload of layer %s complete", blobDigest) + options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) + return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil +} + +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); +// it returns a non-nil error only on an unexpected failure. +func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { + checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) + logrus.Debugf("Checking %s", checkPath) + res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope) + if err != nil { + return false, -1, err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusOK: + logrus.Debugf("... already exists") + return true, getBlobSize(res), nil + case http.StatusUnauthorized: + logrus.Debugf("... not authorized") + return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) + case http.StatusNotFound: + logrus.Debugf("... not present") + return false, -1, nil + default: + return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) + } +} + +// mountBlob tries to mount blob srcDigest from srcRepo to the current destination. 
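+// A mount is requested with POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<source-repo>,
+// so the blob data never travels through the client.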
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+	u := url.URL{
+		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+		RawQuery: url.Values{
+			"mount": {srcDigest.String()},
+			"from":  {reference.Path(srcRepo)},
+		}.Encode(),
+	}
+	logrus.Debugf("Trying to mount %s", u.Redacted())
+	res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusCreated:
+		logrus.Debugf("... mount OK")
+		return nil
+	case http.StatusAccepted:
+		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+		// Abort, and let the ultimate caller do an upload when it's ready, instead.
+		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
+		}
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
+		if err != nil {
+			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+		} else {
+			defer res2.Body.Close()
+			if res2.StatusCode != http.StatusNoContent {
+				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+			}
+		}
+		// Anyway, if canceling the upload fails, ignore it and return the more important error:
+		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	default:
+		logrus.Debugf("Error mounting, response %#v", *res)
+		return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
+	}
+}
+
+// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
+// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
+// The caller must ensure info.Digest is set.
+func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
+	exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
+	if exists {
+		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+		return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
+	}
+	return false, private.ReusedBlob{}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
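+// The lookup order below is: the exact blob already present in the target repository, then
+// cache candidates from other repositories on the same registry, mounted across repositories when needed.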
+func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+	if info.Digest == "" {
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+	}
+
+	if impl.OriginalBlobMatchesRequiredCompression(options) {
+		// First, check whether the blob happens to already exist at the destination.
+		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
+		if err != nil {
+			return false, private.ReusedBlob{}, err
+		}
+		if haveBlob {
+			return true, reusedInfo, nil
+		}
+	} else {
+		requiredCompression := "nil"
+		if options.OriginalCompression != nil {
+			requiredCompression = options.OriginalCompression.Name()
+		}
+		logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
+	}
+
+	// Then try reusing blobs from other locations.
+	candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
+	for _, candidate := range candidates {
+		candidateRepo, err := parseBICLocationReference(candidate.Location)
+		if err != nil {
+			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+			continue
+		}
+		compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
+		if err != nil {
+			logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
+			continue
+		}
+		if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
+			requiredCompression := "nil"
+			if compressionAlgorithm != nil {
+				requiredCompression = compressionAlgorithm.Name()
+			}
+			logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
+			continue
+		}
+		if candidate.CompressorName != blobinfocache.Uncompressed {
+			logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
+		} else {
+			logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
+		}
+
+		// Sanity checks:
+		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+			// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
+			// (the "from" parameter); in that case we might try to use these candidates as well.
+			//
+			// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
+			// we could get an upload request that we would have to cancel.
+			logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+			continue
+		}
+		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+			logrus.Debug("... Already tried the primary destination")
+			continue
+		}
+
+		// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+
+		// Checking candidateRepo, and mounting from it, requires an
+		// expanded token scope.
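+		// (In token-auth terms this corresponds to a scope like “repository:<remoteName>:pull”,
+		// e.g. “repository:library/ubuntu:pull” for a hypothetical candidate in library/ubuntu.)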
+ extraScope := &authScope{ + resourceType: "repository", + remoteName: reference.Path(candidateRepo), + actions: "pull", + } + // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. + // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. + // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. + // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. + // Even worse, docker/distribution does not actually reasonably implement canceling uploads + // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); + // so, be a nice client and don't create unnecessary upload sessions on the server. + exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope) + if err != nil { + logrus.Debugf("... Failed: %v", err) + continue + } + if !exists { + // FIXME? Should we drop the blob from cache here (and elsewhere?)? + continue // logrus.Debug() already happened in blobExists + } + if candidateRepo.Name() != d.ref.ref.Name() { + if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil { + logrus.Debugf("... Mount failed: %v", err) + continue + } + } + + options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + + return true, private.ReusedBlob{ + Digest: candidate.Digest, + Size: size, + CompressionOperation: compressionOperation, + CompressionAlgorithm: compressionAlgorithm}, nil + } + + return false, private.ReusedBlob{}, nil +} + +// PutManifest writes manifest to the destination. +// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list +// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the +// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var refTail string + if instanceDigest != nil { + // If the instanceDigest is provided, then use it as the refTail, because the reference, + // whether it includes a tag or a digest, refers to the list as a whole, and not this + // particular instance. + refTail = instanceDigest.String() + // Double-check that the manifest we've been given matches the digest we've been given. 
+		matches, err := manifest.MatchesDigest(m, *instanceDigest)
+		if err != nil {
+			return fmt.Errorf("digesting manifest in PutManifest: %w", err)
+		}
+		if !matches {
+			manifestDigest, merr := manifest.Digest(m)
+			if merr != nil {
+				return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
+			}
+			return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
+		}
+	} else {
+		// Compute the digest of the main manifest, or the list if it's a list, so that we
+		// have a digest value to use if we're asked to save a signature for the manifest.
+		digest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.manifestDigest = digest
+		// The refTail should be either a digest (which we expect to match the value we just
+		// computed) or a tag name.
+		refTail, err = d.ref.tagOrDigest()
+		if err != nil {
+			return err
+		}
+	}
+
+	return d.uploadManifest(ctx, m, refTail)
+}
+
+// uploadManifest writes manifest to tagOrDigest.
+func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error {
+	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest)
+
+	headers := map[string][]string{}
+	mimeType := manifest.GuessMIMEType(m)
+	if mimeType != "" {
+		headers["Content-Type"] = []string{mimeType}
+	}
+	res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if !successStatus(res.StatusCode) {
+		rawErr := registryHTTPResponseToError(res)
+		err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
+		if isManifestInvalidError(rawErr) {
+			err = types.ManifestTypeRejectedError{Err: err}
+		}
+		return err
+	}
+	// An HTTP server may not be a registry at all, and just return 200 OK to everything
+	// (in particular that can fairly easily happen after tearing down a website and
+	// replacing it with a global 302 redirect to a new website, completely ignoring the
+	// path in the request); in that case we could “succeed” uploading a whole image.
+	// With docker/distribution we could rely on a Docker-Content-Digest header being present
+	// (because docker/distribution/registry/client has been failing uploads if it was missing),
+	// but that has been defined as explicitly optional by
+	// https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
+	// So, just note the missing header in a debug log.
+	if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
+		logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
+	}
+	return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+	var ec errcode.ErrorCoder
+	if ok := errors.As(err, &ec); !ok {
+		return false
+	}
+
+	switch ec.ErrorCode() {
+	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+	case v2.ErrorCodeManifestInvalid:
+		return true
+	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
+	case v2.ErrorCodeTagInvalid:
+		return true
+	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+	// uploading an OCI manifest that is (correctly, according to the spec) missing
+	// a top-level media type. See libpod issue #1719
+	// FIXME: remove this case when ECR behavior is fixed
+	case errcode.ErrorCodeUnsupported:
+		return strings.Contains(err.Error(), "Invalid JSON syntax")
+	default:
+		return false
+	}
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+	if instanceDigest == nil {
+		if d.manifestDigest == "" {
+			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+			return errors.New("Unknown manifest digest, can't add signatures")
+		}
+		instanceDigest = &d.manifestDigest
+	}
+
+	sigstoreSignatures := []signature.Sigstore{}
+	otherSignatures := []signature.Signature{}
+	for _, sig := range signatures {
+		if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+			sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+		} else {
+			otherSignatures = append(otherSignatures, sig)
+		}
+	}
+
+	// Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+	// instead, but that would probably be rather surprising.
+	// FIXME: So should we enable sigstore in all cases? Or write in all cases, but opt-in to read?
+
+	if len(sigstoreSignatures) != 0 {
+		if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+			return err
+		}
+	}
+
+	if len(otherSignatures) != 0 {
+		if err := d.c.detectProperties(ctx); err != nil {
+			return err
+		}
+		switch {
+		case d.c.supportsSignatures:
+			if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil {
+				return err
+			}
+		case d.c.signatureBase != nil:
+			if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil {
+				return err
+			}
+		default:
+			return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+		}
+	}
+
+	return nil
+}
+
+// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil, for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
+	// FIXME? This overwrites files one at a time, definitely not atomic.
+	// A failure when updating signatures with a reordered copy could lose some of them.
+
+	// Skip dealing with the manifest digest if not necessary.
+	if len(signatures) == 0 {
+		return nil
+	}
+
+	// NOTE: Keep this in sync with docs/signature-protocols.md!
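+	// The layout described there stores one file per signature, named signature-1, signature-2, …,
+	// under a per-manifest-digest directory; the 0-based index below is therefore 1-based on disk.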
+ for i, signature := range signatures { + sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) + err := d.putOneSignature(sigURL, signature) + if err != nil { + return err + } + } + // Remove any other signatures, if present. + // We stop at the first missing signature; if a previous deleting loop aborted + // prematurely, this may not clean up all of them, but one missing signature + // is enough for dockerImageSource to stop looking for other signatures, so that + // is sufficient. + for i := len(signatures); ; i++ { + sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) + missing, err := d.c.deleteOneSignature(sigURL) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +// putOneSignature stores sig to sigURL. +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error { + switch sigURL.Scheme { + case "file": + logrus.Debugf("Writing to %s", sigURL.Path) + err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755) + if err != nil { + return err + } + blob, err := signature.Blob(sig) + if err != nil { + return err + } + err = os.WriteFile(sigURL.Path, blob, 0644) + if err != nil { + return err + } + return nil + + case "http", "https": + return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) + default: + return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted()) + } +} + +func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error { + if !d.c.useSigstoreAttachments { + return errors.New("writing sigstore attachments is disabled by configuration") + } + + ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest) + if err != nil { + return err + } + var ociConfig imgspecv1.Image // Most fields empty by default + if ociManifest == nil { + ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Digest: "", // We will fill this in later. + Size: 0, + }, nil) + ociConfig.RootFS.Type = "layers" + } else { + logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String()) + // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. + configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize, + none.NoCache) + if err != nil { + return err + } + if err := json.Unmarshal(configBlob, &ociConfig); err != nil { + return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(), + d.ref.ref.Name(), err) + } + } + + for _, sig := range signatures { + mimeType := sig.UntrustedMIMEType() + payloadBlob := sig.UntrustedPayload() + annotations := sig.UntrustedAnnotations() + + alreadyOnRegistry := false + for _, layer := range ociManifest.Layers { + if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) { + logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String()) + alreadyOnRegistry = true + break + } + } + if alreadyOnRegistry { + continue + } + + // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. 
+ // That might eventually need to change if payloads grow to be not just signatures, but something + // significantly large. + sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{ + Cache: none.NoCache, + IsConfig: false, + EmptyLayer: false, + LayerIndex: nil, + }) + if err != nil { + return err + } + sigDesc.Annotations = annotations + ociManifest.Layers = append(ociManifest.Layers, sigDesc) + ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest) + logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String()) + } + + configBlob, err := json.Marshal(ociConfig) + if err != nil { + return err + } + logrus.Debugf("Uploading updated sigstore attachment config") + // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. + configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{ + Cache: none.NoCache, + IsConfig: true, + EmptyLayer: false, + LayerIndex: nil, + }) + if err != nil { + return err + } + ociManifest.Config = configDesc + + manifestBlob, err := ociManifest.Serialize() + if err != nil { + return err + } + logrus.Debugf("Uploading sigstore attachment manifest") + return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest)) +} + +func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, + payloadBlob []byte, annotations map[string]string) bool { + if layer.MediaType != mimeType || + layer.Size != int64(len(payloadBlob)) || + // This is not quite correct, we should use the layer’s digest algorithm. + // But right now we don’t want to deal with corner cases like bad digest formats + // or unavailable algorithms; in the worst case we end up with duplicate signature + // entries. + layer.Digest.String() != digest.FromBytes(payloadBlob).String() || + !maps.Equal(layer.Annotations, annotations) { + return false + } + return true +} + +// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate +// OCI descriptor. +func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) { + blobDigest := digest.FromBytes(contents) + info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents), + types.BlobInfo{ + Digest: blobDigest, + Size: int64(len(contents)), + MediaType: mimeType, + }, options) + if err != nil { + return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err) + } + return imgspecv1.Descriptor{ + MediaType: mimeType, + Digest: info.Digest, + Size: info.Size, + }, nil +} + +// deleteOneSignature deletes a signature from sigURL, if it exists. +// If it successfully determines that the signature does not exist, returns (true, nil) +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) { + switch sigURL.Scheme { + case "file": + logrus.Debugf("Deleting %s", sigURL.Path) + err := os.Remove(sigURL.Path) + if err != nil && os.IsNotExist(err) { + return true, nil + } + return false, err + + case "http", "https": + return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. 
Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) + default: + return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted()) + } +} + +// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, +// for a manifest with manifestDigest. +func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error { + // Skip dealing with the manifest digest, or reading the old state, if not necessary. + if len(signatures) == 0 { + return nil + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. Eventually we should also allow removing signatures, + // but the X-Registry-Supports-Signatures API extension does not support that yet. + + existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest) + if err != nil { + return err + } + existingSigNames := set.New[string]() + for _, sig := range existingSignatures.Signatures { + existingSigNames.Add(sig.Name) + } + + for _, newSigWithFormat := range signatures { + newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(newSigWithFormat) + } + newSig := newSigSimple.UntrustedSignature() + + if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool { + return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue + } + + // The API expect us to invent a new unique name. This is racy, but hopefully good enough. + var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return fmt.Errorf("generating random signature len %d: %w", n, err) + } + signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) + if !existingSigNames.Contains(signatureName) { + break + } + } + sig := extensionSignature{ + Version: extensionSignatureSchemaVersion, + Name: signatureName, + Type: extensionSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + + path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) + res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) + return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res)) + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) +func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go new file mode 100644 index 00000000..231d5d21 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -0,0 +1,789 @@ +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/url" + "os" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/regexp" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server, +// even if it were broken or malicious and it continued serving an enormous number of items. +const maxLookasideSignatures = 128 + +type dockerImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.DoesNotAffectLayerInfosForCopy + stubs.ImplementsGetBlobAt + + logicalRef dockerReference // The reference the user requested. + physicalRef dockerReference // The actual reference we are accessing (possibly a mirror) + c *dockerClient + // State + cachedManifest []byte // nil if not loaded yet + cachedManifestMIMEType string // Only valid if cachedManifest != nil +} + +// newImageSource creates a new ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } + registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) + if err != nil { + return nil, fmt.Errorf("loading registries configuration: %w", err) + } + if registry == nil { + // No configuration was found for the provided reference, so use the + // equivalent of a default configuration. + registry = &sysregistriesv2.Registry{ + Endpoint: sysregistriesv2.Endpoint{ + Location: ref.ref.String(), + }, + Prefix: ref.ref.String(), + } + } + + // Check all endpoints for the manifest availability. If we find one that does + // contain the image, it will be used for all future pull actions. Always try the + // non-mirror original location last; this both transparently handles the case + // of no mirrors configured, and ensures we return the error encountered when + // accessing the upstream location if all endpoints fail. 
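+	// For example, with a (hypothetical) registries.conf entry like
+	//
+	//	[[registry]]
+	//	prefix = "docker.io"
+	//	location = "docker.io"
+	//	[[registry.mirror]]
+	//	location = "mirror.example.com"
+	//
+	// pullSources below would list mirror.example.com first and docker.io last.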
+	pullSources, err := registry.PullSourcesFromReference(ref.ref)
+	if err != nil {
+		return nil, err
+	}
+	type attempt struct {
+		ref reference.Named
+		err error
+	}
+	attempts := []attempt{}
+	for _, pullSource := range pullSources {
+		if sys != nil && sys.DockerLogMirrorChoice {
+			logrus.Infof("Trying to access %q", pullSource.Reference)
+		} else {
+			logrus.Debugf("Trying to access %q", pullSource.Reference)
+		}
+		s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig)
+		if err == nil {
+			return s, nil
+		}
+		logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err)
+		attempts = append(attempts, attempt{
+			ref: pullSource.Reference,
+			err: err,
+		})
+	}
+	switch len(attempts) {
+	case 0:
+		return nil, errors.New("Internal error: newImageSource returned without trying any endpoint")
+	case 1:
+		return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise.
+	default:
+		// Don’t just build a string, try to preserve the typed error.
+		primary := &attempts[len(attempts)-1]
+		extras := []string{}
+		for i := 0; i < len(attempts)-1; i++ {
+			// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
+			// The paired [] at least have some chance of being unambiguous.
+			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+		}
+		return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
+	}
+}
+
+// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
+// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
+	registryConfig *registryConfiguration) (*dockerImageSource, error) {
+	physicalRef, err := newReference(pullSource.Reference)
+	if err != nil {
+		return nil, err
+	}
+
+	endpointSys := sys
+	// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
+		copy := *endpointSys
+		copy.DockerAuthConfig = nil
+		copy.DockerBearerRegistryToken = ""
+		endpointSys = &copy
+	}
+
+	client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
+	if err != nil {
+		return nil, err
+	}
+	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+	s := &dockerImageSource{
+		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+			HasThreadSafeGetBlob: true,
+		}),
+
+		logicalRef:  logicalRef,
+		physicalRef: physicalRef,
+		c:           client,
+	}
+	s.Compat = impl.AddCompat(s)
+
+	if err := s.ensureManifestIsLoaded(ctx); err != nil {
+		client.Close()
+		return nil, err
+	}
+	return s, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+	return s.logicalRef
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+	return s.c.Close()
+}
+
+// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
+// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+	if contentType == "" {
+		return contentType
+	}
+	mimeType, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return ""
+	}
+	return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return s.fetchManifest(ctx, instanceDigest.String())
+	}
+	err := s.ensureManifestIsLoaded(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+	return s.cachedManifest, s.cachedManifestMIMEType, nil
+}
+
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
+	return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
+}
+
+// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
+//
+// ImageSource implementations are not required or expected to do any caching,
+// but because our signatures are “attached” to the manifest digest,
+// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil)
+// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious
+// signature verification failures when pulling while a tag is being updated.
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
+	if s.cachedManifest != nil {
+		return nil
+	}
+
+	reference, err := s.physicalRef.tagOrDigest()
+	if err != nil {
+		return err
+	}
+
+	manblob, mt, err := s.fetchManifest(ctx, reference)
+	if err != nil {
+		return err
+	}
+	// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
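+	// A sketch of what that could look like (not implemented; fetchManifest would have to
+	// expose the response headers): compare manifest.Digest(manblob) with the
+	// Docker-Content-Digest value when the header is present. The header is optional in the
+	// distribution spec, so its absence could not be treated as an error.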
+ s.cachedManifest = manblob + s.cachedManifestMIMEType = mt + return nil +} + +// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks +func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { + defer close(streams) + defer close(errs) + currentOffset := uint64(0) + + body = makeBufferedNetworkReader(body, 64, 16384) + defer body.Close() + for _, c := range chunks { + if c.Offset != currentOffset { + if c.Offset < currentOffset { + errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset) + break + } + toSkip := c.Offset - currentOffset + if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { + errs <- err + break + } + currentOffset += toSkip + } + s := signalCloseReader{ + closed: make(chan struct{}), + stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), + consumeStream: true, + } + streams <- s + + // Wait until the stream is closed before going to the next chunk + <-s.closed + currentOffset += c.Length + } +} + +// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. +func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { + defer close(streams) + defer close(errs) + if !strings.HasPrefix(mediaType, "multipart/") { + streams <- body + return + } + boundary, found := params["boundary"] + if !found { + errs <- errors.New("could not find boundary") + body.Close() + return + } + buffered := makeBufferedNetworkReader(body, 64, 16384) + defer buffered.Close() + mr := multipart.NewReader(buffered, boundary) + parts := 0 + for { + p, err := mr.NextPart() + if err != nil { + if err != io.EOF { + errs <- err + } + if parts != len(chunks) { + errs <- errors.New("invalid number of chunks returned by the server") + } + return + } + s := signalCloseReader{ + closed: make(chan struct{}), + stream: p, + } + streams <- s + // NextPart() cannot be called while the current part + // is being read, so wait until it is closed + <-s.closed + parts++ + } +} + +var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)") + +func parseMediaType(contentType string) (string, map[string]string, error) { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + if err == mime.ErrInvalidMediaParameter { + // CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary + // param, let's handle it here. + matches := multipartByteRangesRe.FindStringSubmatch(contentType) + if len(matches) == 2 { + mediaType = "multipart/byteranges" + params = map[string]string{ + "boundary": matches[1], + } + err = nil + } + } + if err != nil { + return "", nil, err + } + } + return mediaType, params, err +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. 
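+// For example, chunks {Offset: 0, Length: 100} and {Offset: 100, Length: 50} produce the
+// header “Range: bytes=0-99,100-149”.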
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + headers := make(map[string][]string) + + rangeVals := make([]string, 0, len(chunks)) + for _, c := range chunks { + rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) + } + + headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} + + if len(info.URLs) != 0 { + return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) + if err != nil { + return nil, nil, err + } + + switch res.StatusCode { + case http.StatusOK: + // if the server replied with a 200 status code, convert the full body response to a series of + // streams as it would have been done with 206. + streams := make(chan io.ReadCloser) + errs := make(chan error) + go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) + return streams, errs, nil + case http.StatusPartialContent: + mediaType, params, err := parseMediaType(res.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + + streams := make(chan io.ReadCloser) + errs := make(chan error) + + go handle206Response(streams, errs, res.Body, chunks, mediaType, params) + return streams, errs, nil + case http.StatusBadRequest: + res.Body.Close() + return nil, nil, private.BadPartialRequestError{Status: res.Status} + default: + err := registryHTTPResponseToError(res) + res.Body.Close() + return nil, nil, fmt.Errorf("fetching partial blob: %w", err) + } +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + return s.c.getBlob(ctx, s.physicalRef, info, cache) +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + if err := s.c.detectProperties(ctx); err != nil { + return nil, err + } + var res []signature.Signature + switch { + case s.c.supportsSignatures: + sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest) + if err != nil { + return nil, err + } + res = append(res, sigs...) + case s.c.signatureBase != nil: + sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest) + if err != nil { + return nil, err + } + res = append(res, sigs...) 
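+	// detectProperties is expected to leave at least a default lookaside base configured,
+	// so reaching neither case above is an internal error: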
+ default: + return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } + + sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest) + if err != nil { + return nil, err + } + res = append(res, sigstoreSigs...) + return res, nil +} + +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } + if digested, ok := s.physicalRef.ref.(reference.Digested); ok { + d := digested.Digest() + if d.Algorithm() == digest.Canonical { + return d, nil + } + } + if err := s.ensureManifestIsLoaded(ctx); err != nil { + return "", err + } + return manifest.Digest(s.cachedManifest) +} + +// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, +// which is not nil. +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + signatures := []signature.Signature{} + for i := 0; ; i++ { + if i >= maxLookasideSignatures { + return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) + } + + sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) + signature, missing, err := s.getOneSignature(ctx, sigURL) + if err != nil { + return nil, err + } + if missing { + break + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) +// If it successfully determines that the signature does not exist, returns (nil, true, nil). +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) { + switch sigURL.Scheme { + case "file": + logrus.Debugf("Reading %s", sigURL.Path) + sigBlob, err := os.ReadFile(sigURL.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, true, nil + } + return nil, false, err + } + sig, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err) + } + return sig, false, nil + + case "http", "https": + logrus.Debugf("GET %s", sigURL.Redacted()) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil) + if err != nil { + return nil, false, err + } + res, err := s.c.client.Do(req) + if err != nil { + return nil, false, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + logrus.Debugf("... 
got status 404, as expected = end of signatures") + return nil, true, nil + } else if res.StatusCode != http.StatusOK { + return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", sigURL.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) + } + + contentType := res.Header.Get("Content-Type") + if mimeType := simplifyContentType(contentType); mimeType == "text/html" { + logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType) + // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type. + // If the content really is HTML, it’s going to fail in signature.FromBlob. + } + + sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) + if err != nil { + return nil, false, err + } + sig, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err) + } + return sig, false, nil + + default: + return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted()) + } +} + +// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension. +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) + if err != nil { + return nil, err + } + + var sigs []signature.Signature + for _, sig := range parsedBody.Signatures { + if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { + sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + } + } + return sigs, nil +} + +func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + if !s.c.useSigstoreAttachments { + logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") + return nil, nil + } + + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) + if err != nil { + return nil, err + } + if ociManifest == nil { + return nil, nil + } + + logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) + res := []signature.Signature{} + for layerIndex, layer := range ociManifest.Layers { + // Note that this copies all kinds of attachments: attestations, and whatever else is there, + // not just signatures. We leave the signature consumers to decide based on the MIME type. + logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String()) + // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. + // That might eventually need to change if payloads grow to be not just signatures, but something + // significantly large. 
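+ // For example, with cosign-style attachments the payload is typically the
+ // signing bundle/JSON, and descriptor annotations such as
+ // "dev.cosignproject.cosign/signature" carry the signature material; all of
+ // it is passed through unmodified below.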
+ payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, + none.NoCache) + if err != nil { + return nil, err + } + res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) + } + return res, nil +} + +// deleteImage deletes the named image from the registry, if supported. +func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return err + } + // docker/distribution does not document what action should be used for deleting images. + // + // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. + // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. + // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). + // + // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". + c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*") + if err != nil { + return err + } + defer c.Close() + + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + refTail, err := ref.tagOrDigest() + if err != nil { + return err + } + getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) + get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer get.Body.Close() + switch get.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) + default: + return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get)) + } + manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) + if err != nil { + return err + } + + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return fmt.Errorf("computing manifest digest: %w", err) + } + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) + + // When retrieving the digest from a registry >= 2.3 use the following header: + // "Accept": "application/vnd.docker.distribution.manifest.v2+json" + delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer delete.Body.Close() + if delete.StatusCode != http.StatusAccepted { + return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete)) + } + + for i := 0; ; i++ { + sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i) + missing, err := c.deleteOneSignature(sigURL) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +type bufferedNetworkReaderBuffer struct { + data []byte + len int + consumed int + err error +} + +type bufferedNetworkReader struct { + stream io.ReadCloser + emptyBuffer chan *bufferedNetworkReaderBuffer + readyBuffer chan *bufferedNetworkReaderBuffer + terminate chan bool + current *bufferedNetworkReaderBuffer + mutex sync.Mutex + gotEOF bool +} + +// handleBufferedNetworkReader runs in a goroutine +func handleBufferedNetworkReader(br *bufferedNetworkReader) { + defer close(br.readyBuffer) + for { + select { + case b := <-br.emptyBuffer: + b.len, b.err = br.stream.Read(b.data) + 
br.readyBuffer <- b
+ if b.err != nil {
+ return
+ }
+ case <-br.terminate:
+ return
+ }
+ }
+}
+
+func (n *bufferedNetworkReader) Close() error {
+ close(n.terminate)
+ close(n.emptyBuffer)
+ return n.stream.Close()
+}
+
+func (n *bufferedNetworkReader) read(p []byte) (int, error) {
+ if n.current != nil {
+ copied := copy(p, n.current.data[n.current.consumed:n.current.len])
+ n.current.consumed += copied
+ if n.current.consumed == n.current.len {
+ n.emptyBuffer <- n.current
+ n.current = nil
+ }
+ if copied > 0 {
+ return copied, nil
+ }
+ }
+ if n.gotEOF {
+ return 0, io.EOF
+ }
+
+ var b *bufferedNetworkReaderBuffer
+
+ select {
+ case b = <-n.readyBuffer:
+ if b.err != nil {
+ if b.err != io.EOF {
+ return b.len, b.err
+ }
+ n.gotEOF = true
+ }
+ b.consumed = 0
+ n.current = b
+ return n.read(p)
+ case <-n.terminate:
+ return 0, io.EOF
+ }
+}
+
+func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
+ n.mutex.Lock()
+ defer n.mutex.Unlock()
+
+ return n.read(p)
+}
+
+func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader {
+ br := bufferedNetworkReader{
+ stream: stream,
+ emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
+ terminate: make(chan bool),
+ }
+
+ go func() {
+ handleBufferedNetworkReader(&br)
+ }()
+
+ for i := uint(0); i < nBuffers; i++ {
+ b := bufferedNetworkReaderBuffer{
+ data: make([]byte, bufferSize),
+ }
+ br.emptyBuffer <- &b
+ }
+
+ return &br
+}
+
+type signalCloseReader struct {
+ closed chan struct{}
+ stream io.ReadCloser
+ consumeStream bool
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+ return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+ defer close(s.closed)
+ if s.consumeStream {
+ if _, err := io.Copy(io.Discard, s.stream); err != nil {
+ s.stream.Close()
+ return err
+ }
+ }
+ return s.stream.Close()
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go
new file mode 100644
index 00000000..6ae84915
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go
@@ -0,0 +1,168 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/v5/docker/policyconfiguration"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for container registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+ return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be ""; that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if !strings.HasPrefix(refString, "//") {
+ return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
+ }
+ ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+ if err != nil {
+ return nil, err
+ }
+ ref = reference.TagNameOnly(ref)
+ return NewReference(ref)
+}
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+ return newReference(ref)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named) (dockerReference, error) {
+ if reference.IsNameOnly(ref) {
+ return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ }
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // The docker/distribution API does not really support that (we can’t ask for an image with a specific
+ // tag and digest), so fail. This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+ // the tag or the digest first?)
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported")
+ }
+
+ return dockerReference{
+ ref: ref,
+ }, nil
+}
+
+func (ref dockerReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dockerReference) StringWithinTransport() string {
+ return "//" + reference.FamiliarString(ref.ref)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dockerReference) DockerReference() reference.Named {
+ return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. 
various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dockerReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dockerReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.ref) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return deleteImage(ctx, sys, ref) +} + +// tagOrDigest returns a tag or digest from the reference. +func (ref dockerReference) tagOrDigest() (string, error) { + if ref, ok := ref.ref.(reference.Canonical); ok { + return ref.Digest().String(), nil + } + if ref, ok := ref.ref.(reference.NamedTagged); ok { + return ref.Tag(), nil + } + // This should not happen, NewReference above refuses reference.IsNameOnly values. 
+ return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) +} diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go new file mode 100644 index 00000000..e0396918 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -0,0 +1,101 @@ +package docker + +import ( + "errors" + "fmt" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" +) + +var ( + // ErrV1NotSupported is returned when we're trying to talk to a + // docker V1 registry. + ErrV1NotSupported = errors.New("can't talk to a V1 container registry") + // ErrTooManyRequests is returned when the status code returned is 429 + ErrTooManyRequests = errors.New("too many requests to registry") +) + +// ErrUnauthorizedForCredentials is returned when the status code returned is 401 +type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. + Err error +} + +func (e ErrUnauthorizedForCredentials) Error() string { + return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error()) +} + +// httpResponseToError translates the https.Response into an error, possibly prefixing it with the supplied context. It returns +// nil if the response is not considered an error. +// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead. +func httpResponseToError(res *http.Response, context string) error { + switch res.StatusCode { + case http.StatusOK: + return nil + case http.StatusTooManyRequests: + return ErrTooManyRequests + case http.StatusUnauthorized: + err := registryHTTPResponseToError(res) + return ErrUnauthorizedForCredentials{Err: err} + default: + if context != "" { + context += ": " + } + return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + } +} + +// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution +// registry. +// +// WARNING: The OCI distribution spec says +// “A `4XX` response code from the registry MAY return a body in any format.â€; but if it is +// JSON, it MUST use the errcode.Error structure. +// So, callers should primarily decide based on HTTP StatusCode, not based on error type here. +func registryHTTPResponseToError(res *http.Response) error { + err := handleErrorResponse(res) + // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. + if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 { + // The docker/distribution registry implementation almost never returns + // more than one error in the HTTP body; it seems there is only one + // possible instance, where the second error reports a cleanup failure + // we don't really care about. + // + // The only _common_ case where a multi-element error is returned is + // created by the handleErrorResponse parser when OAuth authorization fails: + // the first element contains errors from a WWW-Authenticate header, the second + // element contains errors from the response body. + // + // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized + // for invalid tokens, ErrorCodeDenied for permission denied with a valid token + // for the first error, vs. 
ErrorCodeUnauthorized for both cases for the second error.) + // + // Also, docker/docker similarly only logs the other errors and returns the + // first one. + if len(errs) > 1 { + logrus.Debugf("Discarding non-primary errors:") + for _, err := range errs[1:] { + logrus.Debugf(" %s", err.Error()) + } + } + err = errs[0] + } + switch e := err.(type) { + case *unexpectedHTTPResponseError: + response := string(e.Response) + if len(response) > 50 { + response = response[:50] + "..." + } + // %.0w makes e visible to error.Unwrap() without including any text + err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e) + case errcode.Error: + // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually + // rather redundant. So reword it without using e.Code.Error() if e.Message is the default. + if e.Message == e.Code.Message() { + // %.0w makes e visible to error.Unwrap() without including any text + err = fmt.Errorf("%s%.0w", e.Message, e) + } + } + return err +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go new file mode 100644 index 00000000..7507d855 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -0,0 +1,173 @@ +package tarfile + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/streamdigest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer. +type Destination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + archive *Writer + repoTags []reference.NamedTagged + // Other state. + config []byte + sysCtx *types.SystemContext +} + +// NewDestination returns a tarfile.Destination adding images to the specified Writer. +func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination { + repoTags := []reference.NamedTagged{} + if ref != nil { + repoTags = append(repoTags, ref) + } + dest := &Destination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. + }, + DesiredLayerCompression: types.Decompress, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. + // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where + // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t + // much benefit from concurrency, mostly just extra CPU, memory and I/O contention. 
+ HasThreadSafePutBlob: false, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName), + NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"), + + archive: archive, + repoTags: repoTags, + sysCtx: sys, + } + dest.Compat = impl.AddCompat(dest) + return dest +} + +// AddRepoTags adds the specified tags to the destination's repoTags. +func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { + d.repoTags = append(d.repoTags, tags...) +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + // Ouch, we need to stream the blob into a temporary file just to determine the size. + // When the layer is decompressed, we also have to generate the digest on uncompressed data. + if inputInfo.Size == -1 || inputInfo.Digest == "" { + logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") + streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo) + if err != nil { + return private.UploadedBlob{}, err + } + defer cleanup() + stream = streamCopy + logrus.Debugf("... streaming done") + } + + if err := d.archive.lock(); err != nil { + return private.UploadedBlob{}, err + } + defer d.archive.unlock() + + // Maybe the blob has been already sent + ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo) + if err != nil { + return private.UploadedBlob{}, err + } + if ok { + return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil + } + + if options.IsConfig { + buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err) + } + d.config = buf + if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { + return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err) + } + } else { + if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { + return private.UploadedBlob{}, err + } + } + d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}) + return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. 
+// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if !impl.OriginalBlobMatchesRequiredCompression(options) { + return false, private.ReusedBlob{}, nil + } + if err := d.archive.lock(); err != nil { + return false, private.ReusedBlob{}, err + } + defer d.archive.unlock() + + return d.archive.tryReusingBlobLocked(info) +} + +// PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported for docker tar files`) + } + // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, + // so the caller trying a different manifest kind would be pointless. + var man manifest.Schema2 + if err := json.Unmarshal(m, &man); err != nil { + return fmt.Errorf("parsing manifest: %w", err) + } + if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { + return errors.New("Unsupported manifest type, need a Docker schema 2 manifest") + } + + if err := d.archive.lock(); err != nil { + return err + } + defer d.archive.unlock() + + if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil { + return err + } + + return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags) +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go new file mode 100644 index 00000000..6845893b --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -0,0 +1,273 @@ +package tarfile + +import ( + "archive/tar" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" +) + +// Reader is a ((docker save)-formatted) tar archive that allows random access to any component. +type Reader struct { + // None of the fields below are modified after the archive is created, until .Close(); + // this allows concurrent readers of the same archive. + path string // "" if the archive has already been closed. + removeOnClose bool // Remove file on close if true + Manifest []ManifestItem // Guaranteed to exist after the archive is created. +} + +// NewReaderFromFile returns a Reader for the specified path. +// The caller should call .Close() on the returned archive when done. 
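+//
+// A non-vendored usage sketch (the path and the printing are illustrative only):
+//
+//	r, err := tarfile.NewReaderFromFile(sys, "/tmp/image.tar")
+//	if err != nil { /* handle */ }
+//	defer r.Close()
+//	for _, item := range r.Manifest {
+//		fmt.Println(item.Config, item.RepoTags)
+//	}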
+func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("opening file %q: %w", path, err)
+ }
+ defer file.Close()
+
+ // If the file is seekable and not compressed we can just return the file itself
+ // as a source. Otherwise we pass the stream to NewReaderFromStream.
+ var stream io.Reader = file
+ if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible
+ decompressed, isCompressed, err := compression.AutoDecompress(file)
+ if err != nil {
+ return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
+ }
+ defer decompressed.Close()
+ stream = decompressed
+ if !isCompressed {
+ return newReader(path, false)
+ }
+ }
+ return NewReaderFromStream(sys, stream)
+}
+
+// NewReaderFromStream returns a Reader for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewReaderFromStream returns.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
+ // Save inputStream to a temporary file
+ tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
+ if err != nil {
+ return nil, fmt.Errorf("creating temporary file: %w", err)
+ }
+ defer tarCopyFile.Close()
+
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.Remove(tarCopyFile.Name())
+ }
+ }()
+
+ // In order to be compatible with docker-load, we need to support
+ // auto-decompression (it's also a nice quality-of-life thing to avoid
+ // giving users really confusing "invalid tar header" errors).
+ uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+ if err != nil {
+ return nil, fmt.Errorf("auto-decompressing input: %w", err)
+ }
+ defer uncompressedStream.Close()
+
+ // Copy the plain archive to the temporary file.
+ //
+ // TODO: This can take quite some time, and should ideally be cancellable
+ // using a context.Context.
+ if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+ return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
+ }
+ succeeded = true
+
+ return newReader(tarCopyFile.Name(), true)
+}
+
+// newReader creates a Reader for the specified path and removeOnClose flag.
+// The caller should call .Close() on the returned archive when done.
+func newReader(path string, removeOnClose bool) (*Reader, error) {
+ // This is a valid enough archive, except Manifest is not yet filled.
+ r := Reader{
+ path: path,
+ removeOnClose: removeOnClose,
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ r.Close()
+ }
+ }()
+
+ // We initialize Manifest immediately when constructing the Reader instead
+ // of later on-demand because every caller will need the data, and because doing it now
+ // removes the need to synchronize the access/creation of the data if the archive is later
+ // used from multiple goroutines to access different images.
+
+ // FIXME? Do we need to deal with the legacy format?
+ bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
+ return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
+ }
+
+ succeeded = true
+ return &r, nil
+}
+
+// Close removes resources associated with an initialized Reader, if any. 
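+// After Close returns, the Reader must not be used; if the Reader owns its
+// backing file (removeOnClose), that file is deleted.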
+func (r *Reader) Close() error { + path := r.path + r.path = "" // Mark the archive as closed + if r.removeOnClose { + return os.Remove(path) + } + return nil +} + +// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or +// both of which should be (nil, -1). +// On success, it returns the manifest item and an index of the matching tag, if a tag was used +// for matching; the index is -1 if a tag was not used. +func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) { + switch { + case ref != nil && sourceIndex != -1: + return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.String(), sourceIndex) + + case ref != nil: + refString := ref.String() + for i := range r.Manifest { + for tagIndex, tag := range r.Manifest[i].RepoTags { + parsedTag, err := reference.ParseNormalizedNamed(tag) + if err != nil { + return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err) + } + if parsedTag.String() == refString { + return &r.Manifest[i], tagIndex, nil + } + } + } + return nil, -1, fmt.Errorf("Tag %#v not found", refString) + + case sourceIndex != -1: + if sourceIndex >= len(r.Manifest) { + return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available", + sourceIndex, len(r.Manifest)) + } + return &r.Manifest[sourceIndex], -1, nil + + default: + if len(r.Manifest) != 1 { + return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest)) + } + return &r.Manifest[0], -1, nil + } +} + +// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. +type tarReadCloser struct { + *tar.Reader + backingFile *os.File +} + +func (t *tarReadCloser) Close() error { + return t.backingFile.Close() +} + +// openTarComponent returns a ReadCloser for the specific file within the archive. +// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers), +// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. +// It is safe to call this method from multiple goroutines simultaneously. +// The caller should call .Close() on the returned stream. +func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) { + // This is only a sanity check; if anyone did concurrently close ra, this access is technically + // racy against the write in .Close(). + if r.path == "" { + return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader") + } + + f, err := os.Open(r.path) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + f.Close() + } + }() + + tarReader, header, err := findTarComponent(f, componentPath) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested + // We follow only one symlink; so no loops are possible. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return nil, err + } + // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, + // so we don't care. 
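+ // For example, (docker save) deduplicates identical layers by storing
+ // "<id2>/layer.tar" as a symlink to "../<id1>/layer.tar" (hypothetical IDs);
+ // resolving a single such link by re-scanning for the target is sufficient here.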
+ tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + } + + if !header.FileInfo().Mode().IsRegular() { + return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + } + succeeded = true + return &tarReadCloser{Reader: tarReader, backingFile: f}, nil +} + +// findTarComponent returns a header and a reader matching componentPath within inputFile, +// or (nil, nil, nil) if not found. +func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) { + t := tar.NewReader(inputFile) + componentPath = path.Clean(componentPath) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, err + } + if path.Clean(h.Name) == componentPath { + return t, h, nil + } + } + return nil, nil, nil +} + +// readTarComponent returns full contents of componentPath. +// It is safe to call this method from multiple goroutines simultaneously. +func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) { + file, err := r.openTarComponent(path) + if err != nil { + return nil, fmt.Errorf("loading tar component %s: %w", path, err) + } + defer file.Close() + bytes, err := iolimits.ReadAtMost(file, limit) + if err != nil { + return nil, err + } + return bytes, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go new file mode 100644 index 00000000..b63b5316 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -0,0 +1,319 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// Source is a partial implementation of types.ImageSource for reading from tarPath. +type Source struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + archive *Reader + closeArchive bool // .Close() the archive when the source is closed. + // If ref is nil and sourceIndex is -1, indicates the only image in the archive. + ref reference.NamedTagged // May be nil + sourceIndex int // May be -1 + // The following data is only available after ensureCachedDataIsPresent() succeeds + tarManifest *ManifestItem // nil if not available yet. + configBytes []byte + configDigest digest.Digest + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo + // Other state + generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. + cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe + cacheDataResult error // Private state for ensureCachedDataIsPresent +} + +type layerInfo struct { + path string + size int64 +} + +// NewSource returns a tarfile.Source for an image in the specified archive matching ref +// and sourceIndex (or the only image if they are (nil, -1)). 
+// The archive will be closed when the Source is closed if closeArchive is true.
+func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source {
+ s := &Source{
+ PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+ HasThreadSafeGetBlob: true,
+ }),
+ NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName),
+
+ archive: archive,
+ closeArchive: closeArchive,
+ ref: ref,
+ sourceIndex: sourceIndex,
+ }
+ s.Compat = impl.AddCompat(s)
+ return s
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
+func (s *Source) ensureCachedDataIsPresent() error {
+ s.cacheDataLock.Do(func() {
+ s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+ })
+ return s.cacheDataResult
+}
+
+// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
+// Call ensureCachedDataIsPresent instead.
+func (s *Source) ensureCachedDataIsPresentPrivate() error {
+ tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
+ if err != nil {
+ return err
+ }
+
+ // Read and parse config.
+ configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
+ if err != nil {
+ return err
+ }
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+ return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
+ }
+ if parsedConfig.RootFS == nil {
+ return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+ }
+
+ knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
+ if err != nil {
+ return err
+ }
+
+ // Success; commit.
+ s.tarManifest = tarManifest
+ s.configBytes = configBytes
+ s.configDigest = digest.FromBytes(configBytes)
+ s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+ s.knownLayers = knownLayers
+ return nil
+}
+
+// Close removes resources associated with an initialized Source, if any.
+func (s *Source) Close() error {
+ if s.closeArchive {
+ return s.archive.Close()
+ }
+ return nil
+}
+
+// TarManifest returns contents of manifest.json
+func (s *Source) TarManifest() []ManifestItem {
+ return s.archive.Manifest
+}
+
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
+ // Collect layer data available in manifest and config.
+ if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+ return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+ }
+ knownLayers := map[digest.Digest]*layerInfo{}
+ unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+ for i, diffID := range parsedConfig.RootFS.DiffIDs {
+ if _, ok := knownLayers[diffID]; ok {
+ // Apparently it really can happen that a single image contains the same layer diff more than once.
+ // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+ // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. 
+ continue + } + layerPath := path.Clean(tarManifest.Layers[i]) + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.archive.path) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + layerPath := path.Clean(h.Name) + // FIXME: Cache this data across images in Reader. + if li, ok := unknownLayerSizes[layerPath]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. + uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) + if err != nil { + return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, layerPath) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) + } + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := manifest.Schema2{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: manifest.Schema2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + LayersDescriptors: []manifest.Schema2Descriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { + io.Reader + underlyingCloser func() error + uncompressedCloser func() error +} + +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err + } + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, + underlyingStream, err := s.archive.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() + + // In order to handle the fact that digests != diffIDs (and thus that a + // caller which is trying to verify the blob will run into problems), + // we need to decompress blobs. This is a bit ugly, but it's a + // consequence of making everything addressable by their DiffID rather + // than by their digest... + // + // In particular, because the v2s2 manifest being generated uses + // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of + // layers not their _actual_ digest. The result is that copy/... will + // be verifying a "digest" which is not the actual layer's digest (but + // is instead the DiffID). 
+ + uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) + if err != nil { + return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err) + } + + newStream := uncompressedReadCloser{ + Reader: uncompressedStream, + underlyingCloser: underlyingStream.Close, + uncompressedCloser: uncompressedStream.Close, + } + closeUnderlyingStream = false + + return newStream, li.size, nil + } + + return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest) +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go new file mode 100644 index 00000000..6e6ccd2d --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go @@ -0,0 +1,28 @@ +package tarfile + +import ( + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// Various data structures. + +// Based on github.com/docker/docker/image/tarexport/tarexport.go +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +// ManifestItem is an element of the array stored in the top-level manifest.json file. +type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API. + Config string + RepoTags []string + Layers []string + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` +} + +type imageID string diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go new file mode 100644 index 00000000..df7b2c09 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -0,0 +1,379 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// Writer allows creating a (docker save)-formatted tar archive containing one or more images. +type Writer struct { + mutex sync.Mutex + // ALL of the following members can only be accessed with the mutex held. + // Use Writer.lock() to obtain the mutex. + writer io.Writer + tar *tar.Writer // nil if the Writer has already been closed. + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + repositories map[string]map[string]string + legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent. + manifest []ManifestItem + manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. +} + +// NewWriter returns a Writer for the specified io.Writer. +// The caller must eventually call .Close() on the returned object to create a valid archive. 
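+//
+// A non-vendored lifecycle sketch (names are illustrative; images are added
+// by a tarfile.Destination writing into this Writer):
+//
+//	f, err := os.Create("out.tar")
+//	if err != nil { /* handle */ }
+//	w := tarfile.NewWriter(f)
+//	// ... copy one or more images via NewDestination(sys, w, transportName, ref) ...
+//	if err := w.Close(); err != nil { /* handle */ } // finalizes the archive
+//	_ = f.Close()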
+func NewWriter(dest io.Writer) *Writer {
+	return &Writer{
+		writer:           dest,
+		tar:              tar.NewWriter(dest),
+		blobs:            make(map[digest.Digest]types.BlobInfo),
+		repositories:     map[string]map[string]string{},
+		legacyLayers:     set.New[string](),
+		manifestByConfig: map[digest.Digest]int{},
+	}
+}
+
+// lock does some sanity checks and locks the Writer.
+// If this function succeeds, the caller must call w.unlock.
+// Do not use Writer.mutex directly.
+func (w *Writer) lock() error {
+	w.mutex.Lock()
+	if w.tar == nil {
+		w.mutex.Unlock()
+		return errors.New("Internal error: trying to use an already closed tarfile.Writer")
+	}
+	return nil
+}
+
+// unlock releases the lock obtained by Writer.lock
+// Do not use Writer.mutex directly.
+func (w *Writer) unlock() {
+	w.mutex.Unlock()
+}
+
+// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// The caller must have locked the Writer.
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
+	if info.Digest == "" {
+		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := w.blobs[info.Digest]; ok {
+		return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
+	}
+	return false, private.ReusedBlob{}, nil
+}
+
+// recordBlobLocked records metadata of a recorded blob, which must contain at least a digest and size.
+// The caller must have locked the Writer.
+func (w *Writer) recordBlobLocked(info types.BlobInfo) {
+	w.blobs[info.Digest] = info
+}
+
+// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
+// The caller must have locked the Writer.
+func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
+	if !w.legacyLayers.Contains(layerID) {
+		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
+		// See also the comment in physicalLayerPath.
+		physicalLayerPath := w.physicalLayerPath(layerDigest)
+		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return fmt.Errorf("creating layer symbolic link: %w", err)
+		}
+
+		b := []byte("1.0")
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return fmt.Errorf("writing VERSION file: %w", err)
+		}
+
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
+			return fmt.Errorf("writing config json file: %w", err)
+		}
+
+		w.legacyLayers.Add(layerID)
+	}
+	return nil
+}
+
+// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
+	var chainID digest.Digest
+	lastLayerID := ""
+	for i, l := range layerDescriptors {
+		// The legacy format requires a config file per layer
+		layerConfig := make(map[string]any)
+
+		// The root layer doesn't have any parent
+		if lastLayerID != "" {
+			layerConfig["parent"] = lastLayerID
+		}
+		// The top layer configuration file is generated by using subpart of the image configuration
+		if i == len(layerDescriptors)-1 {
+			var config map[string]*json.RawMessage
+			err := json.Unmarshal(configBytes, &config)
+			if err != nil {
+				return fmt.Errorf("unmarshaling config: %w", err)
+			}
+			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+				layerConfig[attr] = config[attr]
+			}
+		}
+
+		// This chainID value matches the computation in docker/docker/layer.CreateChainID …
+		if chainID == "" {
+			chainID = l.Digest
+		} else {
+			chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
+		}
+		// … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
+		// we create the image configs differently in details. At least recent versions allocate new IDs on load,
+		// so this is fine as long as the IDs we use are unique / cannot loop.
+		//
+		// For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
+		// config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
+		//
+		// Temporarily add the chainID to the config, only for the purpose of generating the image ID.
+		layerConfig["layer_id"] = chainID
+		b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
+		if err != nil {
+			return fmt.Errorf("marshaling layer config: %w", err)
+		}
+		delete(layerConfig, "layer_id")
+		layerID := digest.Canonical.FromBytes(b).Hex()
+		layerConfig["id"] = layerID
+
+		configBytes, err := json.Marshal(layerConfig)
+		if err != nil {
+			return fmt.Errorf("marshaling layer config: %w", err)
+		}
+
+		if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
+			return err
+		}
+
+		lastLayerID = layerID
+	}
+
+	if lastLayerID != "" {
+		for _, repoTag := range repoTags {
+			if val, ok := w.repositories[repoTag.Name()]; ok {
+				val[repoTag.Tag()] = lastLayerID
+			} else {
+				w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
+			}
+		}
+	}
+	return nil
+}
+
+// checkManifestItemsMatch checks that a and b describe the same image,
+// and returns an error if that’s not the case (which should never happen).
+func checkManifestItemsMatch(a, b *ManifestItem) error {
+	if a.Config != b.Config {
+		return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
+	}
+	if !slices.Equal(a.Layers, b.Layers) {
+		return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
+	}
+	// Ignore RepoTags, that will be built later.
+	// Ignore Parent and LayerSources, which we don’t set to anything meaningful.
+	return nil
+}
+
+// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
+// The caller must have locked the Writer.
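The chainID computed above is plain iterated hashing, following the same rule as docker/docker/layer.CreateChainID: each step hashes "<parent chainID> <layer digest>". A self-contained sketch of that rule, with made-up inputs standing in for real DiffIDs:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainID folds a list of layer digests into a single chain ID,
// hashing "<parent chainID> <this layer digest>" at every step.
func chainID(diffIDs []digest.Digest) digest.Digest {
	var chain digest.Digest
	for _, d := range diffIDs {
		if chain == "" {
			chain = d
		} else {
			chain = digest.Canonical.FromString(chain.String() + " " + d.String())
		}
	}
	return chain
}

func main() {
	ids := []digest.Digest{
		digest.Canonical.FromString("layer-1"), // stand-ins for real DiffIDs
		digest.Canonical.FromString("layer-2"),
	}
	fmt.Println(chainID(ids))
}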
+func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
+	layerPaths := []string{}
+	for _, l := range layerDescriptors {
+		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+	}
+
+	var item *ManifestItem
+	newItem := ManifestItem{
+		Config:       w.configPath(configDigest),
+		RepoTags:     []string{},
+		Layers:       layerPaths,
+		Parent:       "", // We don’t have this information
+		LayerSources: nil,
+	}
+	if i, ok := w.manifestByConfig[configDigest]; ok {
+		item = &w.manifest[i]
+		if err := checkManifestItemsMatch(item, &newItem); err != nil {
+			return err
+		}
+	} else {
+		i := len(w.manifest)
+		w.manifestByConfig[configDigest] = i
+		w.manifest = append(w.manifest, newItem)
+		item = &w.manifest[i]
+	}
+
+	knownRepoTags := set.New[string]()
+	for _, repoTag := range item.RepoTags {
+		knownRepoTags.Add(repoTag)
+	}
+	for _, tag := range repoTags {
+		// For github.com/docker/docker consumers, this works just as well as
+		//   refString := ref.String()
+		// because when reading the RepoTags strings, github.com/docker/docker/reference
+		// normalizes both of them to the same value.
+		//
+		// Doing it this way to include the normalized-out `docker.io[/library]` does make
+		// a difference for github.com/projectatomic/docker consumers, with the
+		// “Add --add-registry and --block-registry options to docker daemon” patch.
+		// These consumers treat reference strings which include a hostname and reference
+		// strings without a hostname differently.
+		//
+		// Using the host name here is more explicit about the intent, and it has the same
+		// effect as (docker pull) in projectatomic/docker, which tags the result using
+		// a hostname-qualified reference.
+		// See https://github.com/containers/image/issues/72 for a more detailed
+		// analysis and explanation.
+		refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
+
+		if !knownRepoTags.Contains(refString) {
+			item.RepoTags = append(item.RepoTags, refString)
+			knownRepoTags.Add(refString)
+		}
+	}
+
+	return nil
+}
+
+// Close writes all outstanding data about images to the archive, and finishes writing data
+// to the underlying io.Writer.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+	if err := w.lock(); err != nil {
+		return err
+	}
+	defer w.unlock()
+
+	b, err := json.Marshal(&w.manifest)
+	if err != nil {
+		return err
+	}
+	if err := w.sendBytesLocked(manifestFileName, b); err != nil {
+		return err
+	}
+
+	b, err = json.Marshal(w.repositories)
+	if err != nil {
+		return fmt.Errorf("marshaling repositories: %w", err)
+	}
+	if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
+		return fmt.Errorf("writing config json file: %w", err)
+	}
+
+	if err := w.tar.Close(); err != nil {
+		return err
+	}
+	w.tar = nil // Mark the Writer as closed.
+	return nil
+}
+
+// configPath returns a path we choose for storing a config with the specified digest.
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) configPath(configDigest digest.Digest) string {
+	return configDigest.Hex() + ".json"
+}
+
+// physicalLayerPath returns a path we choose for storing a layer with the specified digest
+// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string { + // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way + // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described + // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load) + // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers + // in the root of the tarball. + return layerDigest.Hex() + ".tar" +} + +type tarFI struct { + path string + size int64 + isSymlink bool +} + +func (t *tarFI) Name() string { + return t.path +} +func (t *tarFI) Size() int64 { + return t.size +} +func (t *tarFI) Mode() os.FileMode { + if t.isSymlink { + return os.ModeSymlink + } + return 0444 +} +func (t *tarFI) ModTime() time.Time { + return time.Unix(0, 0) +} +func (t *tarFI) IsDir() bool { + return false +} +func (t *tarFI) Sys() any { + return nil +} + +// sendSymlinkLocked sends a symlink into the tar stream. +// The caller must have locked the Writer. +func (w *Writer) sendSymlinkLocked(path string, target string) error { + hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target) + if err != nil { + return err + } + logrus.Debugf("Sending as tar link %s -> %s", path, target) + return w.tar.WriteHeader(hdr) +} + +// sendBytesLocked sends a path into the tar stream. +// The caller must have locked the Writer. +func (w *Writer) sendBytesLocked(path string, b []byte) error { + return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b)) +} + +// sendFileLocked sends a file into the tar stream. +// The caller must have locked the Writer. +func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error { + hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "") + if err != nil { + return err + } + logrus.Debugf("Sending as tar file %s", path) + if err := w.tar.WriteHeader(hdr); err != nil { + return err + } + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
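One conventional way to discharge that TODO, sketched here rather than taken from the patch: io.Copy itself cannot be cancelled, but wrapping the source in a reader that checks a context between reads can abort a long copy (names are illustrative):

package streamutil

import (
	"context"
	"io"
)

// contextReader fails the next Read once ctx is cancelled; an in-flight
// Read still runs to completion, so cancellation happens between chunks.
type contextReader struct {
	ctx context.Context
	r   io.Reader
}

func (cr contextReader) Read(p []byte) (int, error) {
	if err := cr.ctx.Err(); err != nil {
		return 0, err
	}
	return cr.r.Read(p)
}

// copyWithContext behaves like io.Copy but aborts between reads when ctx is done.
func copyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	return io.Copy(dst, contextReader{ctx: ctx, r: src})
}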
+	size, err := io.Copy(w.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go
new file mode 100644
index 00000000..862e8803
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_common.go
@@ -0,0 +1,6 @@
+//go:build !freebsd
+// +build !freebsd
+
+package docker
+
+const etcDir = "/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
new file mode 100644
index 00000000..2bf27ac0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go
@@ -0,0 +1,6 @@
+//go:build freebsd
+// +build freebsd
+
+package docker
+
+const etcDir = "/usr/local/etc"
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
new file mode 100644
index 00000000..e1f1f1f2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -0,0 +1,78 @@
+package policyconfiguration
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+	res := ref.Name()
+	tagged, isTagged := ref.(reference.NamedTagged)
+	digested, isDigested := ref.(reference.Canonical)
+	switch {
+	case isTagged && isDigested: // Note that this CAN actually happen.
+		return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+	case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+		return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+	case isTagged:
+		res = res + ":" + tagged.Tag()
+	case isDigested:
+		res = res + "@" + digested.Digest().String()
+	default: // Coverage: The above was supposed to be exhaustive.
+		return "", errors.New("Internal inconsistency, unexpected default branch")
+	}
+	return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+	// Look for a match of the repository, and then of the possible parent
+	// namespaces. Note that this only happens on the expanded host names
+	// and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
+	// then in its parent "docker.io/library"; in none of "busybox",
+	// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
+	//
+	// ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
+	// iteration matches the host name (for any namespace).
+ res := []string{} + name := ref.Name() + for { + res = append(res, name) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + + // Strip port number if any, before appending to res slice. + // Currently, the most compatible behavior is to return + // example.com:8443/ns, example.com:8443, *.com. + // If a port number is not specified, the expected behavior would be + // example.com/ns, example.com, *.com + portNumColon := strings.Index(name, ":") + if portNumColon != -1 { + name = name[:portNumColon] + } + + // Append wildcarded domains to res slice + for { + firstDot := strings.Index(name, ".") + if firstDot == -1 { + break + } + name = name[firstDot+1:] + + res = append(res, "*."+name) + } + return res +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md new file mode 100644 index 00000000..3c4d74eb --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go new file mode 100644 index 00000000..978df7ea --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go new file mode 100644 index 00000000..d3f47d21 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. 
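Concretely, for example.com:8443/ns/repo the loop plus the port-stripping and wildcard handling above produce example.com:8443/ns/repo, example.com:8443/ns, example.com:8443, and *.com, matching the comment in the code. A runnable sketch against the public policyconfiguration package (assuming the vendored module path resolves as usual):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/policyconfiguration"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("example.com:8443/ns/repo:latest")
	if err != nil {
		panic(err)
	}
	id, err := policyconfiguration.DockerReferenceIdentity(ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // example.com:8443/ns/repo:latest
	// Expected namespaces, per the iteration and port/wildcard handling above:
	// example.com:8443/ns/repo, example.com:8443/ns, example.com:8443, *.com
	for _, ns := range policyconfiguration.DockerReferenceNamespaces(ref) {
		fmt.Println(ns)
	}
}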
An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both a
+// tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
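The normalization round trip is easiest to see end to end. A runnable sketch against this reference package (the digest value is the one from the ParseDockerRef comment above):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// A bare familiar name expands to the canonical docker.io/library form.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/ubuntu
	fmt.Println(reference.FamiliarString(named)) // ubuntu

	// ParseDockerRef drops the tag when a digest is also present.
	ref, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:... (tag dropped)
}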
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go new file mode 100644 index 00000000..6c5484c0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alphanumeric [separator alphanumeric]* +// alphanumeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// Deprecated: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
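References can also be built up programmatically from the WithName, WithTag, and WithDigest helpers defined here; a short runnable sketch (the digest value is reused from the earlier example):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	base, err := reference.WithName("docker.io/library/busybox")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(base, "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // docker.io/library/busybox:latest

	d := digest.Digest("sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	canonical, err := reference.WithDigest(base, d)
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.String()) // docker.io/library/busybox@sha256:...
}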
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go new file mode 100644 index 00000000..7b15871f --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go 
@@ -0,0 +1,6 @@
+package reference
+
+// Return true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+	return anchoredIdentifierRegexp.MatchString(s)
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
new file mode 100644
index 00000000..76ba5c2d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go
@@ -0,0 +1,156 @@
+package reference
+
+import (
+	"regexp"
+	"strings"
+
+	storageRegexp "github.com/containers/storage/pkg/regexp"
+)
+
+const (
+	// alphaNumeric defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumeric = `[a-z0-9]+`
+
+	// separator defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and multiple
+	// dashes. Repeated dashes and underscores are intentionally treated
+	// differently. In order to support valid hostnames as name components,
+	// supporting repeated dash was added. Additionally double underscore is
+	// now allowed as a separator to loosen the restriction for previously
+	// supported names.
+	separator = `(?:[._]|__|[-]*)`
+
+	// domainComponent restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
+	// and followed by an optional port.
+	domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
+
+	// The string counterpart for TagRegexp.
+	tag = `[\w][\w.-]{0,127}`
+
+	// The string counterpart for DigestRegexp.
+	digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
+
+	// The string counterpart for IdentifierRegexp.
+	identifier = `([a-f0-9]{64})`
+
+	// The string counterpart for ShortIdentifierRegexp.
+	shortIdentifier = `([a-f0-9]{6,64})`
+)
+
+var (
+	// nameComponent restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponent = expression(
+		alphaNumeric,
+		optional(repeated(separator, alphaNumeric)))
+
+	domain = expression(
+		domainComponent,
+		optional(repeated(literal(`.`), domainComponent)),
+		optional(literal(`:`), `[0-9]+`))
+	// DomainRegexp defines the structure of potential domain components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	DomainRegexp = re(domain)
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = re(tag)
+
+	anchoredTag = anchored(tag)
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = storageRegexp.Delayed(anchoredTag)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = re(digestPat)
+
+	anchoredDigest = anchored(digestPat)
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest)
+
+	namePat = expression(
+		optional(domain, literal(`/`)),
+		nameComponent,
+		optional(repeated(literal(`/`), nameComponent)))
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the domain and name part omitting
+	// the separating forward slash from either.
+ NameRegexp = re(namePat) + + anchoredName = anchored( + optional(capture(domain), literal(`/`)), + capture(nameComponent, + optional(repeated(literal(`/`), nameComponent)))) + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = storageRegexp.Delayed(anchoredName) + + referencePat = anchored(capture(namePat), + optional(literal(":"), capture(tag)), + optional(literal("@"), capture(digestPat))) + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = re(referencePat) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = re(identifier) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = re(shortIdentifier) + + anchoredIdentifier = anchored(identifier) + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier) +) + +// re compiles the string to a regular expression. +var re = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) string { + return regexp.QuoteMeta(s) +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...string) string { + return strings.Join(res, "") +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return group(expression(res...)) + `?` +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...string) string { + return group(expression(res...)) + `+` +} + +// group wraps the regexp in a non-capturing group. +func group(res ...string) string { + return `(?:` + expression(res...) + `)` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + expression(res...) + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...string) string { + return `^` + expression(res...) + `$` +} diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go new file mode 100644 index 00000000..c7b884ab --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/registries_d.go @@ -0,0 +1,293 @@ +package docker + +import ( + "errors" + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. 
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
+
+// userRegistriesDir is the path to the per user registries.d.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
+
+// defaultUserDockerDir is the default lookaside directory for unprivileged user
+var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")
+
+// defaultDockerDir is the default lookaside directory for root
+var defaultDockerDir = "/var/lib/containers/sigstore"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+	DefaultDocker *registryNamespace `yaml:"default-docker"`
+	// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+	Docker map[string]registryNamespace `yaml:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+	Lookaside              string `yaml:"lookaside"`         // For reading, and if LookasideStaging is not present, for writing.
+	LookasideStaging       string `yaml:"lookaside-staging"` // For writing only.
+	SigStore               string `yaml:"sigstore"`          // For compatibility, deprecated in favor of Lookaside.
+	SigStoreStaging        string `yaml:"sigstore-staging"`  // For compatibility, deprecated in favor of LookasideStaging.
+	UseSigstoreAttachments *bool  `yaml:"use-sigstore-attachments,omitempty"`
+}
+
+// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below.
+type lookasideStorageBase *url.URL
+
+// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
+// Warning: This function only exposes configuration in registries.d;
+// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
+func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
+	dr, ok := ref.(dockerReference)
+	if !ok {
+		return nil, errors.New("ref must be a dockerReference")
+	}
+	config, err := loadRegistryConfiguration(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	return config.lookasideStorageBaseURL(dr, write)
+}
+
+// loadRegistryConfiguration returns a registryConfiguration appropriate for sys.
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) {
+	dirPath := registriesDirPath(sys)
+	logrus.Debugf(`Using registries.d directory %s`, dirPath)
+	return loadAndMergeConfig(dirPath)
+}
+
+// registriesDirPath returns a path to registries.d
+func registriesDirPath(sys *types.SystemContext) string {
+	return registriesDirPathWithHomeDir(sys, homedir.Get())
+}
+
+// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
+// it exists only to allow testing it with an artificial home directory.
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+	if sys != nil && sys.RegistriesDirPath != "" {
+		return sys.RegistriesDirPath
+	}
+	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
+	if _, err := os.Stat(userRegistriesDirPath); err == nil {
+		return userRegistriesDirPath
+	}
+	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+	}
+
+	return systemRegistriesDirPath
+}
+
+// loadAndMergeConfig loads configuration files in dirPath
+// FIXME: Probably rename to loadRegistryConfigurationForPath
+func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
+	mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
+	dockerDefaultMergedFrom := ""
+	nsMergedFrom := map[string]string{}
+
+	dir, err := os.Open(dirPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &mergedConfig, nil
+		}
+		return nil, err
+	}
+	configNames, err := dir.Readdirnames(0)
+	if err != nil {
+		return nil, err
+	}
+	for _, configName := range configNames {
+		if !strings.HasSuffix(configName, ".yaml") {
+			continue
+		}
+		configPath := filepath.Join(dirPath, configName)
+		configBytes, err := os.ReadFile(configPath)
+		if err != nil {
+			return nil, err
+		}
+
+		var config registryConfiguration
+		err = yaml.Unmarshal(configBytes, &config)
+		if err != nil {
+			return nil, fmt.Errorf("parsing %s: %w", configPath, err)
+		}
+
+		if config.DefaultDocker != nil {
+			if mergedConfig.DefaultDocker != nil {
+				return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+					dockerDefaultMergedFrom, configPath)
+			}
+			mergedConfig.DefaultDocker = config.DefaultDocker
+			dockerDefaultMergedFrom = configPath
+		}
+
+		for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+			if _, ok := mergedConfig.Docker[nsName]; ok {
+				return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+					nsName, nsMergedFrom[nsName], configPath)
+			}
+			mergedConfig.Docker[nsName] = nsConfig
+			nsMergedFrom[nsName] = configPath
+		}
+	}
+
+	return &mergedConfig, nil
+}
+
+// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”.
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
+func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) {
+	topLevel := config.signatureTopLevel(dr, write)
+	var baseURL *url.URL
+	if topLevel != "" {
+		u, err := url.Parse(topLevel)
+		if err != nil {
+			return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
+		}
+		baseURL = u
+	} else {
+		// returns default directory if no lookaside specified in configuration file
+		baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID())
+		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted())
+	}
+	// NOTE: Keep this in sync with docs/signature-protocols.md!
+	// FIXME? Restrict to explicitly supported schemes?
+	repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
+	if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+		return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
+	}
+	baseURL.Path = baseURL.Path + "/" + repo
+	return baseURL, nil
+}
+
+// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid
+func builtinDefaultLookasideStorageDir(euid int) *url.URL {
+	if euid != 0 {
+		return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
+	}
+	return &url.URL{Scheme: "file", Path: defaultDockerDir}
+}
+
+// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
+	if config.Docker != nil {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if ns, ok := config.Docker[identity]; ok {
+			logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity)
+			if ret := ns.signatureTopLevel(write); ret != "" {
+				return ret
+			}
+		}
+
+		// Look for a match of the possible parent namespaces.
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if ns, ok := config.Docker[name]; ok {
+				logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name)
+				if ret := ns.signatureTopLevel(write); ret != "" {
+					return ret
+				}
+			}
+		}
+	}
+	// Look for a default location
+	if config.DefaultDocker != nil {
+		logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`)
+		if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" {
+			return ret
+		}
+	}
+	return ""
+}
+
+// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments
+// for ref.
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool {
+	if config.Docker != nil {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if ns, ok := config.Docker[identity]; ok {
+			logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity)
+			if ns.UseSigstoreAttachments != nil {
+				return *ns.UseSigstoreAttachments
+			}
+		}
+
+		// Look for a match of the possible parent namespaces.
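Stepping back from the lookup code for a moment: the registries.d files being merged here are small YAML documents. A sketch that runs an illustrative fragment through the same yaml.v3 tags as the registryConfiguration type above (all values are made up, and the local struct types are simplified stand-ins):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type namespaceConfig struct {
	Lookaside        string `yaml:"lookaside"`
	LookasideStaging string `yaml:"lookaside-staging"`
}

type config struct {
	DefaultDocker *namespaceConfig           `yaml:"default-docker"`
	Docker        map[string]namespaceConfig `yaml:"docker"`
}

const doc = `
default-docker:
  lookaside: https://sigstore.example.com
docker:
  registry.example.com/ns:
    lookaside: https://ns.example.com/signatures
    lookaside-staging: file:///var/lib/ns-signatures
`

func main() {
	var c config
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		panic(err)
	}
	// Lookup order mirrors signatureTopLevel: exact namespace, then parent
	// namespaces, then default-docker.
	fmt.Println(c.Docker["registry.example.com/ns"].Lookaside)
	fmt.Println(c.DefaultDocker.Lookaside)
}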
+	for _, name := range ref.PolicyConfigurationNamespaces() {
+		if ns, ok := config.Docker[name]; ok {
+			logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name)
+			if ns.UseSigstoreAttachments != nil {
+				return *ns.UseSigstoreAttachments
+			}
+		}
+	}
+	// Look for a default location
+	if config.DefaultDocker != nil {
+		logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`)
+		if config.DefaultDocker.UseSigstoreAttachments != nil {
+			return *config.DefaultDocker.UseSigstoreAttachments
+		}
+	}
+	return false
+}
+
+// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+	if write {
+		if ns.LookasideStaging != "" {
+			logrus.Debugf(`  Using "lookaside-staging" %s`, ns.LookasideStaging)
+			return ns.LookasideStaging
+		}
+		if ns.SigStoreStaging != "" {
+			logrus.Debugf(`  Using "sigstore-staging" %s`, ns.SigStoreStaging)
+			return ns.SigStoreStaging
+		}
+	}
+	if ns.Lookaside != "" {
+		logrus.Debugf(`  Using "lookaside" %s`, ns.Lookaside)
+		return ns.Lookaside
+	}
+	if ns.SigStore != "" {
+		logrus.Debugf(`  Using "sigstore" %s`, ns.SigStore)
+		return ns.SigStore
+	}
+	return ""
+}
+
+// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
+// base is not nil from the caller
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+	sigURL := *base
+	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+	return &sigURL
+}
diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
new file mode 100644
index 00000000..6bcb835b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go
@@ -0,0 +1,172 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+	// Scheme is the auth-scheme according to RFC 7235
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 7235
+	Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+	if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+		return &authScope{
+			resourceType: parts[0],
+			remoteName:   parts[1],
+			actions:      parts[2],
+		}, nil
+	}
+	return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i++; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
new file mode 100644
index 00000000..2767c395
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -0,0 +1,70 @@
+package blobinfocache
+
+import (
+	"github.com/containers/image/v5/pkg/compression"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original
+// object if
it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. +func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) Open() { +} + +func (bic *v1OnlyBlobInfoCache) Close() { +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. +func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) { + switch compressorName { + case Uncompressed: + return types.Decompress, nil, nil + case UnknownCompression: + return types.PreserveOriginal, nil, nil + default: + algo, err := compression.AlgorithmByName(compressorName) + if err == nil { + return types.Compress, &algo, nil + } + return types.PreserveOriginal, nil, err + } +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go new file mode 100644 index 00000000..fdd24581 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -0,0 +1,52 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +const ( + // Uncompressed is the value we store in a blob info cache to indicate that we know that + // the blob in the corresponding location is not compressed. + Uncompressed = "uncompressed" + // UnknownCompression is the value we store in a blob info cache to indicate that we don't + // know if the blob in the corresponding location is compressed (and if so, how) or not. + UnknownCompression = "unknown" +) + +// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind +// of compression was applied to the blobs it keeps information about. +type BlobInfoCache2 interface { + types.BlobInfoCache + + // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). + // Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). + Open() + // Close destroys state created by Open(). 
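+	//
+	// A minimal pairing sketch (hypothetical caller, names assumed):
+	//
+	//	bic := blobinfocache.FromBlobInfoCache(plainCache)
+	//	bic.Open()
+	//	defer bic.Close()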
+	Close()
+
+	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+	// or Uncompressed or UnknownCompression.
+	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+	// information in a manifest.
+	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	// CandidateLocations2 returns a prioritized, limited number of blobs and their locations
+	// that could possibly be reused within the specified (transport scope) (if they still
+	// exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+	// up variants of the blob which have the same uncompressed digest.
+	//
+	// The CompressorName fields in returned data must never be UnknownCompression.
+	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+	Digest         digest.Digest
+	CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+	Location       types.BICLocationReference
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
new file mode 100644
index 00000000..617a451a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/types"
+)
+
+func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+	list, err := manifest.Schema2ListFromManifest(manblob)
+	if err != nil {
+		return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
+	}
+	targetManifestDigest, err := list.ChooseInstance(sys)
+	if err != nil {
+		return nil, fmt.Errorf("choosing image instance: %w", err)
+	}
+	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+	if err != nil {
+		return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
+	}
+
+	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+	if err != nil {
+		return nil, fmt.Errorf("computing manifest digest: %w", err)
+	}
+	if !matches {
+		return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+	}
+
+	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
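+
+// For illustration (hypothetical caller, not part of the upstream file): a caller
+// holding a manifest *list* blob reaches this code via manifestInstanceFromBlob,
+// and transparently gets back the one per-platform manifest picked by
+// list.ChooseInstance(sys):
+//
+//	gm, err := manifestInstanceFromBlob(ctx, sys, src, listBlob, manifest.DockerV2ListMediaType)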
"github.com/opencontainers/image-spec/specs-go/v1" +) + +type manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict†logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag†getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. 
+	name := reference.Path(ref)
+	var tag string
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		tag = tagged.Tag()
+	} else {
+		tag = ""
+	}
+	return m.m.Name != name || m.m.Tag != tag
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
+	return m.m.Inspect(nil)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+
+	// We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
+	// handle conversions between them by doing nothing.
+	if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType {
+		converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+			imgspecv1.MediaTypeImageManifest:  copy.convertToManifestOCI1,
+			manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		if converted != nil {
+			return converted, nil
+		}
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	if options.EmbeddedDockerReference != nil {
+		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+			copy.m.Tag = tagged.Tag()
+		} else {
+			copy.m.Tag = ""
+		}
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface
+func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	return m.convertToManifestSchema2(ctx, options)
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema1 object.
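+//
+// Informally: the conversion walks the schema1 fsLayers/history pairs, skips
+// throwaway (empty) layers, carries the remaining blob digests over unchanged,
+// and synthesizes a schema2 config blob via m.m.ToSchema2Config(diffIDs).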
+// +// Based on github.com/docker/docker/distribution/pull_v2.go +func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + uploadedLayerInfos := options.InformationOnly.LayerInfos + layerDiffIDs := options.InformationOnly.LayerDiffIDs + + if len(m.m.ExtractedV1Compatibility) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. + return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.FSLayers) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.FSLayers)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) + } + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
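+//
+// Note (informal): options is shared across the schema1→schema2→OCI chain, so
+// layer edits rewritten by the schema2 step are what convertManifestIfRequiredWithUpdate
+// later re-applies to the converted image.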
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestOCI1(ctx, options) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema1) SupportsEncryption(context.Context) bool { + return false +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool { + return true // There are no MIME types in the manifest, so we must assume a valid image. +} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go new file mode 100644 index 00000000..c3234c37 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -0,0 +1,413 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +// +// This is publicly visible as c/image/image.GzippedEmptyLayer. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +// +// This is publicly visible as c/image/image.GzippedEmptyLayerDigest. +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. 
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.Schema2Clone(m.m),
+	}
+
+	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+		imgspecv1.MediaTypeImageManifest:        copy.convertToManifestOCI1,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if converted != nil {
+		return converted, nil
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+	return imgspecv1.Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestSchema2 object.
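+//
+// For example, a layer of type "application/vnd.docker.image.rootfs.diff.tar.gzip"
+// comes out as imgspecv1.MediaTypeImageLayerGzip; digests and sizes are untouched.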
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { + configOCI, err := m.OCIConfig(ctx) + if err != nil { + return nil, err + } + configOCIBytes, err := json.Marshal(configOCI) + if err != nil { + return nil, err + } + + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), + } + + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) + for idx := range layers { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + switch m.m.LayersDescriptors[idx].MediaType { + case manifest.DockerV2Schema2ForeignLayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + case manifest.DockerV2SchemaLayerMediaTypeUncompressed: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema2LayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) + } + } + + return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. +// +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) + if err != nil { + return nil, fmt.Errorf("uploading empty layer: %w", err) + } + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) + } + haveGzippedEmptyLayer = true + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. + v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. 
+ if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return m1, nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]any{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema2) SupportsEncryption(context.Context) bool { + return false +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool { + return m.m.CanChangeLayerCompression(mimeType) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go new file mode 100644 index 00000000..75e472aa --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go @@ -0,0 +1,121 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// genericManifest is an interface for parsing, modifying image manifests and related data. +// The public methods are related to types.Image so that embedding a genericManifest implements most of it, +// but there are also public methods that are only visible by packages that can import c/image/internal/image. 
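+//
+// A minimal consumption sketch (hypothetical caller):
+//
+//	gm, err := manifestInstanceFromBlob(ctx, sys, src, blob, mt)
+//	if err != nil {
+//		return err
+//	}
+//	img := memoryImageFromManifest(gm) // a types.Image backed by gm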
+type genericManifest interface { + serialize() ([]byte, error) + manifestMIMEType() string + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() types.BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob(context.Context) ([]byte, error) + // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about + // layers in the resulting configuration isn't guaranteed to be returned to due how + // old image manifests work (docker v2s1 especially). + OCIConfig(context.Context) (*imgspecv1.Image, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. + // It returns false if the manifest does not embed a Docker reference. + // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) + EmbeddedDockerReferenceConflicts(ref reference.Named) bool + // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. + Inspect(context.Context) (*types.ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // This does not change the state of the original Image object. + UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) + // SupportsEncryption returns if encryption is supported for the manifest type + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 + SupportsEncryption(ctx context.Context) bool + + // The following methods are not a part of types.Image: + // === + + // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image + // (and the code can handle that). + // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted + // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts + // to a different manifest format). 
+ CanChangeLayerCompression(mimeType string) bool +} + +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + return manifestSchema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return manifestOCI1FromManifest(src, manblob) + case manifest.DockerV2Schema2MediaType: + return manifestSchema2FromManifest(src, manblob) + case manifest.DockerV2ListMediaType: + return manifestSchema2FromManifestList(ctx, sys, src, manblob) + case imgspecv1.MediaTypeImageIndex: + return manifestOCI1FromImageIndex(ctx, sys, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} + +// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation +// converted to a specific manifest MIME type. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original genericManifest object. +type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) + +// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if +// required and re-apply the options to the converted type. +// It returns (nil, nil) if no conversion was requested. +func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { + if options.ManifestMIMEType == "" { + return nil, nil + } + + converter, ok := converters[options.ManifestMIMEType] + if !ok { + return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + } + + optionsCopy := options + convertedManifest, err := converter(ctx, &optionsCopy) + if err != nil { + return nil, err + } + convertedImage := memoryImageFromManifest(convertedManifest) + + optionsCopy.ManifestMIMEType = "" + return convertedImage.UpdatedImage(ctx, optionsCopy) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go new file mode 100644 index 00000000..e22c7aaf --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/memory.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + "errors" + + "github.com/containers/image/v5/types" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. 
there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go new file mode 100644 index 00000000..6629967b --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -0,0 +1,323 @@ +package image + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + ociencspec "github.com/containers/ocicrypt/spec" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" +) + +type manifestOCI1 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 +} + +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestOCI1{ + src: src, + m: m, + }, nil +} + +// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { + return &manifestOCI1{ + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), + } +} + +func (m *manifestOCI1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestOCI1) manifestMIMEType() string { + return imgspecv1.MediaTypeImageManifest +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestOCI1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.Config.Digest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) + } + + cb, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestOCI1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) 
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.OCI1Clone(m.m),
+	}
+
+	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+		manifest.DockerV2Schema2MediaType:       copy.convertToManifestSchema2Generic,
+		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if converted != nil {
+		return converted, nil
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+	return manifest.Schema2Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
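+
+// Note (informal): the descriptor translation above is a plain field-for-field
+// copy; only the MediaType strings differ between the two formats, and those
+// are rewritten by the conversion functions below.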
+// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions. +// If not, it returns (nil, nil). +// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, +// and edits *options to not try decryption again. +func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { + if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool { + return info.CryptoOperation == types.Decrypt + }) { + return nil, nil + } + + originalInfos := m.LayerInfos() + if len(originalInfos) != len(options.LayerInfos) { + return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) + } + + res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate. + updatedEdits := slices.Clone(options.LayerInfos) + for i, info := range options.LayerInfos { + if info.CryptoOperation == types.Decrypt { + res[i].CryptoOperation = types.Decrypt + updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + } + // Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. + res[i].CompressionOperation = types.PreserveOriginal + res[i].CompressionAlgorithm = nil + } + options.LayerInfos = updatedEdits + return res, nil +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestOCI1 object. +func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) + } + + // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits + // which remove OCI-specific features, because trying to convert those layers would fail. + // So, do the layer updates for decryption. + ociManifest := m.m + layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options) + if err != nil { + return nil, err + } + if layerDecryptEdits != nil { + ociManifest = manifest.OCI1Clone(ociManifest) + if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil { + return nil, err + } + } + + // Create a copy of the descriptor. + config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config) + + // Above, we have already checked that this manifest refers to an image, not an OCI artifact, + // so the only difference between OCI and DockerSchema2 is the mediatypes. The + // media type of the manifest is handled by manifestSchema2FromComponents. 
+	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
+	for idx := range layers {
+		layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
+		switch layers[idx].MediaType {
+		case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+		case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+		case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		case imgspecv1.MediaTypeImageLayer:
+			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+		case imgspecv1.MediaTypeImageLayerGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+		case imgspecv1.MediaTypeImageLayerZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		// FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
+		case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
+			ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
+			return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+		}
+	}
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest, since the only difference is the mediatype of
+	// descriptors; there is no change to any blob stored in m.src.
+	return manifestSchema2FromComponents(config, m.src, nil, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
+	}
+
+	// We can't directly convert images to V1, but we can transitively convert via a V2 image
+	m2, err := m.convertToManifestSchema2(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return m2.convertToManifestSchema1(ctx, options)
+}
+
+// SupportsEncryption returns whether encryption is supported for the manifest type
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+	return true
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
+// to a different manifest format).
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+	return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
new file mode 100644
index 00000000..0e945c85
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/types"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+	index, err := manifest.OCI1IndexFromManifest(manblob)
+	if err != nil {
+		return nil, fmt.Errorf("parsing OCI1 index: %w", err)
+	}
+	targetManifestDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return nil, fmt.Errorf("choosing image instance: %w", err)
+	}
+	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+	if err != nil {
+		return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
+	}
+
+	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+	if err != nil {
+		return nil, fmt.Errorf("computing manifest digest: %w", err)
+	}
+	if !matches {
+		return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+	}
+
+	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
new file mode 100644
index 00000000..661891aa
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference points to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
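+//
+// A minimal call sequence might look like this (an illustrative sketch; ctx,
+// sys, and ref are assumed to be caller-provided):
+//
+//	img, err := FromReference(ctx, sys, ref)
+//	if err != nil {
+//		return err
+//	}
+//	defer img.Close()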
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+	src, err := ref.NewImageSource(ctx, sys)
+	if err != nil {
+		return nil, err
+	}
+	img, err := FromSource(ctx, sys, src)
+	if err != nil {
+		src.Close()
+		return nil, err
+	}
+	return img, nil
+}
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+	img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+	if err != nil {
+		return nil, err
+	}
+	return &imageCloser{
+		Image: img,
+		src:   src,
+	}, nil
+}
+
+func (ic *imageCloser) Close() error {
+	return ic.src.Close()
+}
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+	*UnparsedImage
+	ManifestBlob     []byte // The manifest of the relevant instance
+	ManifestMIMEType string // MIME type of ManifestBlob
+	// genericManifest contains data corresponding to manifestBlob.
+	// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+	// if you want to preserve the original manifest; use manifestBlob directly.
+	genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+// +// This is publicly visible as c/image/image.FromUnparsedImage. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &SourcedImage{ + UnparsedImage: unparsed, + ManifestBlob: manifestBlob, + ManifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *SourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. +func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { + return i.ManifestBlob, i.ManifestMIMEType, nil +} + +func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go new file mode 100644 index 00000000..0f026501 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go @@ -0,0 +1,119 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// This is publicly visible as c/image/image.UnparsedImage. +type UnparsedImage struct { + src private.ImageSource + instanceDigest *digest.Digest + cachedManifest []byte // A private cache for Manifest(); nil if not yet known. + // A private cache for Manifest(), may be the empty string if guessing failed. + // Valid iff cachedManifest is not nil. + cachedManifestMIMEType string + cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known. +} + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +// +// This is publicly visible as c/image/image.UnparsedInstance. 
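+//
+// A minimal usage sketch (illustrative; src is assumed to be an already-open
+// types.ImageSource, ctx a caller-provided context.Context):
+//
+//	unparsed := UnparsedInstance(src, nil) // nil selects the primary manifest
+//	manifestBlob, mimeType, err := unparsed.Manifest(ctx)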
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            imagesource.FromPublic(src),
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", fmt.Errorf("computing manifest digest: %w", err)
+			}
+			if !matches {
+				return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+	if i.instanceDigest != nil {
+		return *i.instanceDigest, true
+	}
+	ref := i.Reference().DockerReference()
+	if ref != nil {
+		if canonical, ok := ref.(reference.Canonical); ok {
+			return canonical.Digest(), true
+		}
+	}
+	return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+	// It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+	// but this is very likely to be the only implementation ever.
+	sigs, err := i.UntrustedSignatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+	simpleSigs := [][]byte{}
+	for _, sig := range sigs {
+		if sig, ok := sig.(signature.SimpleSigning); ok {
+			simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+		}
+	}
+	return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+	if i.cachedSignatures == nil {
+		sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, err
+		}
+		i.cachedSignatures = sigs
+	}
+	return i.cachedSignatures, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
new file mode 100644
index 00000000..47c169a1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -0,0 +1,101 @@
+package impl
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+	dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+//
+//	type yourDestination struct {
+//		impl.Compat
+//		…
+//	}
+//
+//	dest := &yourDestination{…}
+//	dest.Compat = impl.AddCompat(dest)
func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+	return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+		Cache:    blobinfocache.FromBlobInfoCache(cache),
+		IsConfig: isConfig,
+	})
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	return types.BlobInfo{
+		Digest: res.Digest,
+		Size:   res.Size,
+	}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+		Cache:         blobinfocache.FromBlobInfoCache(cache),
+		CanSubstitute: canSubstitute,
+	})
+	if !reused || err != nil {
+		return reused, types.BlobInfo{}, err
+	}
+	res := types.BlobInfo{
+		Digest:               blob.Digest,
+		Size:                 blob.Size,
+		CompressionOperation: blob.CompressionOperation,
+		CompressionAlgorithm: blob.CompressionAlgorithm,
+	}
+	// This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers.
+	// Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
+	// provide the value in cases where it is likely to be correct.
+	if blob.Digest == info.Digest {
+		res.MediaType = info.MediaType
+	}
+	return true, res, nil
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	withFormat := []signature.Signature{}
+	for _, sig := range signatures {
+		withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+	}
+	return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
new file mode 100644
index 00000000..d5de81a6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
@@ -0,0 +1,20 @@
+package impl
+
+import (
+	"github.com/containers/image/v5/internal/private"
+	compression "github.com/containers/image/v5/pkg/compression/types"
+)
+
+// BlobMatchesRequiredCompression validates whether compression is required by the caller while selecting a blob;
+// if it is required, the function matches the compression requested by the caller against the compression of the
+// existing blob (which can be nil to represent uncompressed or unknown).
+func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
+	if options.RequiredCompression == nil {
+		return true // no requirement imposed
+	}
+	return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
+}
+
+func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
+	return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
new file mode 100644
index 00000000..704812e9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// An empty slice or nil means any MIME type can be tried to upload.
+	SupportedManifestMIMETypes []string
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression types.LayerCompression
+	// AcceptsForeignLayerURLs is false if foreign layers in manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs bool
+	// MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+	MustMatchRuntimeOS bool
+	// IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference bool
+	// HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+	HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+	vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+	return PropertyMethodsInitialize{
+		vals: vals,
+	}
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried to upload.
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+	return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+	return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+	return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+	return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+	return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
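+//
+// As an illustrative sketch (an assumption, mirroring the impl.Compat example
+// earlier in this package), a transport typically wires these property methods
+// in by embedding PropertyMethodsInitialize and calling PropertyMethods:
+//
+//	type yourDestination struct {
+//		impl.PropertyMethodsInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+//			HasThreadSafePutBlob: true,
+//		}),
+//	}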
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool { + return o.vals.HasThreadSafePutBlob +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go new file mode 100644 index 00000000..0dc6bd5a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -0,0 +1,52 @@ +package stubs + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// NoPutBlobPartialInitialize implements parts of private.ImageDestination +// for transports that don’t support PutBlobPartial(). +// See NoPutBlobPartial() below. +type NoPutBlobPartialInitialize struct { + transportName string +} + +// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref. +func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize { + return NoPutBlobPartialRaw(ref.Transport().Name()) +} + +// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used +// in situations where no ImageReference is available. +func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize { + return NoPutBlobPartialInitialize{ + transportName: transportName, + } +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { + return false +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) +} + +// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true. +type ImplementsPutBlobPartial struct{} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool { + return true +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go new file mode 100644 index 00000000..7015fd06 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go @@ -0,0 +1,50 @@ +package stubs + +import ( + "context" + "errors" + + "github.com/containers/image/v5/internal/signature" + "github.com/opencontainers/go-digest" +) + +// NoSignaturesInitialize implements parts of private.ImageDestination +// for transports that don’t support storing signatures. +// See NoSignatures() below. +type NoSignaturesInitialize struct { + message string +} + +// NoSignatures creates a NoSignaturesInitialize, failing with message. 
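+//
+// An illustrative embedding (an assumption, following the pattern documented in
+// this package’s stubs.go):
+//
+//	type yourDestination struct {
+//		stubs.NoSignaturesInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		NoSignaturesInitialize: stubs.NoSignatures("signatures are not supported by this transport"),
+//	}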
+func NoSignatures(message string) NoSignaturesInitialize {
+	return NoSignaturesInitialize{
+		message: message,
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+	return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+	if len(signatures) != 0 {
+		return errors.New(stub.message)
+	}
+	return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+// Note that it might be even more useful to return a value detected dynamically based on the actual destination.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
new file mode 100644
index 00000000..ab233406
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go
@@ -0,0 +1,27 @@
+// Package stubs contains trivial stubs for parts of private.ImageDestination.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagedestination/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+//	type yourDestination struct {
+//		stubs.ImplementsPutBlobPartial
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourDestination struct {
+//		stubs.NoPutBlobPartialInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		…
+//		NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+//	}
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
new file mode 100644
index 00000000..17e1870c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -0,0 +1,96 @@
+package imagedestination
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/internal/imagedestination/stubs"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+	stubs.NoPutBlobPartialInitialize
+
+	types.ImageDestination
+}
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+	if dest2, ok := dest.(private.ImageDestination); ok {
+		return dest2
+	}
+	return &wrapped{
+		NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()),
+
+		ImageDestination: dest,
+	}
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+	res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
+	return private.UploadedBlob{
+		Digest: res.Digest,
+		Size:   res.Size,
+	}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + if options.RequiredCompression != nil { + return false, private.ReusedBlob{}, nil + } + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + }, nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + simpleSigs := [][]byte{} + for _, sig := range signatures { + simpleSig, ok := sig.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(sig) + } + simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature()) + } + return w.PutSignatures(ctx, simpleSigs, instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go new file mode 100644 index 00000000..7d859c31 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go @@ -0,0 +1,55 @@ +package impl + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/opencontainers/go-digest" +) + +// Compat implements the obsolete parts of types.ImageSource +// for implementations of private.ImageSource. +// See AddCompat below. +type Compat struct { + src private.ImageSourceInternalOnly +} + +// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource +// for implementations of private.ImageSource. +// +// Use it like this: +// +// type yourSource struct { +// impl.Compat +// … +// } +// +// src := &yourSource{…} +// src.Compat = impl.AddCompat(src) +func AddCompat(src private.ImageSourceInternalOnly) Compat { + return Compat{src} +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + // Silently ignore signatures with other formats; the caller can’t handle them. + // Admittedly callers that want to sync all of the image might want to fail instead; this + // way an upgrade of c/image neither breaks them nor adds new functionality. 
+	// Alternatively, we could possibly define the old GetSignatures to use the multi-format
+	// signature.Blob representation now, in general, but that could silently break those callers as well.
+	sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+	if err != nil {
+		return nil, err
+	}
+	simpleSigs := [][]byte{}
+	for _, sig := range sigs {
+		if sig, ok := sig.(signature.SimpleSigning); ok {
+			simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+		}
+	}
+	return simpleSigs, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 00000000..d5eae635
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
new file mode 100644
index 00000000..73e8c78e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+	HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+	vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+	return PropertyMethodsInitialize{
+		vals: vals,
+	}
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
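+//
+// Wiring is analogous to the destination-side impl.PropertyMethods (an
+// illustrative sketch, not upstream documentation):
+//
+//	src := &yourSource{
+//		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
+//			HasThreadSafeGetBlob: true,
+//		}),
+//	}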
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+	return o.vals.HasThreadSafeGetBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
new file mode 100644
index 00000000..b3a8c7e8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignaturesWithFormat() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 00000000..15aee6d4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+	transportName string
+}
+
+// NoGetBlobAt creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+	return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+	return NoGetBlobAtInitialize{
+		transportName: transportName,
+	}
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+	return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
new file mode 100644
index 00000000..cb345395
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,28 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+//	type yourSource struct {
+//		stubs.ImplementsGetBlobAt
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourSource struct {
+//		stubs.NoGetBlobAtInitialize
+//		…
+//	}
+//
+//	src := &yourSource{
+//		…
+//		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+//	}
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
new file mode 100644
index 00000000..886b4e83
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -0,0 +1,56 @@
+package imagesource
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+	stubs.NoGetBlobAtInitialize
+
+	types.ImageSource
+}
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+	if src2, ok := src.(private.ImageSource); ok {
+		return src2
+	}
+	return &wrapped{
+		NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
+
+		ImageSource: src,
+	}
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
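+//
+// Illustrative use (an assumption, not upstream documentation): a caller holding
+// only a types.ImageSource can upgrade it first, then use the richer API:
+//
+//	priv := FromPublic(src) // a no-op if src already implements private.ImageSource
+//	sigs, err := priv.GetSignaturesWithFormat(ctx, nil)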
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	sigs, err := w.GetSignatures(ctx, instanceDigest)
+	if err != nil {
+		return nil, err
+	}
+	res := []signature.Signature{}
+	for _, sig := range sigs {
+		res = append(res, signature.SimpleSigningFromBlob(sig))
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
new file mode 100644
index 00000000..f17d0024
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -0,0 +1,58 @@
+package iolimits
+
+import (
+	"fmt"
+	"io"
+)
+
+// All constants below are intended to be used as limits for `ReadAtMost`. The
+// immediate use-case for limiting the size of in-memory copied data is to
+// protect against OOM DoS attacks as described in CVE-2020-1702. Instead of
+// copying data until running out of memory, we error out after hitting the
+// specified limit.
+const (
+	// megaByte denotes one megabyte and is intended to be used as a limit in
+	// `ReadAtMost`.
+	megaByte = 1 << 20
+	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
+	// of 4 MB aligns with the one of a Docker registry:
+	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
+	MaxManifestBodySize = 4 * megaByte
+	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxAuthTokenBodySize = megaByte
+	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureListBodySize = 4 * megaByte
+	// MaxSignatureBodySize is the maximum allowed size of a signature.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureBodySize = 4 * megaByte
+	// MaxErrorBodySize is the maximum allowed size of an error-response body.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxErrorBodySize = megaByte
+	// MaxConfigBodySize is the maximum allowed size of a config blob.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxConfigBodySize = 4 * megaByte
+	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxOpenShiftStatusBody = 4 * megaByte
+	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images)
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxTarFileManifestSize = megaByte
+)
+
+// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
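+//
+// An illustrative call (an assumption; resp is an *http.Response from a registry
+// request):
+//
+//	body, err := iolimits.ReadAtMost(resp.Body, iolimits.MaxManifestBodySize)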
+func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { + limitedReader := io.LimitReader(reader, int64(limit+1)) + + res, err := io.ReadAll(limitedReader) + if err != nil { + return nil, err + } + + if len(res) > limit { + return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit) + } + + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/common.go b/vendor/github.com/containers/image/v5/internal/manifest/common.go new file mode 100644 index 00000000..1f2ccb52 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/common.go @@ -0,0 +1,72 @@ +package manifest + +import ( + "encoding/json" + "fmt" +) + +// AllowedManifestFields is a bit mask of “essential†manifest fields that ValidateUnambiguousManifestFormat +// can expect to be present. +type AllowedManifestFields int + +const ( + AllowedFieldConfig AllowedManifestFields = 1 << iota + AllowedFieldFSLayers + AllowedFieldHistory + AllowedFieldLayers + AllowedFieldManifests + AllowedFieldFirstUnusedBit // Keep this at the end! +) + +// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than +// one kind we currently recognize, i.e. if they contain any of the known “essential†format fields +// other than the ones the caller specifically allows. +// expectedMIMEType is used only for diagnostics. +// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format +// identification/version, or other “magic numbersâ€) before calling this, to cleanly reject unambiguous +// data that just isn’t what was expected, as opposed to actually ambiguous data. +func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, + allowed AllowedManifestFields) error { + if allowed >= AllowedFieldFirstUnusedBit { + return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) + } + // Use a private type to decode, not just a map[string]any, because we want + // to also reject case-insensitive matches (which would be used by Go when really decoding + // the manifest). + // (It is expected that as manifest formats are added or extended over time, more fields will be added + // here.) + detectedFields := struct { + Config any `json:"config"` + FSLayers any `json:"fsLayers"` + History any `json:"history"` + Layers any `json:"layers"` + Manifests any `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &detectedFields); err != nil { + // The caller was supposed to already validate version numbers, so this should not happen; + // let’s not bother with making this error “niceâ€. + return err + } + unexpected := []string{} + // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. 
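+	// Each check below follows the same pattern: if the field was present in the
+	// JSON but its AllowedField* bit is not set, record it as unexpected.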
+ if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 { + unexpected = append(unexpected, "config") + } + if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 { + unexpected = append(unexpected, "fsLayers") + } + if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 { + unexpected = append(unexpected, "history") + } + if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 { + unexpected = append(unexpected, "layers") + } + if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 { + unexpected = append(unexpected, "manifests") + } + if len(unexpected) != 0 { + return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, + unexpected, expectedMIMEType) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go new file mode 100644 index 00000000..68d07969 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go @@ -0,0 +1,15 @@ +package manifest + +import ( + "github.com/opencontainers/go-digest" +) + +// Schema2Descriptor is a “descriptor†in docker/distribution schema 2. +// +// This is publicly visible as c/image/manifest.Schema2Descriptor. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go new file mode 100644 index 00000000..357e2f3d --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -0,0 +1,312 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + platform "github.com/containers/image/v5/internal/pkg/platform" + compression "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +// This is publicly visible as c/image/manifest.Schema2PlatformSpec. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. +// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor. +type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2ListPublic is a list of platform-specific manifests. +// This is publicly visible as c/image/manifest.Schema2List. +// Internal users should usually use Schema2List instead. +type Schema2ListPublic struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. 
+func (list *Schema2ListPublic) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2ListPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. +func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range list.Manifests { + if manifest.Digest == instanceDigest { + ret := ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + } + ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} + platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) + ret.ReadOnly.Platform = &platform + return ret, nil + } + } + return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { + editInstances := []ListEdit{} + for i, instance := range updates { + editInstances = append(editInstances, ListEdit{ + UpdateOldDigest: index.Manifests[i].Digest, + UpdateDigest: instance.Digest, + UpdateSize: instance.Size, + UpdateMediaType: instance.MediaType, + ListOperation: ListOpUpdate}) + } + return index.editInstances(editInstances) +} + +func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []Schema2ManifestDescriptor{} + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + case ListOpAdd: + if editInstance.AddPlatform == nil { + // Should we create a struct with empty fields instead? + // Right now ListOpAdd is only called when an instance with the same platform value + // already exists in the manifest, so this should not be reached in practice. 
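+ // A platform value is required here because schema2 list entries serialize a mandatory "platform" member.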
+ return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported") + } + addedEntries = append(addedEntries, Schema2ManifestDescriptor{ + Schema2Descriptor{ + Digest: editInstance.AddDigest, + Size: editInstance.AddSize, + MediaType: editInstance.AddMediaType, + }, + schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform), + }) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) + } + } + if len(addedEntries) != 0 { + index.Manifests = append(index.Manifests, addedEntries...) + } + return nil +} + +func (index *Schema2List) EditInstances(editInstances []ListEdit) error { + return index.editInstances(editInstances) +} + +func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { + // ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list. + return list.ChooseInstance(ctx) +} + +// ChooseInstance parses blob as a schema2 manifest list, and returns the digest +// of the image which is appropriate for the current environment. +func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range list.Manifests { + imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the list in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (list *Schema2ListPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(list) + if err != nil { + return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) + } + return buf, nil +} + +// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the +// supplied data. +// This is publicly visible as c/image/manifest.Schema2ListFromComponents. +func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic { + list := Schema2ListPublic{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: slices.Clone(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: slices.Clone(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListPublicClone creates a deep copy of the passed-in list. +// This is publicly visible as c/image/manifest.Schema2ListClone. 
+func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
+ return Schema2ListPublicFromComponents(list.Manifests)
+}
+
+// ToOCI1Index returns the list encoded as an OCI1 index.
+func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
+ components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
+ for _, manifest := range list.Manifests {
+ platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
+ components = append(components, imgspecv1.Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: slices.Clone(manifest.URLs),
+ Platform: &platform,
+ })
+ }
+ oci := OCI1IndexPublicFromComponents(components, nil)
+ return oci, nil
+}
+
+// ToSchema2List returns the list encoded as a Schema2 list.
+func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
+ return Schema2ListPublicClone(list), nil
+}
+
+// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
+func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
+ list := Schema2ListPublic{
+ Manifests: []Schema2ManifestDescriptor{},
+ }
+ if err := json.Unmarshal(manifest, &list); err != nil {
+ return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
+ }
+ if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
+ AllowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &list, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (list *Schema2ListPublic) Clone() ListPublic {
+ return Schema2ListPublicClone(list)
+}
+
+// ConvertToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
+ switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
+ case DockerV2ListMediaType:
+ return list.Clone(), nil
+ case imgspecv1.MediaTypeImageIndex:
+ return list.ToOCI1Index()
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
+ default:
+ // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
+ }
+}
+
+// Schema2List is a list of platform-specific manifests.
+type Schema2List struct {
+ Schema2ListPublic
+}
+
+func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
+ return &Schema2List{*public}
+}
+
+func (index *Schema2List) CloneInternal() List {
+ return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+}
+
+func (index *Schema2List) Clone() ListPublic {
+ return index.CloneInternal()
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
+ public, err := Schema2ListPublicFromManifest(manifest)
+ if err != nil {
+ return nil, err
+ }
+ return schema2ListFromPublic(public), nil
+}
+
+// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
+func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
+ return imgspecv1.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ // Features is not supported in OCI, and discarded.
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
new file mode 100644
index 00000000..6c8e233d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
@@ -0,0 +1,56 @@
+package manifest
+
+import (
+ "fmt"
+
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType.
+// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
+const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+ // Callers should not be blindly calling image-specific operations and only checking MIME types
+ // on failure; if they care about the artifact type, they should check before using it.
+ // If they blindly assume an image, they don’t really need this value; just a type check
+ // is sufficient for basic "we can only pull images" UI.
+ //
+ // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+ // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+ // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+ // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+ // based on that kind of data.
+ //
+ // So, let’s not expose this until a specific need is identified.
+ mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
+//
+// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
+func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
+ // Callers decide based on manifest.Config.MediaType that this is not an image;
+ // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
+ // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
+ //
+ // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
+ // ArtifactType is the “top-level” one, although that’s not defined by the spec.
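+ // In short: prefer ArtifactType when it is set, and fall back to Config.MediaType otherwise.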
+ mimeType := manifest.ArtifactType
+ if mimeType == "" {
+ mimeType = manifest.Config.MediaType
+ }
+ return NonImageArtifactError{mimeType: mimeType}
+}
+
+func (e NonImageArtifactError) Error() string {
+ // Special-case these invalid mixed images, which show up from time to time:
+ if e.mimeType == dockerV2Schema2ConfigMediaType {
+ return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
+ }
+ return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go
new file mode 100644
index 00000000..189f1a71
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go
@@ -0,0 +1,131 @@
+package manifest
+
+import (
+ "fmt"
+
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ListPublic is a subset of List which is a part of the public API;
+// so no methods can be added, removed or changed.
+//
+// Internal users should usually use List instead.
+type ListPublic interface {
+ // MIMEType returns the MIME type of this particular manifest list.
+ MIMEType() string
+
+ // Instances returns a list of the manifests that this list knows of, other than its own.
+ Instances() []digest.Digest
+
+ // Update information about the list's instances. The length of the passed-in slice must
+ // match the length of the list of instances which the list already contains, and every field
+ // must be specified.
+ UpdateInstances([]ListUpdate) error
+
+ // Instance returns the size and MIME type of a particular instance in the list.
+ Instance(digest.Digest) (ListUpdate, error)
+
+ // ChooseInstance selects which manifest is most appropriate for the platform described by the
+ // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
+ ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
+
+ // Serialize returns the list in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
+ // from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+
+ // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
+ ConvertToMIMEType(mimeType string) (ListPublic, error)
+
+ // Clone returns a deep copy of this list and its contents.
+ Clone() ListPublic
+}
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List interface {
+ ListPublic
+ // CloneInternal returns a deep copy of this list and its contents.
+ CloneInternal() List
+ // ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
+ // SystemContext (or for the current platform if the SystemContext doesn't specify any details). preferGzip selects gzip-compressed
+ // instances when set to OptionalBoolTrue, and the best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
+ ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
+ // Edit information about the list's instances. The argument is a slice of ListEdit entries, each of
+ // which either modifies an existing instance or adds a new one to the manifest; the operation is
+ // selected by the configured ListOperation field.
+ EditInstances([]ListEdit) error
+}
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+// This is publicly visible as c/image/manifest.ListUpdate.
+type ListUpdate struct {
+ Digest digest.Digest
+ Size int64
+ MediaType string
+ // ReadOnly fields: may be set by Instance(), ignored by UpdateInstance()
+ ReadOnly struct {
+ Platform *imgspecv1.Platform
+ Annotations map[string]string
+ CompressionAlgorithmNames []string
+ }
+}
+
+type ListOp int
+
+const (
+ listOpInvalid ListOp = iota
+ ListOpAdd
+ ListOpUpdate
+)
+
+// ListEdit includes the fields which a List's EditInstances() method will modify.
+type ListEdit struct {
+ ListOperation ListOp
+
+ // If Op == ListOpUpdate (basically the previous UpdateInstances). All fields must be set.
+ UpdateOldDigest digest.Digest
+ UpdateDigest digest.Digest
+ UpdateSize int64
+ UpdateMediaType string
+ UpdateAffectAnnotations bool
+ UpdateAnnotations map[string]string
+ UpdateCompressionAlgorithms []compression.Algorithm
+
+ // If Op == ListOpAdd. All fields must be set.
+ AddDigest digest.Digest
+ AddSize int64
+ AddMediaType string
+ AddPlatform *imgspecv1.Platform
+ AddAnnotations map[string]string
+ AddCompressionAlgorithms []compression.Algorithm
+}
+
+// ListPublicFromBlob parses a list of manifests.
+// This is publicly visible as c/image/manifest.ListFromBlob.
+func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
+ list, err := ListFromBlob(manifest, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+ return list, nil
+}
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
+ normalized := NormalizedMIMEType(manifestMIMEType)
+ switch normalized {
+ case DockerV2ListMediaType:
+ return Schema2ListFromManifest(manifest)
+ case imgspecv1.MediaTypeImageIndex:
+ return OCI1IndexFromManifest(manifest)
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
+ return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
+ }
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
new file mode 100644
index 00000000..1dbcc141
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
@@ -0,0 +1,167 @@
+package manifest
+
+import (
+ "encoding/json"
+
+ "github.com/containers/libtrust"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+ DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+ // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+// This is publicly visible as c/image/manifest.GuessMIMEType.
+func GuessMIMEType(manifest []byte) string {
+ // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+ // Also docker/distribution/manifest.Versioned.
+ meta := struct {
+ MediaType string `json:"mediaType"`
+ SchemaVersion int `json:"schemaVersion"`
+ Signatures any `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(manifest, &meta); err != nil {
+ return ""
+ }
+
+ switch meta.MediaType {
+ case DockerV2Schema2MediaType, DockerV2ListMediaType,
+ imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
+ return meta.MediaType
+ }
+ // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
+ switch meta.SchemaVersion {
+ case 1:
+ if meta.Signatures != nil {
+ return DockerV2Schema1SignedMediaType
+ }
+ return DockerV2Schema1MediaType
+ case 2:
+ // Best effort to understand if this is an OCI image since mediaType
+ // wasn't in the manifest for OCI image-spec < 1.0.2.
+ // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
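+ // A sketch of the decision tree below: an OCI config media type implies an OCI manifest,
+ // a Docker v2s2 config media type implies a v2s2 manifest, a "manifests" array implies an
+ // image index, and anything else is assumed to be an OCI artifact.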
+ ociMan := struct {
+ Config struct {
+ MediaType string `json:"mediaType"`
+ } `json:"config"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociMan); err != nil {
+ return ""
+ }
+ switch ociMan.Config.MediaType {
+ case imgspecv1.MediaTypeImageConfig:
+ return imgspecv1.MediaTypeImageManifest
+ case DockerV2Schema2ConfigMediaType:
+ // This case should not happen since a Docker image
+ // must declare a top-level media type and
+ // `meta.MediaType` has already been checked.
+ return DockerV2Schema2MediaType
+ }
+ // Maybe an image index or an OCI artifact.
+ ociIndex := struct {
+ Manifests []imgspecv1.Descriptor `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+ return ""
+ }
+ if len(ociIndex.Manifests) != 0 {
+ if ociMan.Config.MediaType == "" {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ // FIXME: this is mixing media types of manifests and configs.
+ return ociMan.Config.MediaType
+ }
+ // It's most likely an OCI artifact with a custom config media
+ // type which is not (and cannot be) covered by the media-type
+ // checks above.
+ return imgspecv1.MediaTypeImageManifest
+ }
+ return ""
+}
+
+// Digest returns a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+// This is publicly visible as c/image/manifest.Digest.
+func Digest(manifest []byte) (digest.Digest, error) {
+ if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+ sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+ if err != nil {
+ return "", err
+ }
+ manifest, err = sig.Payload()
+ if err != nil {
+ // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+ // that libtrust itself has josebase64UrlEncode()d
+ return "", err
+ }
+ }
+
+ return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+// This is publicly visible as c/image/manifest.MatchesDigest.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+ // This should eventually support various digest types.
+ actualDigest, err := Digest(manifest)
+ if err != nil {
+ return false, err
+ }
+ return expectedDigest == actualDigest, nil
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+// This is publicly visible as c/image/manifest.NormalizedMIMEType.
+func NormalizedMIMEType(input string) string {
+ switch input {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
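+ // For example, NormalizedMIMEType("application/json") returns DockerV2Schema1SignedMediaType.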
+ case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized†in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go new file mode 100644 index 00000000..dcd2646d --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -0,0 +1,444 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "math" + "runtime" + + platform "github.com/containers/image/v5/internal/pkg/platform" + compression "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +const ( + // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index. + // The value of the annotation must be the string "true". + // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression. + // That also suggests that this instance benefits from + // Zstd compression, so it can be preferred by compatible consumers over instances that + // use gzip, depending on their local policy. + OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" + OCI1InstanceAnnotationCompressionZSTDValue = "true" +) + +// OCI1IndexPublic is just an alias for the OCI index type, but one which we can +// provide methods for. +// This is publicly visible as c/image/manifest.OCI1Index +// Internal users should usually use OCI1Index instead. +type OCI1IndexPublic struct { + imgspecv1.Index +} + +// MIMEType returns the MIME type of this particular manifest index. +func (index *OCI1IndexPublic) MIMEType() string { + return imgspecv1.MediaTypeImageIndex +} + +// Instances returns a slice of digests of the manifests that this index knows of. +func (index *OCI1IndexPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(index.Manifests)) + for i, m := range index.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the index. 
+func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
+ for _, manifest := range index.Manifests {
+ if manifest.Digest == instanceDigest {
+ ret := ListUpdate{
+ Digest: manifest.Digest,
+ Size: manifest.Size,
+ MediaType: manifest.MediaType,
+ }
+ ret.ReadOnly.Platform = manifest.Platform
+ ret.ReadOnly.Annotations = manifest.Annotations
+ ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
+ return ret, nil
+ }
+ }
+ return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
+}
+
+// UpdateInstances updates the sizes, digests, and media types of the manifests
+// which the list catalogs.
+func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
+ editInstances := []ListEdit{}
+ for i, instance := range updates {
+ editInstances = append(editInstances, ListEdit{
+ UpdateOldDigest: index.Manifests[i].Digest,
+ UpdateDigest: instance.Digest,
+ UpdateSize: instance.Size,
+ UpdateMediaType: instance.MediaType,
+ ListOperation: ListOpUpdate})
+ }
+ return index.editInstances(editInstances)
+}
+
+func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
+ result := make([]string, 0, 1)
+ if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
+ result = append(result, compression.ZstdAlgorithmName)
+ }
+ // No compression was detected, hence assume the instance has the default compression, `Gzip`.
+ if len(result) == 0 {
+ result = append(result, compression.GzipAlgorithmName)
+ }
+ return result
+}
+
+func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
+ // TODO: This should also delete the algorithm if the map already contains an algorithm and the compressionAlgorithms
+ // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
+ // and full compressionAlgorithms list.
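+ // The annotations map is allocated lazily, only when there is an algorithm that may need recording.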
+ if *annotationsMap == nil && len(compressionAlgorithms) > 0 { + *annotationsMap = map[string]string{} + } + for _, algo := range compressionAlgorithms { + switch algo.Name() { + case compression.ZstdAlgorithmName: + (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue + default: + continue + } + } +} + +func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { + addedEntries := []imgspecv1.Descriptor{} + updatedAnnotations := false + for i, editInstance := range editInstances { + switch editInstance.ListOperation { + case ListOpUpdate: + if err := editInstance.UpdateOldDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) + } + if err := editInstance.UpdateDigest.Validate(); err != nil { + return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) + } + targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool { + return m.Digest == editInstance.UpdateOldDigest + }) + if targetIndex == -1 { + return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest) + } + index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + if editInstance.UpdateSize < 0 { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) + } + index.Manifests[targetIndex].Size = editInstance.UpdateSize + if editInstance.UpdateMediaType == "" { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + } + index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + if editInstance.UpdateAnnotations != nil { + updatedAnnotations = true + if editInstance.UpdateAffectAnnotations { + index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations) + } else { + if index.Manifests[targetIndex].Annotations == nil { + index.Manifests[targetIndex].Annotations = map[string]string{} + } + maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) + } + } + addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations) + case ListOpAdd: + annotations := map[string]string{} + if editInstance.AddAnnotations != nil { + annotations = maps.Clone(editInstance.AddAnnotations) + } + addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) + addedEntries = append(addedEntries, imgspecv1.Descriptor{ + MediaType: editInstance.AddMediaType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations}) + default: + return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) + } + } + if len(addedEntries) != 0 { + index.Manifests = append(index.Manifests, addedEntries...) + } + if len(addedEntries) != 0 || updatedAnnotations { + slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { + // FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes + // into more algorithms? 
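+ // The sort is stable: non-zstd (e.g. gzip) instances sort before zstd instances,
+ // and the original relative order is otherwise preserved.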
+ aZstd := instanceIsZstd(a)
+ bZstd := instanceIsZstd(b)
+ switch {
+ case aZstd == bZstd:
+ return 0
+ case !aZstd: // Implies bZstd
+ return -1
+ default: // aZstd && !bZstd
+ return 1
+ }
+ })
+ }
+ return nil
+}
+
+func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
+ return index.editInstances(editInstances)
+}
+
+// instanceIsZstd returns true if instance is a zstd instance otherwise false.
+func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
+ if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
+ return true
+ }
+ return false
+}
+
+type instanceCandidate struct {
+ platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
+ isZstd bool // tells whether this particular instance is a zstd instance
+ manifestPosition int // A zero-based index of the instance in the manifest list
+ digest digest.Digest // Instance digest
+}
+
+func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
+ switch {
+ case ic.platformIndex != other.platformIndex:
+ return ic.platformIndex < other.platformIndex
+ case ic.isZstd != other.isZstd:
+ if !preferGzip {
+ return ic.isZstd
+ } else {
+ return !ic.isZstd
+ }
+ case ic.manifestPosition != other.manifestPosition:
+ return ic.manifestPosition < other.manifestPosition
+ }
+ panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
+}
+
+// chooseInstance is a private equivalent to ChooseInstanceByCompression,
+// shared by ChooseInstance and ChooseInstanceByCompression.
+func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ didPreferGzip := false
+ if preferGzip == types.OptionalBoolTrue {
+ didPreferGzip = true
+ }
+ wantedPlatforms, err := platform.WantedPlatforms(ctx)
+ if err != nil {
+ return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
+ }
+ var bestMatch *instanceCandidate
+ bestMatch = nil
+ for manifestIndex, d := range index.Manifests {
+ candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
+ if d.Platform != nil {
+ imagePlatform := ociPlatformClone(*d.Platform)
+ platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
+ return platform.MatchesPlatform(imagePlatform, wantedPlatform)
+ })
+ if platformIndex == -1 {
+ continue
+ }
+ candidate.platformIndex = platformIndex
+ }
+ if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
+ bestMatch = &candidate
+ }
+ }
+ if bestMatch != nil {
+ return bestMatch.digest, nil
+ }
+ return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+}
+
+func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
+ return index.chooseInstance(ctx, preferGzip)
+}
+
+// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
+// of the image which is appropriate for the current environment.
+func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + return index.chooseInstance(ctx, types.OptionalBoolFalse) +} + +// Serialize returns the index in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (index *OCI1IndexPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(index) + if err != nil { + return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) + } + return buf, nil +} + +// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the +// supplied data. +// This is publicly visible as c/image/manifest.OCI1IndexFromComponents. +func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic { + index := OCI1IndexPublic{ + imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: make([]imgspecv1.Descriptor, len(components)), + Annotations: maps.Clone(annotations), + }, + } + for i, component := range components { + var platform *imgspecv1.Platform + if component.Platform != nil { + platformCopy := ociPlatformClone(*component.Platform) + platform = &platformCopy + } + m := imgspecv1.Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, + } + index.Manifests[i] = m + } + return &index +} + +// OCI1IndexPublicClone creates a deep copy of the passed-in index. +// This is publicly visible as c/image/manifest.OCI1IndexClone. +func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { + return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations) +} + +// ToOCI1Index returns the index encoded as an OCI1 index. +func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) { + return OCI1IndexPublicClone(index), nil +} + +// ToSchema2List returns the index encoded as a Schema2 list. +func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { + components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) + for _, manifest := range index.Manifests { + platform := manifest.Platform + if platform == nil { + platform = &imgspecv1.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + } + components = append(components, Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: slices.Clone(manifest.URLs), + }, + schema2PlatformSpecFromOCIPlatform(*platform), + }) + } + s2 := Schema2ListPublicFromComponents(components) + return s2, nil +} + +// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest index. +// This is publicly visible as c/image/manifest.OCI1IndexFromManifest. 
+func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) { + index := OCI1IndexPublic{ + Index: imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: []imgspecv1.Descriptor{}, + Annotations: make(map[string]string), + }, + } + if err := json.Unmarshal(manifest, &index); err != nil { + return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) + } + if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, + AllowedFieldManifests); err != nil { + return nil, err + } + return &index, nil +} + +// Clone returns a deep copy of this list and its contents. +func (index *OCI1IndexPublic) Clone() ListPublic { + return OCI1IndexPublicClone(index) +} + +// ConvertToMIMEType converts the passed-in image index to a manifest list of +// the specified type. +func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} + +type OCI1Index struct { + OCI1IndexPublic +} + +func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index { + return &OCI1Index{*public} +} + +func (index *OCI1Index) CloneInternal() List { + return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic)) +} + +func (index *OCI1Index) Clone() ListPublic { + return index.CloneInternal() +} + +// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest list. +func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { + public, err := OCI1IndexPublicFromManifest(manifest) + if err != nil { + return nil, err + } + return oci1IndexFromPublic(public), nil +} + +// ociPlatformClone returns an independent copy of p. +func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform { + // The only practical way in Go to give read-only access to an array is to copy it. + // The only practical way in Go to copy a deep structure is to either do it manually field by field, + // or to use reflection (incl. a round-trip through JSON, which uses reflection). + // + // The combination of the two is just sad, and leads to code like this, which will + // need to be updated with every new Platform field. + return imgspecv1.Platform{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + } +} + +// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure. 
+func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
+ return Schema2PlatformSpec{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ OSVersion: p.OSVersion,
+ OSFeatures: slices.Clone(p.OSFeatures),
+ Variant: p.Variant,
+ Features: nil,
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
new file mode 100644
index 00000000..3ba0e408
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -0,0 +1,197 @@
+package platform
+
+// Largely based on
+// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
+// Copyright 2012-2017 Docker, Inc.
+//
+// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
+// Copyright The containerd Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// https://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/containers/image/v5/types"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/exp/slices"
+)
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+ if runtime.GOOS != "linux" {
+ return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
+ }
+
+ cpuinfo, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return "", err
+ }
+ defer cpuinfo.Close()
+
+ // Start to parse the cpuinfo line by line. For SMP SoC, parsing
+ // the first core is enough.
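+ // Each relevant /proc/cpuinfo line looks like "CPU architecture: 8"; the key before
+ // the colon is compared case-insensitively against pattern, and the trimmed value is returned.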
+ scanner := bufio.NewScanner(cpuinfo)
+ for scanner.Scan() {
+ newline := scanner.Text()
+ list := strings.Split(newline, ":")
+
+ if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+ return strings.TrimSpace(list[1]), nil
+ }
+ }
+
+ // Check whether the scanner encountered errors
+ err = scanner.Err()
+ if err != nil {
+ return "", err
+ }
+
+ return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
+}
+
+func getCPUVariantWindows(arch string) string {
+ // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+ // runtime.GOARCH to determine the variants
+ var variant string
+ switch arch {
+ case "arm64":
+ variant = "v8"
+ case "arm":
+ variant = "v7"
+ default:
+ variant = ""
+ }
+
+ return variant
+}
+
+func getCPUVariantArm() string {
+ variant, err := getCPUInfo("Cpu architecture")
+ if err != nil {
+ return ""
+ }
+ // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)
+
+ switch strings.ToLower(variant) {
+ case "8", "aarch64":
+ variant = "v8"
+ case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ variant = "v7"
+ case "6", "6tej":
+ variant = "v6"
+ case "5", "5t", "5te", "5tej":
+ variant = "v5"
+ case "4", "4t":
+ variant = "v4"
+ case "3":
+ variant = "v3"
+ default:
+ variant = ""
+ }
+
+ return variant
+}
+
+func getCPUVariant(os string, arch string) string {
+ if os == "windows" {
+ return getCPUVariantWindows(arch)
+ }
+ if arch == "arm" || arch == "arm64" {
+ return getCPUVariantArm()
+ }
+ return ""
+}
+
+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
+var compatibility = map[string][]string{
+ "arm": {"v8", "v7", "v6", "v5"},
+ "arm64": {"v8"},
+}
+
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by the user,
+// the most compatible platform is first.
+// If some option (arch, os, variant) is not present, a value from the current platform is detected.
+func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+ // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
+ // The fields are not specified by the OCI specification, as of version 1.1, usefully enough
+ // to be interoperable, anyway.
+
+ wantedArch := runtime.GOARCH
+ wantedVariant := ""
+ if ctx != nil && ctx.ArchitectureChoice != "" {
+ wantedArch = ctx.ArchitectureChoice
+ } else {
+ // Only auto-detect the variant if we are using the default architecture.
+ // If the user has specified the ArchitectureChoice, don't autodetect, even if
+ // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH
+ // value is relevant to the use case, and if we do autodetect a variant,
+ // ctx.VariantChoice can't be used to override it back to "".
+ wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH)
+ }
+ if ctx != nil && ctx.VariantChoice != "" {
+ wantedVariant = ctx.VariantChoice
+ }
+
+ wantedOS := runtime.GOOS
+ if ctx != nil && ctx.OSChoice != "" {
+ wantedOS = ctx.OSChoice
+ }
+
+ var variants []string = nil
+ if wantedVariant != "" {
+ // If the user requested a specific variant, we'll walk down
+ // the list from most to least compatible.
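+ // e.g. a request for arm/v6 yields the candidates ["v6", "v5", ""]: the requested variant,
+ // everything it is compatible with, and finally the empty variant.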
+ if variantOrder := compatibility[wantedArch]; variantOrder != nil { + if i := slices.Index(variantOrder, wantedVariant); i != -1 { + variants = variantOrder[i:] + } + } + if variants == nil { + // user wants a variant which we know nothing about - not even compatibility + variants = []string{wantedVariant} + } + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + } else { + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + // If available add the entire compatibility matrix for the specific architecture. + if possibleVariants, ok := compatibility[wantedArch]; ok { + variants = append(variants, possibleVariants...) + } + } + + res := make([]imgspecv1.Platform, 0, len(variants)) + for _, v := range variants { + res = append(res, imgspecv1.Platform{ + OS: wantedOS, + Architecture: wantedArch, + Variant: v, + }) + } + return res, nil +} + +// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches +// an item from the return value of WantedPlatforms. +func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { + return image.Architecture == wanted.Architecture && + image.OS == wanted.OS && + image.Variant == wanted.Variant +} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go new file mode 100644 index 00000000..95d561fc --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -0,0 +1,164 @@ +package private + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/signature" + compression "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// ImageSourceInternalOnly is the part of private.ImageSource that is not +// a part of types.ImageSource. +type ImageSourceInternalOnly interface { + // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. + SupportsGetBlobAt() bool + // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). + BlobChunkAccessor + + // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) +} + +// ImageSource is an internal extension to the types.ImageSource interface. +type ImageSource interface { + types.ImageSource + ImageSourceInternalOnly +} + +// ImageDestinationInternalOnly is the part of private.ImageDestination that is not +// a part of types.ImageDestination. +type ImageDestinationInternalOnly interface { + // SupportsPutBlobPartial returns true if PutBlobPartial is supported. + SupportsPutBlobPartial() bool + // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures + // on unsupported formats. + + // PutBlobWithOptions writes contents of stream and returns data representing the result. 
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
+
+ // PutBlobPartial attempts to create a blob using the data that is already present
+ // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+ // It is available only if SupportsPutBlobPartial().
+ // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+ // should fall back to PutBlobWithOptions.
+ PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
+
+ // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If the blob has been successfully reused, returns (true, info, nil).
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
+
+ // PutSignaturesWithFormat writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+ types.ImageDestination
+ ImageDestinationInternalOnly
+}
+
+// UploadedBlob is information about a blob written to a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
+type UploadedBlob struct {
+ Digest digest.Digest
+ Size int64
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
+ IsConfig bool // True if the blob is a config
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+ Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
+ // If true, it is allowed to use an equivalent of the desired blob;
+ // in that case the returned info may not match the input.
+ CanSubstitute bool
+
+ // The following fields are new to internal/private. Users of internal/private MUST fill them in,
+ // but they also must expect that they will be ignored by types.ImageDestination transports.
+ // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+ // if they use internal/imagedestination/impl.Compat;
+ // in that case, they will all be consistently zero-valued.
+ RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
+ OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
+ EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+ LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+ SrcRef reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ReusedBlob is information about a blob reused in a destination.
+// It is the subset of types.BlobInfo fields the transport is responsible for setting.
+type ReusedBlob struct {
+ Digest digest.Digest // Must be provided
+ Size int64 // Must be provided
+ // The following compression fields should be set when the reuse substitutes
+ // a differently-compressed blob.
+ CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
+ CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+ Offset uint64
+ Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+ // GetBlobAt returns a sequential channel of readers that contain data for the requested
+ // blob chunks, and a channel that might get a single error value.
+ // The specified chunks must not overlap, and must be sorted by their offset.
+ // The readers must be fully consumed, in the order they are returned, before blocking
+ // to read the next chunk.
+ GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+ Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+ return e.Status
+}
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
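+// (A hedged usage sketch, not part of the upstream file; `u` is assumed to come
+// from a transport implementation or from internal/unparsedimage.FromPublic, and
+// ctx is a context.Context supplied by the caller:
+//
+//  sigs, err := u.UntrustedSignatures(ctx) // cached, so repeated calls are cheap
+//
+// )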
+type UnparsedImage interface {
+ types.UnparsedImage
+ // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+ UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
new file mode 100644
index 00000000..b8d3a7e5
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go
@@ -0,0 +1,57 @@
+package putblobdigest
+
+import (
+ "io"
+
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Digester computes a digest of the provided stream, if not known yet.
+type Digester struct {
+ knownDigest digest.Digest // Or ""
+ digester digest.Digester // Or nil
+}
+
+// newDigester initiates computation of a digest.Canonical digest of stream,
+// if !validDigest; otherwise it just records knownDigest to be returned later.
+// The caller MUST use the returned stream instead of the original value.
+func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) {
+ if validDigest {
+ return Digester{knownDigest: knownDigest}, stream
+ } else {
+ res := Digester{
+ digester: digest.Canonical.Digester(),
+ }
+ stream = io.TeeReader(stream, res.digester.Hash())
+ return res, stream
+ }
+}
+
+// DigestIfUnknown initiates computation of a digest.Canonical digest of stream,
+// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will
+// be used (accepting any algorithm).
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "")
+}
+
+// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream,
+// if a digest.Canonical digest is not supplied in the provided blobInfo;
+// otherwise blobInfo.Digest will be used.
+// The caller MUST use the returned stream instead of the original value.
+func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) {
+ d := blobInfo.Digest
+ return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical)
+}
+
+// Digest() returns a digest value possibly computed by Digester.
+// This must be called only after all of the stream returned by a Digester constructor
+// has been successfully read.
+func (d Digester) Digest() digest.Digest {
+ if d.digester != nil {
+ return d.digester.Digest()
+ }
+ return d.knownDigest
+}
diff --git a/vendor/github.com/containers/image/v5/internal/rootless/rootless.go b/vendor/github.com/containers/image/v5/internal/rootless/rootless.go
new file mode 100644
index 00000000..80623bfb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/rootless/rootless.go
@@ -0,0 +1,25 @@
+package rootless
+
+import (
+ "os"
+ "strconv"
+)
+
+// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any)
+//
+// Podman and similar software, in “rootless” configuration, when run as a non-root
+// user, very early switches to a user namespace, where Geteuid() == 0 (but does not
+// switch to a limited mount namespace); so, code relying on Geteuid() would use
+// system-wide paths in e.g.
/var, when the user is actually not privileged to write to +// them, and expects state to be stored in the home directory. +// +// If Podman is setting up such a user namespace, it records the original UID in an +// environment variable, allowing us to make choices based on the actual user’s identity. +func GetRootlessEUID() int { + euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if euidEnv != "" { + euid, _ := strconv.Atoi(euidEnv) + return euid + } + return os.Geteuid() +} diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go new file mode 100644 index 00000000..acf30343 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -0,0 +1,52 @@ +package set + +import "golang.org/x/exp/maps" + +// FIXME: +// - Docstrings +// - This should be in a public library somewhere + +type Set[E comparable] struct { + m map[E]struct{} +} + +func New[E comparable]() *Set[E] { + return &Set[E]{ + m: map[E]struct{}{}, + } +} + +func NewWithValues[E comparable](values ...E) *Set[E] { + s := New[E]() + for _, v := range values { + s.Add(v) + } + return s +} + +func (s *Set[E]) Add(v E) { + s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. +} + +func (s *Set[E]) AddSlice(slice []E) { + for _, v := range slice { + s.Add(v) + } +} + +func (s *Set[E]) Delete(v E) { + delete(s.m, v) +} + +func (s *Set[E]) Contains(v E) bool { + _, ok := s.m[v] + return ok +} + +func (s *Set[E]) Empty() bool { + return len(s.m) == 0 +} + +func (s *Set[E]) Values() []E { + return maps.Keys(s.m) +} diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go new file mode 100644 index 00000000..6f95115a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go @@ -0,0 +1,102 @@ +package signature + +import ( + "bytes" + "errors" + "fmt" +) + +// FIXME FIXME: MIME type? Int? String? +// An interface with a name, parse methods? +type FormatID string + +const ( + SimpleSigningFormat FormatID = "simple-signing" + SigstoreFormat FormatID = "sigstore-json" + // Update also UnsupportedFormatError below +) + +// Signature is an image signature of some kind. +type Signature interface { + FormatID() FormatID + // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. + // Almost everyone should use signature.Blob() instead. + blobChunk() ([]byte, error) +} + +// Blob returns a representation of sig as a []byte, suitable for long-term storage. +func Blob(sig Signature) ([]byte, error) { + chunk, err := sig.blobChunk() + if err != nil { + return nil, err + } + + format := sig.FormatID() + switch format { + case SimpleSigningFormat: + // For compatibility with old dir formats: + return chunk, nil + default: + res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text. + res = append(res, []byte(format)...) + res = append(res, '\n') + res = append(res, chunk...) + return res, nil + } +} + +// FromBlob returns a signature from parsing a blob created by signature.Blob. +func FromBlob(blob []byte) (Signature, error) { + if len(blob) == 0 { + return nil, errors.New("empty signature blob") + } + // Historically we’ve just been using GPG with no identification; try to auto-detect that. 
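+ // (Worked example, added for clarity and not part of the upstream file:
+ // 0x88 = 0b10001000, so bit 7 = 1, bit 6 = 0 (old packet format),
+ // bits 5…2 = 0b0010 = 2 (tag: signature packet), bits 1…0 = 0b00 (length-type 0);
+ // 0x88…0x8B differ only in the length-type bits, which is why they all map to
+ // the signature-packet case below.)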
+ switch blob[0] {
+ // OpenPGP "compressed data" wrapping the message
+ case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+ 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+ // OpenPGP “one-pass signature” starting a signature
+ 0x90, 0x91, 0x92, 0x3d, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+ 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+ // OpenPGP signature packet signing the following data
+ 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+ 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+ return SimpleSigningFromBlob(blob), nil
+
+ // The newer format: binary 0, format name, newline, data
+ case 0x00:
+ blob = blob[1:]
+ formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
+ if !foundNewline {
+ return nil, fmt.Errorf("invalid signature format, missing newline")
+ }
+ for _, b := range formatBytes {
+ if b < 32 || b >= 0x7F {
+ return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+ }
+ }
+ switch {
+ case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+ return SimpleSigningFromBlob(blobChunk), nil
+ case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+ return sigstoreFromBlobChunk(blobChunk)
+ default:
+ return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+ }
+
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+ formatID := sig.FormatID()
+ switch formatID {
+ case SimpleSigningFormat, SigstoreFormat:
+ return fmt.Errorf("unsupported signature format %s", string(formatID))
+ default:
+ return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+ }
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
new file mode 100644
index 00000000..b8a9b366
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
@@ -0,0 +1,87 @@
+package signature
+
+import (
+ "encoding/json"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ // from sigstore/cosign/pkg/types.SimpleSigningMediaType
+ SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+ // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+ SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+ // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
+ SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
+ // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
+ SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
+ // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
+ SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
+)
+
+// Sigstore is a github.com/cosign/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
+type Sigstore struct {
+ untrustedMIMEType string
+ untrustedPayload []byte
+ untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the files to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+ UntrustedMIMEType string `json:"mimeType"`
+ UntrustedPayload []byte `json:"payload"`
+ UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+ return Sigstore{
+ untrustedMIMEType: untrustedMimeType,
+ untrustedPayload: slices.Clone(untrustedPayload),
+ untrustedAnnotations: maps.Clone(untrustedAnnotations),
+ }
+}
+
+// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+ var v sigstoreJSONRepresentation
+ if err := json.Unmarshal(blobChunk, &v); err != nil {
+ return Sigstore{}, err
+ }
+ return SigstoreFromComponents(v.UntrustedMIMEType,
+ v.UntrustedPayload,
+ v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+ return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+ return json.Marshal(sigstoreJSONRepresentation{
+ UntrustedMIMEType: s.UntrustedMIMEType(),
+ UntrustedPayload: s.UntrustedPayload(),
+ UntrustedAnnotations: s.UntrustedAnnotations(),
+ })
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+ return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+ return slices.Clone(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+ return maps.Clone(s.untrustedAnnotations)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go
new file mode 100644
index 00000000..c0937040
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go
@@ -0,0 +1,29 @@
+package signature
+
+import "golang.org/x/exp/slices"
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+ untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+ return SimpleSigning{
+ untrustedSignature: slices.Clone(blobChunk),
+ }
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+ return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
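+// (Illustrative note, not from the upstream file: for this format blobChunk
+// returns the raw signature bytes unchanged, and Blob() passes them through
+// as-is for compatibility with old dir formats, so
+// Blob(SimpleSigningFromBlob(b)) round-trips b exactly.)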
+func (s SimpleSigning) blobChunk() ([]byte, error) { + return slices.Clone(s.untrustedSignature), nil +} + +func (s SimpleSigning) UntrustedSignature() []byte { + return slices.Clone(s.untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/internal/signer/signer.go b/vendor/github.com/containers/image/v5/internal/signer/signer.go new file mode 100644 index 00000000..5720254d --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/signer/signer.go @@ -0,0 +1,47 @@ +package signer + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/signature" +) + +// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This type is visible to external callers, so it has no public fields or methods apart from Close(). +// +// The owner of a Signer must call Close() when done. +type Signer struct { + implementation SignerImplementation +} + +// NewSigner creates a public Signer from a SignerImplementation +func NewSigner(impl SignerImplementation) *Signer { + return &Signer{implementation: impl} +} + +func (s *Signer) Close() error { + return s.implementation.Close() +} + +// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. +// Alternatively, should SignImageManifest be provided a logging writer of some kind? +func ProgressMessage(signer *Signer) string { + return signer.implementation.ProgressMessage() +} + +// SignImageManifest invokes a SignerImplementation. +// This is a function, not a method, so that it can only be called by code that is allowed to import this internal subpackage. +func SignImageManifest(ctx context.Context, signer *Signer, manifest []byte, dockerReference reference.Named) (signature.Signature, error) { + return signer.implementation.SignImageManifest(ctx, manifest, dockerReference) +} + +// SignerImplementation is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images. +// This interface is distinct from Signer so that implementations can be created outside of this package. +type SignerImplementation interface { + // ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature. + ProgressMessage() string + // SignImageManifest creates a new signature for manifest m as dockerReference. + SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) + Close() error +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go new file mode 100644 index 00000000..d5a5436a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -0,0 +1,40 @@ +package streamdigest + +import ( + "fmt" + "io" + "os" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" +) + +// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. +// The temporary file is returned as an io.Reader along with a cleanup function. +// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. +// If an error occurs, inputInfo is not modified. 
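+//
+// A hedged usage sketch (caller-side, not part of this package; the names
+// stream and sys are assumptions):
+//
+//  info := types.BlobInfo{Digest: "", Size: -1}
+//  reader, cleanup, err := streamdigest.ComputeBlobInfo(sys, stream, &info)
+//  if err != nil {
+//   return err
+//  }
+//  defer cleanup()
+//  // info.Digest and info.Size are now set; consume the blob from reader.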
+func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
+ diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
+ if err != nil {
+ return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
+ }
+ cleanup := func() {
+ diskBlob.Close()
+ os.Remove(diskBlob.Name())
+ }
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
+ written, err := io.Copy(diskBlob, stream)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
+ }
+ _, err = diskBlob.Seek(0, io.SeekStart)
+ if err != nil {
+ cleanup()
+ return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
+ }
+ inputInfo.Digest = digester.Digest()
+ inputInfo.Size = written
+ return diskBlob, cleanup, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
new file mode 100644
index 00000000..bab73ee3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go
@@ -0,0 +1,44 @@
+package tmpdir
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/containers/image/v5/types"
+)
+
+// unixTempDirForBigFiles is the directory path to store big files on non Windows systems.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path'
+var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
+
+// builtinUnixTempDirForBigFiles is the directory path to store big files.
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+// DO NOT change this, instead see unixTempDirForBigFiles above.
+const builtinUnixTempDirForBigFiles = "/var/tmp"
+
+const prefix = "container_images_"
+
+// temporaryDirectoryForBigFiles returns a directory for temporary (big) files.
+// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
+// which on systemd based systems could be the unsuitable tmpfs filesystem.
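+// (Illustrative note, not from the upstream file: callers normally go through
+// the helpers below, e.g. tmpdir.CreateBigFileTemp(sys, "blob") creates a file
+// under sys.BigFilesTemporaryDir if set, otherwise under /var/tmp on Unix or
+// os.TempDir() on Windows.)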
+func temporaryDirectoryForBigFiles(sys *types.SystemContext) string { + if sys != nil && sys.BigFilesTemporaryDir != "" { + return sys.BigFilesTemporaryDir + } + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = unixTempDirForBigFiles + } + return temporaryDirectoryForBigFiles +} + +func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) { + return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} + +func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) { + return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go new file mode 100644 index 00000000..fe65b1a9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go @@ -0,0 +1,38 @@ +package unparsedimage + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" +) + +// wrapped provides the private.UnparsedImage operations +// for an object that only implements types.UnparsedImage +type wrapped struct { + types.UnparsedImage +} + +// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API +func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage { + if unparsed2, ok := unparsed.(private.UnparsedImage); ok { + return unparsed2 + } + return &wrapped{ + UnparsedImage: unparsed, + } +} + +// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. +func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) { + sigs, err := w.Signatures(ctx) + if err != nil { + return nil, err + } + res := []signature.Signature{} + for _, sig := range sigs { + res = append(res, signature.SimpleSigningFromBlob(sig)) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go new file mode 100644 index 00000000..b95370af --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go @@ -0,0 +1,61 @@ +package uploadreader + +import ( + "io" + "sync" +) + +// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http +// package (http.NewRequest, http.Post and the like). +// +// The net/http package uses a separate goroutine to upload data to a HTTP connection, +// and it is possible for the server to return a response (typically an error) before consuming +// the full body of the request. In that case http.Client.Do can return with an error while +// the body is still being read — regardless of the cancellation, if any, of http.Request.Context(). +// +// As a result, any data used/updated by the io.Reader() provided as the request body may be +// used/updated even after http.Client.Do returns, causing races. 
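+//
+// A hedged usage sketch (not part of the upstream file; client, url and body
+// are assumptions, and Terminate is explained in the next paragraph):
+//
+//  ur := uploadreader.NewUploadReader(body)
+//  req, err := http.NewRequest(http.MethodPut, url, ur)
+//  ...
+//  resp, err := client.Do(req)
+//  ur.Terminate(errors.New("upload finished")) // body is never read after this returns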
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
+type UploadReader struct {
+ mutex sync.Mutex
+ // The following members can only be used with mutex held
+ reader io.Reader
+ terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+ return &UploadReader{
+ reader: underlying,
+ terminationError: nil,
+ }
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+ ur.mutex.Lock()
+ defer ur.mutex.Unlock()
+
+ if ur.terminationError != nil {
+ return 0, ur.terminationError
+ }
+ return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+ ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+ defer ur.mutex.Unlock()
+
+ ur.terminationError = err
+}
diff --git a/vendor/github.com/containers/image/v5/internal/useragent/useragent.go b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go
new file mode 100644
index 00000000..7ac49693
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go
@@ -0,0 +1,6 @@
+package useragent
+
+import "github.com/containers/image/v5/version"
+
+// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise.
+var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
new file mode 100644
index 00000000..1bdcf3d3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -0,0 +1,152 @@
+package manifest
+
+import (
+ "fmt"
+
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+ layers := make([]string, len(infos))
+ for i, info := range infos {
+ layers[i] = info.Digest.String()
+ }
+ return layers
+}
+
+// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
+// versions of “the same kind of content”.
+// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
+// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
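+// For example (mirroring schema2CompressionMIMETypeSets later in this patch):
+//
+//  {
+//   mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+//   compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+//   compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType, // recognized but unsupported
+//  }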
+type compressionMIMETypeSet map[string]string
+
+const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
+const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+
+// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found
+func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
+ for _, variants := range variantTable {
+ for _, mt := range variants {
+ if mt == mimeType {
+ return variants
+ }
+ }
+ }
+ return nil
+}
+
+// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
+// to mean "no compression"), based on variantTable.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but it can't be combined with this
+// algorithm to produce an updated MIME type that complies with the standard that defines mimeType.
+// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
+// differ from it only in what type of compression has been applied, the returned error will not be
+// a ManifestLayerCompressionIncompatibilityError.
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
+ if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+ return "", fmt.Errorf("cannot update unknown MIME type")
+ }
+ variants := findCompressionMIMETypeSet(variantTable, mimeType)
+ if variants != nil {
+ name := mtsUncompressed
+ if algorithm != nil {
+ name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+ }
+ if res, ok := variants[name]; ok {
+ if res != mtsUnsupportedMIMEType {
+ return res, nil
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
+ }
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if name != mtsUncompressed {
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
+ }
+ // We can't very well say “the idea of no compression is unknown”
+ return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
+ }
+ if algorithm != nil {
+ return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+ }
+ return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+}
+
+// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but applying updated.CompressionOperation
+// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the
+// standard that defines mimeType.
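+//
+// (Summary added for clarity, derived from the switch statement below:
+//
+//  PreserveOriginal, algorithm unset -> keep mimeType unchanged
+//  PreserveOriginal, algorithm set   -> variant for that algorithm
+//  Decompress                        -> uncompressed variant
+//  Compress, algorithm unset         -> keep mimeType, log a debug message
+//  Compress, algorithm set           -> variant for that algorithm)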
+func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { + // Note that manifests in containers-storage might be reporting the + // wrong media type since the original manifests are stored while layers + // are decompressed in storage. Hence, we need to consider the case + // that an already {de}compressed layer should be {de}compressed; + // compressionVariantMIMEType does that by not caring whether the original is + // {de}compressed. + switch updated.CompressionOperation { + case types.PreserveOriginal: + // Force a change to the media type if we're being told to use a particular compressor, + // since it might be different from the one associated with the media type. Otherwise, + // try to keep the original media type. + if updated.CompressionAlgorithm != nil { + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + } + // Keep the original media type. + return mimeType, nil + + case types.Decompress: + return compressionVariantMIMEType(variantTable, mimeType, nil) + + case types.Compress: + if updated.CompressionAlgorithm == nil { + logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest) + return mimeType, nil + } + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + + default: + return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) + } +} + +// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm +// could not be applied to a layer MIME type. A caller that receives this should either retry +// the call with a different compression algorithm, or attempt to use a different manifest type. +type ManifestLayerCompressionIncompatibilityError struct { + text string +} + +func (m ManifestLayerCompressionIncompatibilityError) Error() string { + return m.text +} + +// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType +// Note that the caller still needs to worry about a specific algorithm not being supported. +func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool { + if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries + return false + } + variants := findCompressionMIMETypeSet(variantTable, mimeType) + return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. +} + +// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. 
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+ layers := make([]types.ImageInspectLayer, len(infos))
+ for i, info := range infos {
+ layers[i].MIMEType = info.MediaType
+ layers[i].Digest = info.Digest
+ layers[i].Size = info.Size
+ layers[i].Annotations = info.Annotations
+ }
+ return layers
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
new file mode 100644
index 00000000..a80af701
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -0,0 +1,331 @@
+package manifest
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/regexp"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/slices"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []Schema1FSLayers `json:"fsLayers"`
+ History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+ ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+ Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
+ s1 := Schema1{}
+ if err := json.Unmarshal(manifestBlob, &s1); err != nil {
+ return nil, err
+ }
+ if s1.SchemaVersion != 1 {
+ return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
+ manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
+ return nil, err
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ if err := s1.fixManifestLayers(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ s1 := Schema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+ if err := s1.initialize(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+ copy := *src
+ return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+ if len(m.FSLayers) != len(m.History) {
+ return errors.New("length of history not equal to number of layers")
+ }
+ if len(m.FSLayers) == 0 {
+ return errors.New("no FSLayers in manifest")
+ }
+ m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+ for i, h := range m.History {
+ if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+ return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
+ }
+ }
+ return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo {
+ layers := make([]LayerInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+ EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+ }
+ }
+ return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) {
+ return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ }
+ m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
+ for i, info := range layerInfos {
+ // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
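+ // (Clarifying note, not from upstream: layerInfos is ordered root-first, as
+ // produced by LayerInfos(), while m.FSLayers is stored most-recent-first,
+ // hence the (len(layerInfos)-1)-i index reversal below.)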
+ m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest) + } + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := set.New[string]() + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if img.ID != lastID && idmap.Contains(img.ID) { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap.Add(lastID) + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue + m.FSLayers = slices.Delete(m.FSLayers, i, i+1) + m.History = slices.Delete(m.History, i, i+1) + m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.Delayed(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
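+// (Clarifying note, not from upstream: the config-getter argument is ignored for
+// schema1, because the configuration is embedded in History[0].V1Compatibility
+// rather than stored as a separate blob.)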
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + layerInfos := m.LayerInfos() + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Variant: s1.Variant, + Os: s1.OS, + Layers: layerInfosToStrings(layerInfos), + LayersData: imgInspectLayersFromLayerInfos(layerInfos), + Author: s1.Author, + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, fmt.Errorf("decoding configuration: %w", err) + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. 
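+// (Clarifying note, not from upstream: for schema1 this is the hex digest of the
+// schema2-style config produced by ToSchema2Config(diffIDs), so the result
+// depends on the supplied diffIDs.)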
+func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
+ image, err := m.ToSchema2Config(diffIDs)
+ if err != nil {
+ return "", err
+ }
+ return digest.FromBytes(image).Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
new file mode 100644
index 00000000..20b721f4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -0,0 +1,306 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/v5/internal/manifest"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+ "github.com/containers/image/v5/pkg/strslice"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+type Schema2Descriptor = manifest.Schema2Descriptor
+
+// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
+func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
+ return types.BlobInfo{
+ Digest: desc.Digest,
+ Size: desc.Size,
+ URLs: desc.URLs,
+ MediaType: desc.MediaType,
+ }
+}
+
+// Schema2 is a manifest in docker/distribution schema 2.
+type Schema2 struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor Schema2Descriptor `json:"config"`
+ LayersDescriptors []Schema2Descriptor `json:"layers"`
+}
+
+// Schema2Port is a Port, a string containing port number and protocol in the
+// format "80/tcp", from docker/go-connections/nat.
+type Schema2Port string
+
+// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
+// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{}
+
+// Schema2HealthConfig is a HealthConfig, which holds configuration settings
+// for the HEALTHCHECK feature, from docker/docker/api/types/container.
+type Schema2HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Schema2V1Image is a V1Image in docker/docker/image. 
+type Schema2V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Schema2Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Schema2Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // Variant is a variant of the CPU that the image is built and runs on
+ Variant string `json:"variant,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+ Schema2V1Image
+ Parent digest.Digest `json:"parent,omitempty"`
+ RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+ History []Schema2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
+ s2 := Schema2{}
+ if err := json.Unmarshal(manifestBlob, &s2); err != nil {
+ return nil, err
+ }
+ if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
+ manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
+ return nil, err
+ }
+ // Check manifest's and layers' media types.
+	if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+		return nil, err
+	}
+	for _, layer := range s2.LayersDescriptors {
+		if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+			return nil, err
+		}
+	}
+	return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+	return &Schema2{
+		SchemaVersion: 2,
+		MediaType: DockerV2Schema2MediaType,
+		ConfigDescriptor: config,
+		LayersDescriptors: layers,
+	}
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+	copy := *src
+	return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo: BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+	{
+		mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+		compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+	},
+	{
+		mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+		compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+	},
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.LayersDescriptors) != len(layerInfos) {
+		return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+	}
+	original := m.LayersDescriptors
+	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		mimeType := original[i].MediaType
+		// First make sure we support the media type of the original layer.
+ if err := SupportedSchema2MediaType(mimeType); err != nil { + return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType) + } + mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info) + if err != nil { + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) + } + m.LayersDescriptors[i].MediaType = mimeType + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest) + } + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + layerInfos := m.LayerInfos() + i := &types.ImageInspectInfo{ + Tag: "", + Created: &s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Variant: s2.Variant, + Os: s2.OS, + Layers: layerInfosToStrings(layerInfos), + LayersData: imgInspectLayersFromLayerInfos(layerInfos), + Author: s2.Author, + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + i.Env = s2.Config.Env + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion. +func (m *Schema2) CanChangeLayerCompression(mimeType string) bool { + return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType) +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go new file mode 100644 index 00000000..c958a3fa --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -0,0 +1,32 @@ +package manifest + +import ( + "github.com/containers/image/v5/internal/manifest" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +type Schema2PlatformSpec = manifest.Schema2PlatformSpec + +// Schema2ManifestDescriptor references a platform-specific manifest. +type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor + +// Schema2List is a list of platform-specific manifests. 
+type Schema2List = manifest.Schema2ListPublic
+
+// Schema2ListFromComponents creates a Schema2 manifest list instance from the
+// supplied data.
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
+	return manifest.Schema2ListPublicFromComponents(components)
+}
+
+// Schema2ListClone creates a deep copy of the passed-in list.
+func Schema2ListClone(list *Schema2List) *Schema2List {
+	return manifest.Schema2ListPublicClone(list)
+}
+
+// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
+// JSON, presumably generated by encoding a Schema2 manifest list.
+func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
+	return manifest.Schema2ListPublicFromManifest(manifestBlob)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go
new file mode 100644
index 00000000..1d6fdc9f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/list.go
@@ -0,0 +1,35 @@
+package manifest
+
+import (
+	"github.com/containers/image/v5/internal/manifest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	// SupportedListMIMETypes is a list of the manifest list types that we know how to
+	// read/manipulate/write.
+	SupportedListMIMETypes = []string{
+		DockerV2ListMediaType,
+		imgspecv1.MediaTypeImageIndex,
+	}
+)
+
+// List is an interface for parsing and modifying lists of image manifests.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
+// directly.
+type List = manifest.ListPublic
+
+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
+type ListUpdate = manifest.ListUpdate
+
+// ListFromBlob parses a list of manifests.
+func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
+	return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
+}
+
+// ConvertListToMIMEType converts the passed-in manifest list to a manifest
+// list of the specified type.
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+	return list.ConvertToMIMEType(manifestMIMEType)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
new file mode 100644
index 00000000..959aac93
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -0,0 +1,170 @@
+package manifest
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/libtrust"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
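As a quick orientation for reviewers of this vendored API, here is a minimal editorial sketch (not part of the vendored file; the manifest.json path and panic-based error handling are illustrative assumptions only) of how the GuessMIMEType, FromBlob, and LayerInfos entry points defined below fit together:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// Hypothetical input: a raw manifest blob fetched or read elsewhere.
	blob, err := os.ReadFile("manifest.json")
	if err != nil {
		panic(err)
	}
	mt := manifest.GuessMIMEType(blob) // "" if unrecognized
	m, err := manifest.FromBlob(blob, mt)
	if err != nil {
		panic(err) // e.g. manifest lists are rejected by FromBlob
	}
	// Layers are reported in order, root layer first; Size may be -1.
	for _, l := range m.LayerInfos() {
		fmt.Println(l.Digest, l.Size)
	}
}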
+const (
+	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+	DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
+	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+	DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
+	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+	DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
+	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+	DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
+	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+	DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
+	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+	DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+	DockerV2ListMediaType = manifest.DockerV2ListMediaType
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+)
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
+type NonImageArtifactError = manifest.NonImageArtifactError
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+	switch m {
+	case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+		return nil
+	default:
+		return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+	}
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+	imgspecv1.MediaTypeImageManifest,
+	DockerV2Schema2MediaType,
+	DockerV2Schema1SignedMediaType,
+	DockerV2Schema1MediaType,
+	DockerV2ListMediaType,
+	imgspecv1.MediaTypeImageIndex,
+}
+
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
+type Manifest interface {
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	ConfigInfo() types.BlobInfo
+	// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []LayerInfo
+	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+	UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+	// ImageID computes an ID which can uniquely identify this image by its contents, irrespective
+	// of which (of possibly more than one simultaneously valid) reference was used to locate the
+	// image, and unchanged by whether or how the layers are compressed. The result takes the form
+	// of the hexadecimal portion of a digest.Digest.
+	ImageID(diffIDs []digest.Digest) (string, error)
+
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest,
+	// incorporating information from a configuration blob returned by configGetter, if
+	// the underlying image format is expected to include a configuration blob.
+	Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+	// Serialize returns the manifest in a blob format.
+	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+	Serialize() ([]byte, error)
+}
+
+// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
+type LayerInfo struct {
+	types.BlobInfo
+	EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifestBlob []byte) string {
+	return manifest.GuessMIMEType(manifestBlob)
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifestBlob []byte) (digest.Digest, error) {
+	return manifest.Digest(manifestBlob)
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
+	return manifest.MatchesDigest(manifestBlob, expectedDigest)
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
+
+// MIMETypeIsMultiImage returns true if mimeType is a list of images
+func MIMETypeIsMultiImage(mimeType string) bool {
+	return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
+}
+
+// MIMETypeSupportsEncryption returns true if the mimeType supports encryption
+func MIMETypeSupportsEncryption(mimeType string) bool {
+	return mimeType == imgspecv1.MediaTypeImageManifest
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+	return manifest.NormalizedMIMEType(input)
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+	nmt := NormalizedMIMEType(mt)
+	switch nmt {
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+		return Schema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return OCI1FromManifest(manblob)
+	case DockerV2Schema2MediaType:
+		return Schema2FromManifest(manblob)
+	case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex:
+		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+	}
+	// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+	return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
new file mode 100644
index 00000000..a85641c3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -0,0 +1,274 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/internal/manifest"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/image/v5/types"
+	ociencspec "github.com/containers/ocicrypt/spec"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/exp/slices"
+)
+
+// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest: desc.Digest,
+		Size: desc.Size,
+		URLs: desc.URLs,
+		Annotations: desc.Annotations,
+		MediaType: desc.MediaType,
+	}
+}
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+	imgspecv1.Manifest
+}
+
+// SupportedOCI1MediaType checks if the specified string is a supported OCI1
+// media type.
+//
+// Deprecated: blindly rejecting unknown MIME types when the consumer does not
+// need to process the input just reduces interoperability (and violates the
+// standard) with no benefit, and this function does not check that the
+// media type is appropriate for any specific purpose, so it’s not all that
+// useful for validation anyway.
+func SupportedOCI1MediaType(m string) error {
+	switch m {
+	case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig,
+		imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd,
+		imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+		imgspecv1.MediaTypeImageManifest,
+		imgspecv1.MediaTypeLayoutHeader,
+		ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
+		return nil
+	default:
+		return fmt.Errorf("unsupported OCIv1 media type: %q", m)
+	}
+}
+
+// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
+func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
+	oci1 := OCI1{}
+	if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
+		return nil, err
+	}
+	if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
+		manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
+		return nil, err
+	}
+	return &oci1, nil
+}
+
+// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
+	return &OCI1{
+		imgspecv1.Manifest{
+			Versioned: specs.Versioned{SchemaVersion: 2},
+			MediaType: imgspecv1.MediaTypeImageManifest,
+			Config: config,
+			Layers: layers,
+		},
+	}
+}
+
+// OCI1Clone creates a copy of the supplied OCI1 manifest.
+func OCI1Clone(src *OCI1) *OCI1 {
+	return &OCI1{
+		Manifest: src.Manifest,
+	}
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromOCI1Descriptor(m.Config)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *OCI1) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.Layers {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo: BlobInfoFromOCI1Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+	{
+		mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+		compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+		compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+	},
+	{
+		mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+		compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+		compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
+	},
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that isn't supported by OCI.
+//
+// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
+// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
+// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
+// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
+// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.Layers) != len(layerInfos) {
+		return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+	}
+	original := m.Layers
+	m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		mimeType := original[i].MediaType
+		if info.CryptoOperation == types.Decrypt {
+			decMimeType, err := getDecryptedMediaType(mimeType)
+			if err != nil {
+				return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
+			}
+			mimeType = decMimeType
+		}
+		mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+		if err != nil {
+			return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
+		}
+		if info.CryptoOperation == types.Encrypt {
+			encMediaType, err := getEncryptedMediaType(mimeType)
+			if err != nil {
+				return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
+			}
+			mimeType = encMediaType
+		}
+
+		m.Layers[i].MediaType = mimeType
+		m.Layers[i].Digest = info.Digest
+		m.Layers[i].Size = info.Size
+		m.Layers[i].Annotations = info.Annotations
+		m.Layers[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// getEncryptedMediaType maps the given mediatype to its encrypted counterpart, and returns
+// an error if the mediatype does not support encryption
+func getEncryptedMediaType(mediatype string) (string, error) {
+	if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
+		return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
+	}
+	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+	switch unsuffixedMediatype {
+	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
+		imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+		return mediatype + "+encrypted", nil
+	}
+
+	return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType maps the given mediatype to its decrypted counterpart, and returns
+// an error if the mediatype does not support decryption
+func getDecryptedMediaType(mediatype string) (string, error) {
+	if !strings.HasSuffix(mediatype, "+encrypted") {
+		return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
+	}
+
+	return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+	return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		// We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
+		// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
+		// and is probably better served by failing; we can always re-visit that later if we fail now, but
+		// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+		return nil, manifest.NewNonImageArtifactError(&m.Manifest)
+	}
+
+	config, err := configGetter(m.ConfigInfo())
+	if err != nil {
+		return nil, err
+	}
+	v1 := &imgspecv1.Image{}
+	if err := json.Unmarshal(config, v1); err != nil {
+		return nil, err
+	}
+	d1 := &Schema2V1Image{}
+	if err := json.Unmarshal(config, d1); err != nil {
+		return nil, err
+	}
+	layerInfos := m.LayerInfos()
+	i := &types.ImageInspectInfo{
+		Tag: "",
+		Created: v1.Created,
+		DockerVersion: d1.DockerVersion,
+		Labels: v1.Config.Labels,
+		Architecture: v1.Architecture,
+		Variant: v1.Variant,
+		Os: v1.OS,
+		Layers: layerInfosToStrings(layerInfos),
+		LayersData: imgInspectLayersFromLayerInfos(layerInfos),
+		Env: v1.Config.Env,
+		Author: v1.Author,
+	}
+	return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+	// The way m.Config.Digest “uniquely identifies” an image is
+	// by containing RootFS.DiffIDs, which identify the layers of the image.
+	// For non-image artifacts, we can’t expect the config to change
+	// any time the other layers (semantically) change, so this approach of
+	// distinguishing objects only by m.Config.Digest doesn’t work in general.
+	//
+	// Any caller of this method presumably wants to disambiguate the same
+	// images with a different representation, but doesn’t want to disambiguate
+	// representations (by using a manifest digest). So, submitting a non-image
+	// artifact to such a caller indicates an expectation mismatch.
+	// So, we just fail here instead of inventing some other ID value (e.g.
+	// by combining the config and blob layer digests). That still
+	// gives us the option to not fail, and return some value, in the future,
+	// without committing to that approach now.
+	// (The only known caller of ImageID is storage/storageImageDestination.computeID,
+	// which can’t work with non-image artifacts.)
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return "", manifest.NewNonImageArtifactError(&m.Manifest)
+	}
+
+	if err := m.Config.Digest.Validate(); err != nil {
+		return "", err
+	}
+	return m.Config.Digest.Hex(), nil
+}
+
+// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
+// (and the code can handle that).
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
+// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
+	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
+		return false
+	}
+	return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
new file mode 100644
index 00000000..193b0893
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -0,0 +1,27 @@
+package manifest
+
+import (
+	"github.com/containers/image/v5/internal/manifest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index = manifest.OCI1IndexPublic
+
+// OCI1IndexFromComponents creates an OCI1 image index instance from the
+// supplied data.
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+	return manifest.OCI1IndexPublicFromComponents(components, annotations)
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+	return manifest.OCI1IndexPublicClone(index)
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
+	return manifest.OCI1IndexPublicFromManifest(manifestBlob)
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
new file mode 100644
index 00000000..8386c47a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -0,0 +1,189 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/imagedestination"
+	"github.com/containers/image/v5/internal/imagedestination/impl"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+type ociArchiveImageDestination struct {
+	impl.Compat
+
+	ref ociArchiveReference
+	unpackedDest private.ImageDestination
+	tempDirRef tempDirOCIRef
+}
+
+// newImageDestination returns an ImageDestination for writing to an oci-archive file.
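Before the implementation, a brief editorial sketch (not part of the vendored file; the tarball path and the nil SystemContext are illustrative assumptions) of how this destination is normally reached through the transport's public ParseReference and NewImageDestination entry points, which appear in oci_transport.go below:

package main

import (
	"context"

	ociarchive "github.com/containers/image/v5/oci/archive"
)

func main() {
	ctx := context.Background()
	// "path:image" syntax; the image part may be empty.
	ref, err := ociarchive.ParseReference("/tmp/hello.tar:latest") // hypothetical path
	if err != nil {
		panic(err)
	}
	// The destination writes into a temporary OCI layout;
	// Commit() later re-tars that directory into /tmp/hello.tar.
	dest, err := ref.NewImageDestination(ctx, nil)
	if err != nil {
		panic(err)
	}
	defer dest.Close()
	// Blobs and the manifest would normally be written here by the
	// copy machinery (github.com/containers/image/v5/copy).
}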
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) { + tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return nil, fmt.Errorf("creating oci reference: %w", err) + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) + } + return nil, err + } + d := &ociArchiveImageDestination{ + ref: ref, + unpackedDest: imagedestination.FromPublic(unpackedDest), + tempDirRef: tempDirRef, + } + d.Compat = impl.AddCompat(d) + return d, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer func() { + err := d.tempDirRef.deleteTempDir() + logrus.Debugf("Error deleting temporary directory: %v", err) + }() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { + return d.unpackedDest.AcceptsForeignLayerURLs() +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise +func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { + return d.unpackedDest.MustMatchRuntimeOS() +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.unpackedDest.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { + return d.unpackedDest.SupportsPutBlobPartial() +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. 
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil). +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options) +} + +// PutManifest writes the manifest to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when +// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutManifest(ctx, m, instanceDigest) +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). 
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+	return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// after the directory is made, it is tarred up into a file and the directory is deleted
+func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
+		return fmt.Errorf("storing image %q: %w", d.ref.image, err)
+	}
+
+	// path of directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path to save tarred up file
+	dst := d.ref.resolvedFile
+	return tarDirectory(src, dst)
+}
+
+// tarDirectory tars the directory at src and saves the result to dst
+func tarDirectory(src, dst string) error {
+	// input is a stream of bytes from the archive of the directory at path
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
+	}
+
+	// creates the tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return fmt.Errorf("creating tar file %q: %w", dst, err)
+	}
+	defer outFile.Close()
+
+	// copies the contents of the directory to the tar file
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	_, err = io.Copy(outFile, input)
+
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
new file mode 100644
index 00000000..6c9ee334
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -0,0 +1,162 @@
+package archive
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/imagesource"
+	"github.com/containers/image/v5/internal/imagesource/impl"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	ocilayout "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the “image” part of the provided reference.
+type ImageNotFoundError struct {
+	ref ociArchiveReference
+	// We may make members public, or add methods, in the future.
+}
+
+func (e ImageNotFoundError) Error() string {
+	return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
+}
+
+type ociArchiveImageSource struct {
+	impl.Compat
+
+	ref ociArchiveReference
+	unpackedSrc private.ImageSource
+	tempDirRef tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from an oci-archive file.
+// newImageSource untars the file and saves it in a temp directory
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) {
+	tempDirRef, err := createUntarTempDir(sys, ref)
+	if err != nil {
+		return nil, fmt.Errorf("creating temp directory: %w", err)
+	}
+
+	unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
+	if err != nil {
+		var notFound ocilayout.ImageNotFoundError
+		if errors.As(err, &notFound) {
+			err = ImageNotFoundError{ref: ref}
+		}
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
+		}
+		return nil, err
+	}
+	s := &ociArchiveImageSource{
+		ref: ref,
+		unpackedSrc: imagesource.FromPublic(unpackedSrc),
+		tempDirRef: tempDirRef,
+	}
+	s.Compat = impl.AddCompat(s)
+	return s, nil
+}
+
+// LoadManifestDescriptor loads the manifest
+// Deprecated: use LoadManifestDescriptorWithContext instead
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	return LoadManifestDescriptorWithContext(nil, imgRef)
+}
+
+// LoadManifestDescriptorWithContext loads the manifest
+func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	ociArchRef, ok := imgRef.(ociArchiveReference)
+	if !ok {
+		return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference")
+	}
+	tempDirRef, err := createUntarTempDir(sys, ociArchRef)
+	if err != nil {
+		return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err)
+	}
+	defer func() {
+		err := tempDirRef.deleteTempDir()
+		logrus.Debugf("Error deleting temporary directory: %v", err)
+	}()
+
+	descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
+	if err != nil {
+		return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err)
+	}
+	return descriptor, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociArchiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+// Close deletes the temporary directory at dst
+func (s *ociArchiveImageSource) Close() error {
+	defer func() {
+		err := s.tempDirRef.deleteTempDir()
+		logrus.Debugf("error deleting tmp dir: %v", err)
+	}()
+	return s.unpackedSrc.Close()
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	return s.unpackedSrc.GetManifest(ctx, instanceDigest)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(ctx, info, cache)
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
+	return s.unpackedSrc.SupportsGetBlobAt()
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
new file mode 100644
index 00000000..2a03feee
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go
@@ -0,0 +1,193 @@
+package archive
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/image/v5/directory/explicitfilepath"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/oci/internal"
+	ocilayout "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archives;
+// it creates an oci-archive tar file by calling into the OCI transport,
+// tarring the directory created by oci and deleting the directory.
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+	file string
+	resolvedFile string
+	image string
+}
+
+func (t ociArchiveTransport) Name() string {
+	return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	file, image := internal.SplitPathAndImage(reference)
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
+func NewReference(file, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(file); err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+func (ref ociArchiveReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedFile
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return image.FromReference(ctx, sys, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for oci: images")
+}
+
+// tempDirOCIRef stores the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+	tempDirectory string
+	ociRefExtracted types.ImageReference
+}
+
+// deleteTempDir deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+// If SystemContext.BigFilesTemporaryDir is not "", it overrides the temporary directory to use for storing big files
+func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
+	dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
+	if err != nil {
+		return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// createUntarTempDir creates the temporary directory and copies the tarred content to it
+func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(sys, ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
+	}
+	src := ref.resolvedFile
+	dst := tempDirRef.tempDirectory
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	arch, err := os.Open(src)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+	defer arch.Close()
+	if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
+		}
+		return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 00000000..53827b11
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,121 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = fmt.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
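// Editorial illustration (not part of the vendored file), with hypothetical inputs:
// on non-Windows hosts the reference is cut at the first colon, so
//
//	SplitPathAndImage("/srv/oci/app.tar:v2") == ("/srv/oci/app.tar", "v2")
//	SplitPathAndImage("/srv/oci/app.tar")    == ("/srv/oci/app.tar", "")
//
// while on Windows the drive-letter colon is preserved:
//
//	SplitPathAndImage(`C:\images\app.tar:v2`) == (`C:\images\app.tar`, "v2")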
+func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements. First one full match, second the capture group for the path and + // the third the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] +} + +func splitPathAndImageNonWindows(reference string) (string, string) { + path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":" + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it. +func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. 
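To illustrate the non-Windows split rule implemented above: everything before the first ":" is treated as the layout path, and the remainder as the image name. A minimal standalone sketch (the reference strings are hypothetical):

package main

import (
    "fmt"
    "strings"
)

// splitRef mirrors splitPathAndImageNonWindows above: everything before the
// first ":" is the OCI layout path, everything after it is the image name
// (empty when no ":" is present).
func splitRef(reference string) (path, image string) {
    path, image, _ = strings.Cut(reference, ":")
    return path, image
}

func main() {
    // Hypothetical references, for illustration only.
    fmt.Println(splitRef("/tmp/layout:fedora")) // /tmp/layout fedora
    fmt.Println(splitRef("/tmp/layout"))        // /tmp/layout  (empty image)
}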
+ if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go new file mode 100644 index 00000000..8ff43d44 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,321 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ociImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + ref ociReference + index imgspecv1.Index + sharedBlobDir string +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. +func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + desiredLayerCompression := types.Compress + if sys != nil && sys.OCIAcceptUncompressedLayers { + desiredLayerCompression = types.PreserveOriginal + } + + d := &ociImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + }, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: true, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"), + + ref: ref, + index: *index, + } + d.Compat = impl.AddCompat(d) + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
+func (d *ociImageDestination) Close() error {
+ return nil
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+ blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded := false
+ explicitClosed := false
+ defer func() {
+ if !explicitClosed {
+ blobFile.Close()
+ }
+ if !succeeded {
+ os.Remove(blobFile.Name())
+ }
+ }()
+
+ digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(blobFile, stream)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ blobDigest := digester.Digest()
+ if inputInfo.Size != -1 && size != inputInfo.Size {
+ return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+ }
+ if err := blobFile.Sync(); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+ // On Windows, the "permissions of newly created files" argument to syscall.Open is
+ // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+ // always fails on Windows.
+ if runtime.GOOS != "windows" {
+ if err := blobFile.Chmod(0644); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ }
+
+ blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
+ if err != nil {
+ return private.UploadedBlob{}, err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return private.UploadedBlob{}, err
+ }
+
+ // need to explicitly close the file, since a rename won't otherwise work on Windows
+ blobFile.Close()
+ explicitClosed = true
+ if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+ return private.UploadedBlob{}, err
+ }
+ succeeded = true
+ return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil).
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
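The PutBlobWithOptions implementation above follows a write-to-temp, digest-while-copying, fsync, then rename pattern so that readers never observe a partially written blob. A condensed standalone sketch of that pattern, assuming only the github.com/opencontainers/go-digest module (an illustration, not the vendored code):

package main

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"

    "github.com/opencontainers/go-digest"
)

// writeBlob sketches the pattern used above: stream into a temp file while
// digesting, fsync, then rename into the final content-addressed location.
func writeBlob(dir string, stream io.Reader) (digest.Digest, error) {
    tmp, err := os.CreateTemp(dir, "blob-")
    if err != nil {
        return "", err
    }
    defer os.Remove(tmp.Name()) // harmless once the rename has succeeded

    digester := digest.Canonical.Digester()
    if _, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash())); err != nil {
        tmp.Close()
        return "", err
    }
    if err := tmp.Sync(); err != nil {
        tmp.Close()
        return "", err
    }
    tmp.Close() // must close before rename on Windows

    d := digester.Digest()
    dst := filepath.Join(dir, d.Algorithm().String(), d.Hex())
    if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
        return "", err
    }
    return d, os.Rename(tmp.Name(), dst)
}

func main() {
    dir, _ := os.MkdirTemp("", "layout")
    d, err := writeBlob(dir, strings.NewReader("hello"))
    fmt.Println(d, err)
}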
+func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
+ if !impl.OriginalBlobMatchesRequiredCompression(options) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if info.Digest == "" {
+ return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
+ }
+ blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+ finfo, err := os.Stat(blobPath)
+ if err != nil && os.IsNotExist(err) {
+ return false, private.ReusedBlob{}, nil
+ }
+ if err != nil {
+ return false, private.ReusedBlob{}, err
+ }
+
+ return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+ var digest digest.Digest
+ var err error
+ if instanceDigest != nil {
+ digest = *instanceDigest
+ } else {
+ digest, err = manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ }
+
+ blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir)
+ if err != nil {
+ return err
+ }
+ if err := ensureParentDirectoryExists(blobPath); err != nil {
+ return err
+ }
+ if err := os.WriteFile(blobPath, m, 0644); err != nil {
+ return err
+ }
+
+ if instanceDigest != nil {
+ return nil
+ }
+
+ // If we had platform information, we'd build an imgspecv1.Platform structure here.
+
+ // Start filling out the descriptor for this entry
+ desc := imgspecv1.Descriptor{}
+ desc.Digest = digest
+ desc.Size = int64(len(m))
+ if d.ref.image != "" {
+ desc.Annotations = make(map[string]string)
+ desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image
+ }
+
+ // If we knew the MIME type, we wouldn't have to guess here.
+ desc.MediaType = manifest.GuessMIMEType(m)
+
+ d.addManifest(&desc)
+
+ return nil
+}
+
+func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
+ // If the new entry has a name, remove any conflicting names which we already have.
+ if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" {
+ // The name is being set on a new entry, so remove any older ones that had the same name.
+ // We might be storing an index and all of its component images, and we'll want to attach
+ // the name to the last one, which is the index.
+ for i, manifest := range d.index.Manifests {
+ if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] {
+ delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName)
+ break
+ }
+ }
+ }
+ // If it has the same digest as another entry in the index, we already overwrote the file,
+ // so just pick up the other information.
+ for i, manifest := range d.index.Manifests {
+ if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" {
+ // Replace it completely.
+ d.index.Manifests[i] = *desc
+ return
+ }
+ }
+ // It's a new entry to be added to the index.
+ d.index.Manifests = append(d.index.Manifests, *desc)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+ return err
+ }
+ indexJSON, err := json.Marshal(d.index)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors true is returned
+func indexExists(ref ociReference) bool {
+ _, err := os.Stat(ref.indexPath())
+ if err == nil {
+ return true
+ }
+ if os.IsNotExist(err) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
new file mode 100644
index 00000000..6b423f3b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -0,0 +1,216 @@
+package layout
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/v5/internal/imagesource/impl"
+ "github.com/containers/image/v5/internal/imagesource/stubs"
+ "github.com/containers/image/v5/internal/manifest"
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageNotFoundError is used when the OCI structure, in principle, exists and seems valid enough,
+// but nothing matches the "image" part of the provided reference.
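The Commit implementation above persists two small files at the layout root: oci-layout (a fixed version marker) and index.json (the serialized index). A trimmed-down sketch of the JSON shape involved, with hypothetical digest and size values (the real code uses the full imgspecv1 types):

package main

import (
    "encoding/json"
    "fmt"
)

// A trimmed-down view of the index.json that Commit above serializes.
type descriptor struct {
    MediaType   string            `json:"mediaType"`
    Digest      string            `json:"digest"`
    Size        int64             `json:"size"`
    Annotations map[string]string `json:"annotations,omitempty"`
}

type index struct {
    SchemaVersion int          `json:"schemaVersion"`
    Manifests     []descriptor `json:"manifests"`
}

func main() {
    idx := index{
        SchemaVersion: 2,
        Manifests: []descriptor{{
            MediaType: "application/vnd.oci.image.manifest.v1+json",
            // Hypothetical digest and size, for illustration only.
            Digest:      "sha256:0000000000000000000000000000000000000000000000000000000000000000",
            Size:        1234,
            Annotations: map[string]string{"org.opencontainers.image.ref.name": "latest"},
        }},
    }
    out, _ := json.MarshalIndent(idx, "", "  ")
    fmt.Println(string(out))
    // The sibling oci-layout file simply contains {"imageLayoutVersion": "1.0.0"}.
}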
+type ImageNotFoundError struct { + ref ociReference + // We may make members public, or add methods, in the future. +} + +func (e ImageNotFoundError) Error() string { + return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) +} + +type ociImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + + ref ociReference + index *imgspecv1.Index + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) { + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = tlsconfig.ServerDefault() + + if sys != nil && sys.OCICertPath != "" { + if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { + return nil, err + } + tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify + } + + client := &http.Client{} + client.Transport = tr + descriptor, err := ref.getManifestDescriptor() + if err != nil { + return nil, err + } + index, err := ref.getIndex() + if err != nil { + return nil, err + } + s := &ociImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + index: index, + descriptor: descriptor, + client: client, + } + if sys != nil { + // TODO(jonboulle): check dir existence? + s.sharedBlobDir = sys.OCISharedBlobDirPath + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() error { + s.client.CloseIdleConnections() + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + var err error + + if instanceDigest == nil { + dig = s.descriptor.Digest + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + for _, md := range s.index.Manifests { + if md.Digest == dig { + mimeType = md.MediaType + break + } + } + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + + m, err := os.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + if mimeType == "" { + mimeType = manifest.GuessMIMEType(m) + } + + return m, mimeType, nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := s.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 00000000..6586b844 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,254 @@ +package layout + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. 
+ ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
+)
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+ return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+ return internal.ValidateScope(scope)
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ dir string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+ // If image == "", it means the "only image" in the index.json is used when this reference is a source;
+ // for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+ image string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ dir, image := internal.SplitPathAndImage(reference)
+ return NewReference(dir, image)
+}
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := internal.ValidateOCIPath(dir); err != nil {
+ return nil, err
+ }
+
+ if err = internal.ValidateImageName(image); err != nil {
+ return nil, err
+ }
+
+ return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g.
default attribute values omitted by the user may be filled in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedDir
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return image.FromReference(ctx, sys, ref)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening an index, nil is returned together
+// with an error.
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, err + } + + if ref.image == "" { + // return manifest if only one image is in the oci directory + if len(index.Manifests) != 1 { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, ErrMoreThanOneImage + } + return index.Manifests[0], nil + } else { + // if image specified, look through all manifests for a match + var unsupportedMIMETypes []string + for _, md := range index.Manifests { + if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { + if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { + return md, nil + } + unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) + } + } + if len(unsupportedMIMETypes) != 0 { + return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + } + } + return imgspecv1.Descriptor{}, ImageNotFoundError{ref} +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") + } + return ociRef.getManifestDescriptor() +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for oci: images") +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. +func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, "oci-layout") +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, "index.json") +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. 
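The blobPath helper that follows maps a digest to blobs/<algorithm>/<hex> under the layout directory (or under a shared blob directory, if configured). A standalone sketch, using the well-known sha256 digest of the empty string and a hypothetical layout path:

package main

import (
    "fmt"
    "path/filepath"

    "github.com/opencontainers/go-digest"
)

func main() {
    // Blobs live under <dir>/blobs/<algorithm>/<hex>, unless a shared
    // blob directory is set; the layout path here is hypothetical.
    d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    if err := d.Validate(); err != nil {
        panic(err)
    }
    fmt.Println(filepath.Join("/tmp/layout", "blobs", d.Algorithm().String(), d.Hex()))
    // /tmp/layout/blobs/sha256/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}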
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
+ if err := digest.Validate(); err != nil {
+ return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
+ }
+ blobDir := filepath.Join(ref.dir, "blobs")
+ if sharedBlobDir != "" {
+ blobDir = sharedBlobDir
+ }
+ return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
new file mode 100644
index 00000000..037572b0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
@@ -0,0 +1,76 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/v5/internal/rootless"
+ "github.com/containers/image/v5/pkg/blobinfocache/memory"
+ "github.com/containers/image/v5/pkg/blobinfocache/sqlite"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
+ // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return memory.New()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
+ return memory.New()
+ }
+
+ // It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+ // as a global singleton, for the vast majority of callers who don’t override the cache location.
+ // OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely, + // and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%. + + cache, err := sqlite.New(path) + if err != nil { + logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err) + return memory.New() + } + logrus.Debugf("Using SQLite blob info cache at %s", path) + return cache +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go new file mode 100644 index 00000000..bc9315f6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -0,0 +1,111 @@ +// Package prioritize provides utilities for prioritizing locations in +// types.BlobInfoCache.CandidateLocations. +package prioritize + +import ( + "sort" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/opencontainers/go-digest" +) + +// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) +} + +// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, +// along with the specially-treated digest values for the implementation of sort.Interface.Less +type candidateSortState struct { + cs []CandidateWithTime // The entries to sort + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) Len() int { + return len(css.cs) +} + +func (css *candidateSortState) Less(i, j int) bool { + xi := css.cs[i] + xj := css.cs[j] + + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. 
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.Candidate.Digest != xj.Candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.Candidate.Digest == css.primaryDigest { + return true + } + if xj.Candidate.Digest == css.primaryDigest { + return false + } + if css.uncompressedDigest != "" { + if xi.Candidate.Digest == css.uncompressedDigest { + return false + } + if xj.Candidate.Digest == css.uncompressedDigest { + return true + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { + return xi.LastSeen.After(xj.LastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time + return xi.LastSeen.After(xj.LastSeen) + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return xi.Candidate.Digest < xj.Candidate.Digest +} + +func (css *candidateSortState) Swap(i, j int) { + css.cs[i], css.cs[j] = css.cs[j], css.cs[i] +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the +// number of entries to limit, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. + // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available. + sort.Sort(&candidateSortState{ + cs: cs, + primaryDigest: primaryDigest, + uncompressedDigest: uncompressedDigest, + }) + + resLength := len(cs) + if resLength > maxCandidates { + resLength = maxCandidates + } + res := make([]blobinfocache.BICReplacementCandidate2, resLength) + for i := range res { + res[i] = cs[i].Candidate + } + return res +} + +// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), +// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// +// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course +// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
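The ordering rules above can be restated with plain types. A simplified standalone sketch (the digest strings are illustrative placeholders, not valid digests):

package main

import (
    "fmt"
    "sort"
    "time"
)

type cand struct {
    digest string
    seen   time.Time
}

// less restates the three tiers implemented by candidateSortState above:
// primary-digest entries first, uncompressed-digest entries last, everything
// else by recency (ties broken by digest for determinism).
func less(a, b cand, primary, uncompressed string) bool {
    if a.digest != b.digest {
        if a.digest == primary {
            return true
        }
        if b.digest == primary {
            return false
        }
        if uncompressed != "" {
            if a.digest == uncompressed {
                return false
            }
            if b.digest == uncompressed {
                return true
            }
        }
    } else if a.digest == primary || (uncompressed != "" && a.digest == uncompressed) {
        return a.seen.After(b.seen)
    }
    if !a.seen.Equal(b.seen) {
        return a.seen.After(b.seen)
    }
    return a.digest < b.digest
}

func main() {
    now := time.Now()
    cs := []cand{
        {"sha256:uncompressed", now},
        {"sha256:other", now.Add(-time.Minute)},
        {"sha256:primary", now.Add(-time.Hour)},
    }
    sort.Slice(cs, func(i, j int) bool {
        return less(cs[i], cs[j], "sha256:primary", "sha256:uncompressed")
    })
    fmt.Println(cs) // primary first, uncompressed last
}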
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 00000000..9e5c4256
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,200 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/internal/set"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache.
+type cache struct {
+ mutex sync.Mutex
+ // The following fields can only be accessed with mutex held.
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+ compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+ return new2()
+}
+
+func new2() *cache {
+ return &cache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ compressors: map[digest.Digest]string{},
+ }
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ mem.mutex.Lock()
+ defer mem.mutex.Unlock()
+ return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { + if d, ok := mem.uncompressedDigests[anyDigest]; ok { + return d + } + // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() { + return anyDigest + } + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + mem.uncompressedDigests[anyDigest] = uncompressed + + anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] + if !ok { + anyDigestSet = set.New[digest.Digest]() + mem.digestsByUncompressed[uncompressed] = anyDigestSet + } + anyDigestSet.Add(anyDigest) +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} + locationScope, ok := mem.knownLocations[key] + if !ok { + locationScope = map[types.BICLocationReference]time.Time{} + mem.knownLocations[key] = locationScope + } + locationScope[location] = time.Now() // Possibly overwriting an older entry. +} + +// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified +// algorithm, or uncompressed, or that we no longer know. +func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName) + } + if compressorName == blobinfocache.UnknownCompression { + delete(mem.compressors, blobDigest) + return + } + mem.compressors[blobDigest] = compressorName +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. 
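For context, the recording and lookup halves of this cache are exercised through the public types.BlobInfoCache interface. A minimal usage sketch of memory.New with hypothetical digest values:

package main

import (
    "fmt"

    "github.com/containers/image/v5/pkg/blobinfocache/memory"
    "github.com/opencontainers/go-digest"
)

func main() {
    cache := memory.New()

    // Hypothetical compressed/uncompressed digest pair, for illustration;
    // real callers record pairs only after locally verifying them.
    compressed := digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111")
    uncompressed := digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")

    cache.RecordDigestUncompressedPair(compressed, uncompressed)
    fmt.Println(cache.UncompressedDigest(compressed))                   // the recorded uncompressed digest
    fmt.Println(cache.UncompressedDigest(uncompressed) == uncompressed) // true: known to be uncompressed
}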
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ for l, t := range locations {
+ compressorName, compressorKnown := mem.compressors[digest]
+ if !compressorKnown {
+ if requireCompressionInfo {
+ continue
+ }
+ compressorName = blobinfocache.UnknownCompression
+ }
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: blobinfocache.BICReplacementCandidate2{
+ Digest: digest,
+ CompressorName: compressorName,
+ Location: l,
+ },
+ LastSeen: t,
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +} + +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { + mem.mutex.Lock() + defer mem.mutex.Unlock() + res := []prioritize.CandidateWithTime{} + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) + var uncompressedDigest digest.Digest // = "" + if canSubstitute { + if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { + if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok { + for _, d := range otherDigests.Values() { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + } + } + } + if uncompressedDigest != primaryDigest { + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) + } + } + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go new file mode 100644 index 00000000..4b7122f9 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -0,0 +1,50 @@ +// Package none implements a dummy BlobInfoCache which records no data. +package none + +import ( + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// noCache implements a dummy BlobInfoCache which records no data. +type noCache struct { +} + +// NoCache implements BlobInfoCache by not recording any data. +// +// This exists primarily for implementations of configGetter for +// Manifest.Inspect, because configs only have one representation. +// Any use of BlobInfoCache with blobs should usually use at least a +// short-lived cache, ideally blobinfocache.DefaultCache. +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
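Since none.NoCache satisfies the same interface, it can be handed to any API that requires a cache when caching is undesirable. A tiny sketch (the digest value is hypothetical):

package main

import (
    "fmt"

    "github.com/containers/image/v5/pkg/blobinfocache/none"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
)

func main() {
    // none.NoCache satisfies types.BlobInfoCache; every query comes back empty.
    var cache types.BlobInfoCache = none.NoCache

    d := digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")
    fmt.Printf("%q\n", cache.UncompressedDigest(d)) // always ""
}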
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 00000000..276913d6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,553 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/containers/image/v5/internal/blobinfocache"
+ "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+ "github.com/containers/image/v5/types"
+ _ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // NOTE: There is no versioning data inside the file; this is a "cache", so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+ // That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+ // (which would require compatibility in both directions).
+
+ // Assembled sqlite options used when opening the database.
+ sqliteOptions = "?" +
+ // Deal with timezone automatically.
+ // go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
+ // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
+ // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
+ // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
+ "_loc=auto" +
+ // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
+ "&_sync=FULL" +
+ // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
+ // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
+ "&_foreign_keys=1" +
+ // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
+ // i.e.
obtain a write lock for _all_ transactions at the transaction start (never use a read lock, + // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously). + // + // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked†error, + // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous + // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented. + // Compare https://github.com/mattn/go-sqlite3/issues/274 . + // + // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167 + // or https://github.com/mattn/go-sqlite3/issues/400 . + // The currently-proposed workaround is to create two different SQL “databases†(= connection pools) with different _txlock settings, + // which seems rather wasteful. + "&_txlock=exclusive" +) + +// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path. +type cache struct { + path string + + // The database/sql package says “It is rarely necessary to close a DB.â€, and steers towards a long-term *sql.DB connection pool. + // That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily + // the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly + // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have + // a Close method, so creating a lot of single-use caches could leak data. + // + // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image. + // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open. + // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single + // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every + // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation, + // somewhere around "700"). + + lock sync.Mutex + // The following fields can only be accessed with lock held. + refCount int // number of outstanding Open() calls + db *sql.DB // nil if not set (may happen even if refCount > 0 on errors) +} + +// New returns BlobInfoCache implementation which uses a SQLite file at path. +// +// Most users should call blobinfocache.DefaultCache instead. +func New(path string) (types.BlobInfoCache, error) { + return new2(path) +} + +func new2(path string) (*cache, error) { + db, err := rawOpen(path) + if err != nil { + return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err) + } + defer db.Close() + + // We don’t check the schema before every operation, because that would be costly + // and because we assume schema changes will be handled by using a different path. + if err := ensureDBHasCurrentSchema(db); err != nil { + return nil, err + } + + return &cache{ + path: path, + refCount: 0, + db: nil, + }, nil +} + +// rawOpen returns a new *sql.DB for path. +// The caller should arrange for it to be .Close()d. 
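Reviewer aside: a sketch of the Open/Close amortization described in the comment above, as a hypothetical out-of-tree caller might exercise it. The cache path is an arbitrary example, and the real pairing happens inside c/image/copy; the type assertion works only because the concrete cache happens to implement the private interface.

package main

import (
	"log"

	"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
	"github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical path; callers normally go through blobinfocache.DefaultCache.
	bic, err := sqlite.New("/tmp/blob-info-cache-example.sqlite")
	if err != nil {
		log.Fatal(err)
	}
	// Pair Open/Close around a batch of operations, as c/image/copy does via
	// the private BlobInfoCache2 interface, to reuse one *sql.DB throughout.
	if oc, ok := bic.(interface {
		Open()
		Close()
	}); ok {
		oc.Open()
		defer oc.Close()
	}
	_ = bic.UncompressedDigest(digest.FromString("example"))
}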
+func rawOpen(path string) (*sql.DB, error) {
+	// This exists to centralize the use of sqliteOptions.
+	return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	if sqc.refCount == 0 {
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+			db = nil // But still increase sqc.refCount, because a .Close() will happen
+		}
+		sqc.db = db
+	}
+	sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	switch sqc.refCount {
+	case 0:
+		logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+		return
+	case 1:
+		if sqc.db != nil {
+			sqc.db.Close()
+			sqc.db = nil
+		}
+	}
+	sqc.refCount--
+}
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+		sqc.lock.Lock()
+		defer sqc.lock.Unlock()
+
+		if sqc.db != nil {
+			return sqc.db, func() {}, nil
+		}
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+		}
+		return db, func() { db.Close() }, nil
+	}()
+	if err != nil {
+		var zeroRes T // A zero value of T
+		return zeroRes, err
+	}
+	defer closeDB()
+
+	return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
+
+	var zeroRes T // A zero value of T
+
+	tx, err := db.Begin()
+	if err != nil {
+		return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			if err := tx.Rollback(); err != nil {
+				logrus.Errorf("Rolling back transaction: %v", err)
+			}
+		}
+	}()
+
+	res, err := fn(tx)
+	if err != nil {
+		return zeroRes, err
+	}
+	if err := tx.Commit(); err != nil {
+		return zeroRes, fmt.Errorf("committing transaction: %w", err)
+	}
+
+	succeeded = true
+	return res, nil
+}
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+	var value T
+	if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+		var zeroValue T // A zero value of T
+		if errors.Is(err, sql.ErrNoRows) {
+			return zeroValue, false, nil
+		}
+		return zeroValue, false, err
+	}
+	return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+	// Considered schema design alternatives:
+	//
+	// (Overall, considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+	// to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.)
+	//
+	// * This schema uses the text representation of digests.
+	//
+	//   We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+	//   and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+	//   is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+	//   into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+	//   — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+	//
+	// * There are two separate items for recording the uncompressed digest and digest compressors.
+	//   Alternatively, we could have a single "digest facts" table with NULLable columns.
+	//
+	//   The way the BlobInfoCache API works, we are only going to write one value at a time, so
+	//   sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
+	//   Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
+	//   do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
+	//   we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
+	//
+	//   Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
+	//   do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
+	//
+	// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+	//
+	//   Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+	//   is probably costlier than comparing a few more bytes of data.
+	//
+	//   Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+	//   having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+	//
+	items := []struct{ itemName, command string }{
+		{
+			"DigestUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`anyDigest TEXT PRIMARY KEY NOT NULL,` +
+				// DigestUncompressedPairs_index_uncompressedDigest
+				`uncompressedDigest TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestUncompressedPairs_index_uncompressedDigest",
+			`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+		},
+		{
+			"DigestCompressors",
+			`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+				`compressor TEXT NOT NULL
+			)`,
+		},
+		{
+			"KnownLocations",
+			`CREATE TABLE IF NOT EXISTS KnownLocations(
+				transport TEXT NOT NULL,
+				scope TEXT NOT NULL,
+				digest TEXT NOT NULL,
+				location TEXT NOT NULL,` +
+				// TIMESTAMP is parsed by SQLITE as a NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+				// format "2006-01-02 15:04:05.999999999-07:00".
+				// See also the _loc option in the sql.Open data source name.
+				`time TIMESTAMP NOT NULL,` +
+				// Implies an index.
+				// We also search by (transport, scope, digest), that doesn’t need an extra index
+				// because it is a prefix of the implied primary-key index.
+				`PRIMARY KEY (transport, scope, digest, location)
+			)`,
+		},
+	}
+
+	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+		lastItemName := items[len(items)-1].itemName
+		_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+		if err != nil {
+			return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+		}
+		if !found {
+			// Item does not exist, assuming a fresh database.
+			for _, i := range items {
+				if _, err := tx.Exec(i.command); err != nil {
+					return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+				}
+			}
+		}
+		return void{}, nil
+	})
+	return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+	uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		d, err := digest.Parse(uncompressedString)
+		if err != nil {
+			return "", err
+		}
+		return d, nil
+	}
+	// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		return anyDigest, nil
+	}
+	return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
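Reviewer aside: the generic transaction helper used throughout this file is a nice reusable pattern. Here is a self-contained sketch of the same shape against plain database/sql, runnable with only the go-sqlite3 driver vendored here.

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// inTx mirrors the shape of the dbTransaction helper above: begin, run fn,
// roll back on failure, commit on success, all generically typed.
func inTx[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
	var zero T
	tx, err := db.Begin()
	if err != nil {
		return zero, err
	}
	res, err := fn(tx)
	if err != nil {
		_ = tx.Rollback() // best-effort rollback on failure
		return zero, err
	}
	if err := tx.Commit(); err != nil {
		return zero, err
	}
	return res, nil
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	n, err := inTx(db, func(tx *sql.Tx) (int, error) {
		var one int
		err := tx.QueryRow("SELECT 1").Scan(&one)
		return one, err
	})
	fmt.Println(n, err) // 1 <nil>
}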
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		return sqc.uncompressedDigest(tx, anyDigest)
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for %q: %w", anyDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+			anyDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+			transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+			return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+				location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+// or Uncompressed or UnknownCompression.
+// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+// digest just because some remote author claims so (e.g. because a manifest says so);
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for compressor of %q: %w", anyDigest, err)
+		}
+		if gotPrevious && previous != compressorName {
+			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
+		}
+		if compressorName == blobinfocache.UnknownCompression {
+			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+			}
+		} else {
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+				anyDigest.String(), compressorName); err != nil {
+				return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
+			}
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
+	var rows *sql.Rows
+	var err error
+	if requireCompressionInfo {
+		rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
+			"ON KnownLocations.digest = DigestCompressors.digest "+
+			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+			transport.Name(), scope.Opaque, digest.String())
+	} else {
+		rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
+			"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
+			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+			blobinfocache.UnknownCompression,
+			transport.Name(), scope.Opaque, digest.String())
+	}
+	if err != nil {
+		return nil, fmt.Errorf("looking up candidate locations: %w", err)
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var location string
+		var time time.Time
+		var compressorName string
+		if err := rows.Scan(&location, &time, &compressorName); err != nil {
+			return nil, fmt.Errorf("scanning candidate: %w", err)
+		}
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: blobinfocache.BICReplacementCandidate2{
+				Digest:         digest,
+				CompressorName: compressorName,
+				Location:       types.BICLocationReference{Opaque: location},
+			},
+			LastSeen: time,
+		})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("iterating through locations: %w", err)
+	}
+	return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+// up variants of the blob which have the same uncompressed digest.
+// +// The CompressorName fields in returned data must never be UnknownCompression. +func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return sqc.candidateLocations(transport, scope, digest, canSubstitute, true) +} + +func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { + var uncompressedDigest digest.Digest // = "" + res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { + res := []prioritize.CandidateWithTime{} + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo) + if err != nil { + return nil, err + } + if canSubstitute { + uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest) + if err != nil { + return nil, err + } + + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. + rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) + if err != nil { + return nil, fmt.Errorf("querying for other digests: %w", err) + } + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) + if err != nil { + return nil, err + } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) + } + + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo) + if err != nil { + return nil, err + } + } + } + return res, nil + }) + if err != nil { + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) + +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
new file mode 100644
index 00000000..4443dda7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -0,0 +1,165 @@
+package compression
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/pkg/compression/internal"
+	"github.com/containers/image/v5/pkg/compression/types"
+	"github.com/containers/storage/pkg/chunked/compressor"
+	"github.com/klauspost/pgzip"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm = types.Algorithm
+
+var (
+	// Gzip compression.
+	Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+		[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+	// Bzip2 compression.
+	Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+		[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+	// Xz compression.
+	Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+		[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+	// Zstd compression.
+	Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+		[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+	// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
+	ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+		nil, ZstdDecompressor, compressor.ZstdCompressor)
+
+	compressionAlgorithms = map[string]Algorithm{
+		Gzip.Name():        Gzip,
+		Bzip2.Name():       Bzip2,
+		Xz.Name():          Xz,
+		Zstd.Name():        Zstd,
+		ZstdChunked.Name(): ZstdChunked,
+	}
+)
+
+// AlgorithmByName returns the Algorithm with the given name.
+func AlgorithmByName(name string) (Algorithm, error) {
+	algorithm, ok := compressionAlgorithms[name]
+	if ok {
+		return algorithm, nil
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return io.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return io.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
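Reviewer aside: a small, self-contained sketch of the lookup and decompressor entry points defined above; the payload is produced with the standard library's gzip only to have something to decompress.

package main

import (
	"bytes"
	"compress/gzip" // only to produce test data
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Build a small gzip payload to decompress.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte("hello"))
	_ = zw.Close()

	// Resolve the algorithm by its registered name.
	algo, err := compression.AlgorithmByName("gzip")
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved:", algo.Name())

	// Decompress using the exported DecompressorFunc for gzip.
	rc, err := compression.GzipDecompressor(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, _ := io.ReadAll(rc)
	fmt.Println(string(out)) // hello
}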
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// CompressStream returns an io.WriteCloser which compresses the data written to it into dest, using algo at the optional level.
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	m := map[string]string{}
+	return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata is like CompressStream; if the compression
+// generates any metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
+// value and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return Algorithm{}, nil, nil, err
+	}
+
+	var retAlgo Algorithm
+	var decompressor DecompressorFunc
+	for _, algo := range compressionAlgorithms {
+		prefix := internal.AlgorithmPrefix(algo)
+		if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
+			logrus.Debugf("Detected compression format %s", algo.Name())
+			retAlgo = algo
+			decompressor = internal.AlgorithmDecompressor(algo)
+			break
+		}
+	}
+	if decompressor == nil {
+		logrus.Debugf("No compression detected")
+	}
+
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	_, d, r, e := DetectCompressionFormat(input)
+	return d, r, e
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
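Reviewer aside: a round-trip sketch of CompressStream feeding AutoDecompress (defined just below), which detects the format from the magic prefixes registered above.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	var buf bytes.Buffer
	// nil level selects the algorithm's default compression level.
	w, err := compression.CompressStream(&buf, compression.Gzip, nil)
	if err != nil {
		panic(err)
	}
	_, _ = io.WriteString(w, "layer bytes")
	_ = w.Close() // flush the compressed stream before reading it back

	// AutoDecompress sniffs the gzip magic prefix and wraps the reader.
	rc, wasCompressed, err := compression.AutoDecompress(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, _ := io.ReadAll(rc)
	fmt.Println(wasCompressed, string(out)) // true layer bytes
}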
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, fmt.Errorf("detecting compression: %w", err) + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, fmt.Errorf("initializing decompression: %w", err) + } + } else { + res = io.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go new file mode 100644 index 00000000..ba619be0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -0,0 +1,65 @@ +package internal + +import "io" + +// CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// The caller must call Close() on the stream (even if the input stream does not need closing!). +type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc func(io.Reader) (io.ReadCloser, error) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm struct { + name string + mime string + prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. + decompressor DecompressorFunc + compressor CompressorFunc +} + +// NewAlgorithm creates an Algorithm instance. +// This function exists so that Algorithm instances can only be created by code that +// is allowed to import this internal subpackage. +func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + return Algorithm{ + name: name, + mime: mime, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, + } +} + +// Name returns the name for the compression algorithm. +func (c Algorithm) Name() string { + return c.name +} + +// InternalUnstableUndocumentedMIMEQuestionMark ??? +// DO NOT USE THIS anywhere outside of c/image until it is properly documented. +func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string { + return c.mime +} + +// AlgorithmCompressor returns the compressor field of algo. +// This is a function instead of a public method so that it is only callable by code +// that is allowed to import this internal subpackage. +func AlgorithmCompressor(algo Algorithm) CompressorFunc { + return algo.compressor +} + +// AlgorithmDecompressor returns the decompressor field of algo. +// This is a function instead of a public method so that it is only callable by code +// that is allowed to import this internal subpackage. +func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { + return algo.decompressor +} + +// AlgorithmPrefix returns the prefix field of algo. +// This is a function instead of a public method so that it is only callable by code +// that is allowed to import this internal subpackage. 
+func AlgorithmPrefix(algo Algorithm) []byte {
+	return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
new file mode 100644
index 00000000..43d03b60
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -0,0 +1,41 @@
+package types
+
+import (
+	"github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
+
+const (
+	// GzipAlgorithmName is the name used by pkg/compression.Gzip.
+	// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	GzipAlgorithmName = "gzip"
+	// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+	// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	Bzip2AlgorithmName = "bzip2"
+	// XzAlgorithmName is the name used by pkg/compression.Xz.
+	// NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	XzAlgorithmName = "Xz"
+	// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+	// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+	ZstdAlgorithmName = "zstd"
+	// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+	// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+	// will actually be available. (In fact it is intended for this types package not to depend
+	// on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked" +) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go new file mode 100644 index 00000000..39ae014d --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. +func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go new file mode 100644 index 00000000..b987c580 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -0,0 +1,824 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" + helperclient "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/hashicorp/go-multierror" + "github.com/sirupsen/logrus" +) + +type dockerAuthConfig struct { + Auth string `json:"auth,omitempty"` + IdentityToken string `json:"identitytoken,omitempty"` +} + +type dockerConfigFile struct { + AuthConfigs map[string]dockerAuthConfig `json:"auths"` + CredHelpers map[string]string `json:"credHelpers,omitempty"` +} + +var ( + defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") + xdgConfigHomePath = filepath.FromSlash("containers/auth.json") + xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") + dockerHomePath = filepath.FromSlash(".docker/config.json") + dockerLegacyHomePath = ".dockercfg" + nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json") + + // ErrNotLoggedIn is returned for users not logged into a registry + // that they are trying to 
log out of
+	ErrNotLoggedIn = errors.New("not logged in")
+	// ErrNotSupported is returned for unsupported methods
+	ErrNotSupported = errors.New("not supported")
+)
+
+// authPath combines a path to a file with container registry credentials,
+// along with expected properties of that path (currently just whether it's
+// legacy format or not).
+type authPath struct {
+	path         string
+	legacyFormat bool
+}
+
+// newAuthPathDefault constructs an authPath in non-legacy format.
+func newAuthPathDefault(path string) authPath {
+	return authPath{path: path, legacyFormat: false}
+}
+
+// SetCredentials stores the username and password in a location
+// appropriate for sys and the users’ configuration.
+// A valid key is a repository, a namespace within a registry, or a registry hostname;
+// using forms other than just a registry may fail depending on configuration.
+// Returns a human-readable description of the location that was updated.
+// NOTE: The return value is only intended to be read by humans; its form is not an API,
+// it may change (or new forms can be added) any time.
+func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
+	isNamespaced, err := validateKey(key)
+	if err != nil {
+		return "", err
+	}
+
+	helpers, err := sysregistriesv2.CredentialHelpers(sys)
+	if err != nil {
+		return "", err
+	}
+
+	// Make sure to collect all errors.
+	var multiErr error
+	for _, helper := range helpers {
+		var desc string
+		var err error
+		switch helper {
+		// Special-case the built-in helpers for auth files.
+		case sysregistriesv2.AuthenticationFileHelper:
+			desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
+				if ch, exists := fileContents.CredHelpers[key]; exists {
+					if isNamespaced {
+						return false, "", unsupportedNamespaceErr(ch)
+					}
+					desc, err := setCredsInCredHelper(ch, key, username, password)
+					if err != nil {
+						return false, "", err
+					}
+					return false, desc, nil
+				}
+				creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+				newCreds := dockerAuthConfig{Auth: creds}
+				fileContents.AuthConfigs[key] = newCreds
+				return true, "", nil
+			})
+		// External helpers.
+		default:
+			if isNamespaced {
+				err = unsupportedNamespaceErr(helper)
+			} else {
+				desc, err = setCredsInCredHelper(helper, key, username, password)
+			}
+		}
+		if err != nil {
+			multiErr = multierror.Append(multiErr, err)
+			logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
+			continue
+		}
+		logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
+		return desc, nil
+	}
+	return "", multiErr
+}
+
+func unsupportedNamespaceErr(helper string) error {
+	return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
+}
+
+// SetAuthentication stores the username and password in the credential helper or file.
+// See the documentation of SetCredentials for the format of "key".
+func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
+	_, err := SetCredentials(sys, key, username, password)
+	return err
+}
+
+// GetAllCredentials returns the registry credentials for all registries stored
+// in any of the configured credential helpers.
+func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
+	// To keep things simple, let's first extract all registries from all
+	// possible sources, and then call `GetCredentials` on them. That
+	// prevents us from having to reverse engineer the logic in
+	// `GetCredentials`.
+	allKeys := set.New[string]()
+
+	// To use GetCredentials, we must at least convert the URL forms into host names.
+	// While we're at it, we’ll also canonicalize docker.io to the standard format.
+	normalizedDockerIORegistry := normalizeRegistry("docker.io")
+
+	helpers, err := sysregistriesv2.CredentialHelpers(sys)
+	if err != nil {
+		return nil, err
+	}
+	for _, helper := range helpers {
+		switch helper {
+		// Special-case the built-in helper for auth files.
+		case sysregistriesv2.AuthenticationFileHelper:
+			for _, path := range getAuthFilePaths(sys, homedir.Get()) {
+				// parse returns an empty map in case the path doesn't exist.
+				fileContents, err := path.parse()
+				if err != nil {
+					return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
+				}
+				// Credential helpers in the auth file have a
+				// direct mapping to a registry, so we can just
+				// walk the map.
+				for registry := range fileContents.CredHelpers {
+					allKeys.Add(registry)
+				}
+				for key := range fileContents.AuthConfigs {
+					key := normalizeAuthFileKey(key, path.legacyFormat)
+					if key == normalizedDockerIORegistry {
+						key = "docker.io"
+					}
+					allKeys.Add(key)
+				}
+			}
+		// External helpers.
+		default:
+			creds, err := listCredsInCredHelper(helper)
+			if err != nil {
+				logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
+				if errors.Is(err, exec.ErrNotFound) {
+					creds = nil // It's okay if the helper doesn't exist.
+				} else {
+					return nil, err
+				}
+			}
+			for registry := range creds {
+				allKeys.Add(registry)
+			}
+		}
+	}
+
+	// Now use `GetCredentials` to get the specific auth configs for each
+	// previously listed registry.
+	allCreds := make(map[string]types.DockerAuthConfig)
+	for _, key := range allKeys.Values() {
+		creds, err := GetCredentials(sys, key)
+		if err != nil {
+			// Note: we rely on the logging in `GetCredentials`.
+			return nil, err
+		}
+		if creds != (types.DockerAuthConfig{}) {
+			allCreds[key] = creds
+		}
+	}
+
+	return allCreds, nil
+}
+
+// getAuthFilePaths returns a slice of authPaths based on the system context
+// in the order they should be searched. Note that some paths may not exist.
+// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
+// by tests.
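Reviewer aside: the default (non-legacy, non-overridden) search order implemented by getAuthFilePaths below, restated as a self-contained sketch; the literal paths mirror the variables defined at the top of this file.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	home, _ := os.UserHomeDir()
	// Default search order, most preferred first; see getAuthFilePaths below.
	candidates := []string{
		filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"), // primary, when $XDG_RUNTIME_DIR is set
		filepath.Join(home, ".config/containers/auth.json"),                 // $XDG_CONFIG_HOME fallback
		filepath.Join(home, ".docker/config.json"),                          // or $DOCKER_CONFIG/config.json
		filepath.Join(home, ".dockercfg"),                                   // legacy format
	}
	for _, p := range candidates {
		if _, err := os.Stat(p); err == nil {
			fmt.Println("would consult:", p)
		}
	}
}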
+func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
+	paths := []authPath{}
+	pathToAuth, userSpecifiedPath, err := getPathToAuth(sys)
+	if err == nil {
+		paths = append(paths, pathToAuth)
+	} else {
+		// An error means that the path set for XDG_RUNTIME_DIR does not exist,
+		// but we don't want to completely fail in the case that the user is pulling a public image.
+		// Log the error as a warning instead and move on to pulling the image.
+		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+	}
+	if !userSpecifiedPath {
+		xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+		if xdgCfgHome == "" {
+			xdgCfgHome = filepath.Join(homeDir, ".config")
+		}
+		paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath)))
+		if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+			paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json")))
+		} else {
+			paths = append(paths,
+				newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)),
+			)
+		}
+		paths = append(paths,
+			authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
+		)
+	}
+	return paths
+}
+
+// GetCredentials returns the registry credentials matching key, appropriate for
+// sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+// A valid key is a repository, a namespace within a registry, or a registry hostname.
+//
+// GetCredentialsForRef should almost always be used in favor of this API.
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
+	return getCredentialsWithHomeDir(sys, key, homedir.Get())
+}
+
+// GetCredentialsForRef returns the registry credentials necessary for
+// accessing ref on the registry ref points to,
+// appropriate for sys and the users’ configuration.
+// If an entry is not found, an empty struct is returned.
+func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
+	return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
+}
+
+// getCredentialsWithHomeDir is an internal implementation detail of
+// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
+// with an artificial home directory.
+func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
+	_, err := validateKey(key)
+	if err != nil {
+		return types.DockerAuthConfig{}, err
+	}
+
+	if sys != nil && sys.DockerAuthConfig != nil {
+		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
+		return *sys.DockerAuthConfig, nil
+	}
+
+	var registry string // We compute this once because it is used in several places.
+	if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
+		registry = key[:firstSlash]
+	} else {
+		registry = key
+	}
+
+	// Anonymous function to query credentials from auth files.
+ getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { + for _, path := range getAuthFilePaths(sys, homeDir) { + creds, err := findCredentialsInFile(key, registry, path) + if err != nil { + return types.DockerAuthConfig{}, "", err + } + + if creds != (types.DockerAuthConfig{}) { + return creds, path.path, nil + } + } + return types.DockerAuthConfig{}, "", nil + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return types.DockerAuthConfig{}, err + } + + var multiErr error + for _, helper := range helpers { + var ( + creds types.DockerAuthConfig + helperKey string + credHelperPath string + err error + ) + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + helperKey = key + creds, credHelperPath, err = getCredentialsFromAuthFiles() + // External helpers. + default: + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers, but a "registry" is a valid parent of "key". + helperKey = registry + creds, err = getCredsFromCredHelper(helper, registry) + } + if err != nil { + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) + multiErr = multierror.Append(multiErr, err) + continue + } + if creds != (types.DockerAuthConfig{}) { + msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper) + if credHelperPath != "" { + msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + } + logrus.Debug(msg) + return creds, nil + } + } + if multiErr != nil { + return types.DockerAuthConfig{}, multiErr + } + + logrus.Debugf("No credentials for %s found", key) + return types.DockerAuthConfig{}, nil +} + +// GetAuthentication returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. +// If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. +// +// Deprecated: This API only has support for username and password. To get the +// support for oauth2 in container registry authentication, we added the new +// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to +// maintain backward compatibility. +func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) { + return getAuthenticationWithHomeDir(sys, key, homedir.Get()) +} + +// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, +// it exists only to allow testing it with an artificial home directory. +func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { + creds, err := getCredentialsWithHomeDir(sys, key, homeDir) + if err != nil { + return "", "", err + } + if creds.IdentityToken != "" { + return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported) + } + return creds.Username, creds.Password, nil +} + +// RemoveAuthentication removes credentials for `key` from all possible +// sources such as credential helpers and auth files. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. 
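Reviewer aside: the three key forms accepted by these APIs, and the registry each resolves to under the first-slash rule used in getCredentialsWithHomeDir above; registryOf here is a hypothetical stand-in, not a function in this file.

package main

import (
	"fmt"
	"strings"
)

// registryOf mirrors the "everything before the first slash" rule used above.
func registryOf(key string) string {
	if i := strings.IndexRune(key, '/'); i != -1 {
		return key[:i]
	}
	return key
}

func main() {
	for _, key := range []string{
		"quay.io",                // registry hostname
		"quay.io/namespace",      // namespace within a registry
		"quay.io/namespace/repo", // repository
	} {
		fmt.Printf("%-24s -> registry %s\n", key, registryOf(key))
	}
}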
+func RemoveAuthentication(sys *types.SystemContext, key string) error { + isNamespaced, err := validateKey(key) + if err != nil { + return err + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } + + var multiErr error + isLoggedIn := false + + removeFromCredHelper := func(helper string) { + if isNamespaced { + logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) + return + } + err := deleteCredsFromCredHelper(helper, key) + if err == nil { + logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) + isLoggedIn = true + return + } + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) + return + } + multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)) + } + + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if innerHelper, exists := fileContents.CredHelpers[key]; exists { + removeFromCredHelper(innerHelper) + } + if _, ok := fileContents.AuthConfigs[key]; ok { + isLoggedIn = true + delete(fileContents.AuthConfigs, key) + } + return true, "", multiErr + }) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + // External helpers. + default: + removeFromCredHelper(helper) + } + } + + if multiErr != nil { + return multiErr + } + if !isLoggedIn { + return ErrNotLoggedIn + } + + return nil +} + +// RemoveAllAuthentication deletes all the credentials stored in credential +// helpers and auth files. +func RemoveAllAuthentication(sys *types.SystemContext) error { + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return err + } + + var multiErr error + for _, helper := range helpers { + var err error + switch helper { + // Special-case the built-in helper for auth files. + case sysregistriesv2.AuthenticationFileHelper: + _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + for registry, helper := range fileContents.CredHelpers { + // Helpers in auth files are expected + // to exist, so no special treatment + // for them. + if err := deleteCredsFromCredHelper(helper, registry); err != nil { + return false, "", err + } + } + fileContents.CredHelpers = make(map[string]string) + fileContents.AuthConfigs = make(map[string]dockerAuthConfig) + return true, "", nil + }) + // External helpers. + default: + var creds map[string]string + creds, err = listCredsInCredHelper(helper) + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + // It's okay if the helper doesn't exist. 
+ continue + } else { + break + } + } + for registry := range creds { + err = deleteCredsFromCredHelper(helper, registry) + if err != nil { + break + } + } + } + if err != nil { + logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err) + multiErr = multierror.Append(multiErr, err) + continue + } + logrus.Debugf("All credentials removed from credential helper %s", helper) + } + + return multiErr +} + +func listCredsInCredHelper(credHelper string) (map[string]string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.List(p) +} + +// getPathToAuth gets the path of the auth.json file used for reading and writing credentials, +// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults) +func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { + return getPathToAuthWithOS(sys, runtime.GOOS) +} + +// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, +// it exists only to allow testing it with an artificial runtime.GOOS. +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { + if sys != nil { + if sys.AuthFilePath != "" { + return newAuthPathDefault(sys.AuthFilePath), true, nil + } + if sys.LegacyFormatAuthFilePath != "" { + return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil + } + // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME + if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" { + return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil + } + } + if goOS != "linux" { + return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil + } + + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. + // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. + _, err := os.Stat(runtimeDir) + if os.IsNotExist(err) { + // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory + // or made a typo while setting the environment variable, + // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. + return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) + } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. + return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil + } + return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil +} + +// parse unmarshals the credentials stored in the auth.json file and returns it +// or returns an empty dockerConfigFile data structure if auth.json does not exist +// if the file exists and is empty, this function returns an error. 
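Reviewer aside: the auth.json shape that parse() consumes, decoded into stand-ins for the unexported structs at the top of this file; the base64 value is just user:pass.

package main

import (
	"encoding/json"
	"fmt"
)

// authConfig and configFile mirror dockerAuthConfig and dockerConfigFile above.
type authConfig struct {
	Auth          string `json:"auth,omitempty"`
	IdentityToken string `json:"identitytoken,omitempty"`
}

type configFile struct {
	AuthConfigs map[string]authConfig `json:"auths"`
	CredHelpers map[string]string     `json:"credHelpers,omitempty"`
}

func main() {
	raw := `{
	  "auths": {"quay.io": {"auth": "dXNlcjpwYXNz"}},
	  "credHelpers": {"gcr.io": "gcloud"}
	}`
	var cf configFile
	if err := json.Unmarshal([]byte(raw), &cf); err != nil {
		panic(err)
	}
	fmt.Println(cf.CredHelpers["gcr.io"]) // gcloud
}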
+func (path authPath) parse() (dockerConfigFile, error) { + var fileContents dockerConfigFile + + raw, err := os.ReadFile(path.path) + if err != nil { + if os.IsNotExist(err) { + fileContents.AuthConfigs = map[string]dockerAuthConfig{} + return fileContents, nil + } + return dockerConfigFile{}, err + } + + if path.legacyFormat { + if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil { + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) + } + return fileContents, nil + } + + if err = json.Unmarshal(raw, &fileContents); err != nil { + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) + } + + if fileContents.AuthConfigs == nil { + fileContents.AuthConfigs = map[string]dockerAuthConfig{} + } + if fileContents.CredHelpers == nil { + fileContents.CredHelpers = make(map[string]string) + } + + return fileContents, nil +} + +// modifyJSON finds an auth.json file, calls editor on the contents, and +// writes it back if editor returns true. +// Returns a human-readable description of the file, to be returned by SetCredentials. +// +// The editor may also return a human-readable description of the updated location; if it is "", +// the file itself is used. +func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { + path, _, err := getPathToAuth(sys) + if err != nil { + return "", err + } + if path.legacyFormat { + return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path) + } + + dir := filepath.Dir(path.path) + if err = os.MkdirAll(dir, 0700); err != nil { + return "", err + } + + fileContents, err := path.parse() + if err != nil { + return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) + } + + updated, description, err := editor(&fileContents) + if err != nil { + return "", fmt.Errorf("updating %q: %w", path.path, err) + } + if updated { + newData, err := json.MarshalIndent(fileContents, "", "\t") + if err != nil { + return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) + } + + if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil { + return "", fmt.Errorf("writing to file %q: %w", path.path, err) + } + } + + if description == "" { + description = path.path + } + return description, nil +} + +func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds, err := helperclient.Get(p, registry) + if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) + err = nil + } + return types.DockerAuthConfig{}, err + } + + switch creds.Username { + case "": + return types.DockerAuthConfig{ + IdentityToken: creds.Secret, + }, nil + default: + return types.DockerAuthConfig{ + Username: creds.Username, + Password: creds.Secret, + }, nil + } +} + +// setCredsInCredHelper stores (username, password) for registry in credHelper. +// Returns a human-readable description of the destination, to be returned by SetCredentials. 
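+//
+// For example, assuming a hypothetical helper named "pass":
+//
+//	desc, err := setCredsInCredHelper("pass", "quay.io", "alice", "secret")
+//	// on success, desc is "credential helper: pass"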
+func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds := &credentials.Credentials{ + ServerURL: registry, + Username: username, + Secret: password, + } + if err := helperclient.Store(p, creds); err != nil { + return "", err + } + return fmt.Sprintf("credential helper: %s", credHelper), nil +} + +func deleteCredsFromCredHelper(credHelper, registry string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.Erase(p, registry) +} + +// findCredentialsInFile looks for credentials matching "key" +// (which is "registry" or a namespace in "registry") in "path". +func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { + fileContents, err := path.parse() + if err != nil { + return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) + } + + // First try cred helpers. They should always be normalized. + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers. + if ch, exists := fileContents.CredHelpers[registry]; exists { + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path) + return getCredsFromCredHelper(ch, registry) + } + + // Support sub-registry namespaces in auth. + // (This is not a feature of ~/.docker/config.json; we support it even for + // those files as an extension.) + var keys []string + if !path.legacyFormat { + keys = authKeysForKey(key) + } else { + keys = []string{registry} + } + + // Repo or namespace keys are only supported as exact matches. For registry + // keys we prefer exact matches as well. + for _, key := range keys { + if val, exists := fileContents.AuthConfigs[key]; exists { + return decodeDockerAuth(path.path, key, val) + } + } + + // bad luck; let's normalize the entries first + // This primarily happens for legacyFormat, which for a time used API URLs + // (http[s:]//…/v1/) as keys. + // Secondarily, (docker login) accepted URLs with no normalization for + // several years, and matched registry hostnames against that, so support + // those entries even in non-legacyFormat ~/.docker/config.json. + // The docker.io registry still uses the /v1/ key with a special host name, + // so account for that as well. + registry = normalizeRegistry(registry) + for k, v := range fileContents.AuthConfigs { + if normalizeAuthFileKey(k, path.legacyFormat) == registry { + return decodeDockerAuth(path.path, k, v) + } + } + + // Only log this if we found nothing; getCredentialsWithHomeDir logs the + // source of found data. + logrus.Debugf("No credentials matching %s found in %s", key, path.path) + return types.DockerAuthConfig{}, nil +} + +// authKeysForKey returns the keys matching a provided auth file key, in order +// from the best match to worst. For example, +// when given a repository key "quay.io/repo/ns/image", it returns +// - quay.io/repo/ns/image +// - quay.io/repo/ns +// - quay.io/repo +// - quay.io +func authKeysForKey(key string) (res []string) { + for { + res = append(res, key) + + lastSlash := strings.LastIndex(key, "/") + if lastSlash == -1 { + break + } + key = key[:lastSlash] + } + + return res +} + +// decodeDockerAuth decodes the username and password from conf, +// which is entry key in path. 
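+//
+// The "auth" field holds base64("user:password"), so a hypothetical entry
+//
+//	"auths": {"quay.io": {"auth": "YWxpY2U6c2VjcmV0"}} // "alice:secret"
+//
+// decodes to DockerAuthConfig{Username: "alice", Password: "secret"}.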
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+	decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
+	if err != nil {
+		return types.DockerAuthConfig{}, err
+	}
+
+	user, passwordPart, valid := strings.Cut(string(decoded), ":")
+	if !valid {
+		// if it's invalid just skip, as docker does
+		if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+			logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+		} else {
+			logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+		}
+		return types.DockerAuthConfig{}, nil
+	}
+
+	password := strings.Trim(passwordPart, "\x00")
+	return types.DockerAuthConfig{
+		Username:      user,
+		Password:      password,
+		IdentityToken: conf.IdentityToken,
+	}, nil
+}
+
+// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
+// the resulting registry.
+func normalizeAuthFileKey(key string, legacyFormat bool) string {
+	stripped := strings.TrimPrefix(key, "http://")
+	stripped = strings.TrimPrefix(stripped, "https://")
+
+	if legacyFormat || stripped != key {
+		stripped, _, _ = strings.Cut(stripped, "/")
+	}
+
+	return normalizeRegistry(stripped)
+}
+
+// normalizeRegistry converts the provided registry if a known docker.io host
+// is provided.
+func normalizeRegistry(registry string) string {
+	switch registry {
+	case "registry-1.docker.io", "docker.io":
+		return "index.docker.io"
+	}
+	return registry
+}
+
+// validateKey verifies that the input key does not have a prefix that is not
+// allowed and returns an indicator if the key is namespaced.
+func validateKey(key string) (bool, error) {
+	if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
+		return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
+	}
+
+	// Ideally this should only accept explicitly valid keys, compare
+	// validateIdentityRemappingPrefix. For now, just reject values that look
+	// like tagged or digested values.
+	if strings.ContainsRune(key, '@') {
+		return false, fmt.Errorf(`key %s contains a '@' character`, key)
+	}
+
+	firstSlash := strings.IndexRune(key, '/')
+	isNamespaced := firstSlash != -1
+	// Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
+	if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
+		return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
+	}
+	// check if the provided key contains one or more subpaths.
+	return isNamespaced, nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
new file mode 100644
index 00000000..ae6097e8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
new file mode 100644
index 00000000..bad493fb
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go new file mode 100644 index 00000000..07fe5029 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go @@ -0,0 +1,12 @@ +//go:build !freebsd +// +build !freebsd + +package sysregistriesv2 + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/etc/containers/registries.conf" + +// builtinRegistriesConfDirPath is the path to the registry configuration directory. +// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. +const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go new file mode 100644 index 00000000..741b99f8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -0,0 +1,12 @@ +//go:build freebsd +// +build freebsd + +package sysregistriesv2 + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf" + +// builtinRegistriesConfDirPath is the path to the registry configuration directory. +// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. +const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go new file mode 100644 index 00000000..3a11542c --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -0,0 +1,350 @@ +package sysregistriesv2 + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/rootless" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/lockfile" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" +) + +// defaultShortNameMode is the default mode of registries.conf files if the +// corresponding field is left empty. +const defaultShortNameMode = types.ShortNameModePermissive + +// userShortNamesFile is the user-specific config file to store aliases. 
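+// (Typically this resolves to /var/cache/containers/short-name-aliases.conf
+// for root and ~/.cache/containers/short-name-aliases.conf for rootless
+// users; see shortNameAliasesConfPath below.)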
+var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")
+
+// shortNameAliasesConfPath returns the path to the machine-generated
+// short-name-aliases.conf file.
+func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
+	if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
+		return ctx.UserShortNameAliasConfPath, nil
+	}
+
+	if rootless.GetRootlessEUID() == 0 {
+		// Root user or in a non-conforming user NS
+		return filepath.Join("/var/cache", userShortNamesFile), nil
+	}
+
+	// Rootless user
+	cacheRoot, err := homedir.GetCacheHome()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(cacheRoot, userShortNamesFile), nil
+}
+
+// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
+// software-maintained `userShortNamesFile`.
+type shortNameAliasConf struct {
+	// A map for aliasing short names to their fully-qualified image
+	// reference counterparts.
+	// Note that Aliases is niled after being loaded from a file.
+	Aliases map[string]string `toml:"aliases"`
+
+	// If you add any field, make sure to update nonempty() below.
+}
+
+// nonempty returns true if config contains at least one configuration entry.
+func (c *shortNameAliasConf) nonempty() bool {
+	copy := *c // A shallow copy
+	if copy.Aliases != nil && len(copy.Aliases) == 0 {
+		copy.Aliases = nil
+	}
+	return !reflect.DeepEqual(copy, shortNameAliasConf{})
+}
+
+// alias combines the parsed value of an alias with the config file it has been
+// specified in. The config file is crucial for a good user experience: it
+// helps users track down and resolve potential pull errors.
+type alias struct {
+	// The parsed value of an alias. May be nil if set to "" in a config.
+	value reference.Named
+	// The config file the alias originates from.
+	configOrigin string
+}
+
+// shortNameAliasCache is the result of parsing shortNameAliasConf,
+// pre-processed for faster usage.
+type shortNameAliasCache struct {
+	// Note that an alias value may be nil iff it's set as an empty string
+	// in the config.
+	namedAliases map[string]alias
+}
+
+// ResolveShortNameAlias performs an alias resolution of the specified name.
+// The user-specific short-name-aliases.conf has precedence over aliases in the
+// assembled registries.conf. It returns the possibly resolved alias or nil, a
+// human-readable description of the config where the alias is specified, and
+// an error. The origin of the config file is crucial for a good user
+// experience: it helps users track down and resolve potential pull errors.
+// Almost all callers should use pkg/shortnames instead.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
+	if err := validateShortName(name); err != nil {
+		return nil, "", err
+	}
+	confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// Acquire the lock as a reader to allow for multiple routines in the
+	// same process space to read simultaneously.
+	lock.RLock()
+	defer lock.Unlock()
+
+	_, aliasCache, err := loadShortNameAliasConf(confPath)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// First look up the short-name-aliases.conf. Note that a value may be
+	// nil iff it's set as an empty string in the config.
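+	// For example, given a hypothetical user-level alias
+	// "fedora" = "registry.fedoraproject.org/fedora", the lookup ends here
+	// and any "fedora" alias from registries.conf is shadowed.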
+ alias, resolved := aliasCache.namedAliases[name] + if resolved { + return alias.value, alias.configOrigin, nil + } + + config, err := getConfig(ctx) + if err != nil { + return nil, "", err + } + alias, resolved = config.aliasCache.namedAliases[name] + if resolved { + return alias.value, alias.configOrigin, nil + } + return nil, "", nil +} + +// editShortNameAlias loads the aliases.conf file and changes it. If value is +// set, it adds the name-value pair as a new alias. Otherwise, it will remove +// name from the config. +func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error { + if err := validateShortName(name); err != nil { + return err + } + if value != nil { + if _, err := parseShortNameValue(*value); err != nil { + return err + } + } + + confPath, lock, err := shortNameAliasesConfPathAndLock(ctx) + if err != nil { + return err + } + + // Acquire the lock as a writer to prevent data corruption. + lock.Lock() + defer lock.Unlock() + + // Load the short-name-alias.conf, add the specified name-value pair, + // and write it back to the file. + conf, _, err := loadShortNameAliasConf(confPath) + if err != nil { + return err + } + + if conf.Aliases == nil { // Ensure we have a map to update. + conf.Aliases = make(map[string]string) + } + if value != nil { + conf.Aliases[name] = *value + } else { + // If the name does not exist, throw an error. + if _, exists := conf.Aliases[name]; !exists { + return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath) + } + + delete(conf.Aliases, name) + } + + f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + encoder := toml.NewEncoder(f) + return encoder.Encode(conf) +} + +// AddShortNameAlias adds the specified name-value pair as a new alias to the +// user-specific aliases.conf. It may override an existing alias for `name`. +// +// Note that it’s the caller’s responsibility to pass only a repository +// (reference.IsNameOnly) as the short name. +func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error { + return editShortNameAlias(ctx, name, &value) +} + +// RemoveShortNameAlias clears the alias for the specified name. It throws an +// error in case name does not exist in the machine-generated +// short-name-alias.conf. In such case, the alias must be specified in one of +// the registries.conf files, which is the users' responsibility. +// +// Note that it’s the caller’s responsibility to pass only a repository +// (reference.IsNameOnly) as the short name. +func RemoveShortNameAlias(ctx *types.SystemContext, name string) error { + return editShortNameAlias(ctx, name, nil) +} + +// parseShortNameValue parses the specified alias into a reference.Named. The alias is +// expected to not be tagged or carry a digest and *must* include a +// domain/registry. +// +// Note that the returned reference is always normalized. 
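+//
+// A few illustrative inputs and outcomes:
+//
+//	parseShortNameValue("docker.io/alpine")   // ok, normalized to docker.io/library/alpine
+//	parseShortNameValue("localhost/tools")    // ok, "localhost" counts as a registry
+//	parseShortNameValue("alpine")             // error: no registry
+//	parseShortNameValue("quay.io/foo:latest") // error: must not contain tag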
+func parseShortNameValue(alias string) (reference.Named, error) {
+	ref, err := reference.Parse(alias)
+	if err != nil {
+		return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
+	}
+
+	if _, ok := ref.(reference.Digested); ok {
+		return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
+	}
+
+	if _, ok := ref.(reference.Tagged); ok {
+		return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
+	}
+
+	named, ok := ref.(reference.Named)
+	if !ok {
+		return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
+	}
+
+	registry := reference.Domain(named)
+	if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+		return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
+	}
+
+	// A final parse to make sure that docker.io references are correctly
+	// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+	named, err = reference.ParseNormalizedNamed(alias)
+	return named, err
+}
+
+// validateShortName parses the specified `name` of an alias (i.e., the left-hand
+// side) and checks if it's a short name and does not include a tag or digest.
+func validateShortName(name string) error {
+	repo, err := reference.Parse(name)
+	if err != nil {
+		return fmt.Errorf("cannot parse short name: %q: %w", name, err)
+	}
+
+	if _, ok := repo.(reference.Digested); ok {
+		return fmt.Errorf("invalid short name %q: must not contain digest", name)
+	}
+
+	if _, ok := repo.(reference.Tagged); ok {
+		return fmt.Errorf("invalid short name %q: must not contain tag", name)
+	}
+
+	named, ok := repo.(reference.Named)
+	if !ok {
+		return fmt.Errorf("invalid short name %q: no name", name)
+	}
+
+	registry := reference.Domain(named)
+	if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+		return fmt.Errorf("invalid short name %q: must not contain registry", name)
+	}
+	return nil
+}
+
+// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
+// representation.
+func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
+	res := shortNameAliasCache{
+		namedAliases: make(map[string]alias),
+	}
+	errs := []error{}
+	for name, value := range conf.Aliases {
+		if err := validateShortName(name); err != nil {
+			errs = append(errs, err)
+		}
+
+		// Empty right-hand side values in config files allow resetting
+		// an alias in a previously loaded config. This way, drop-in
+		// config files from registries.conf.d can reset potentially
+		// misconfigured aliases.
+		if value == "" {
+			res.namedAliases[name] = alias{nil, path}
+			continue
+		}
+
+		named, err := parseShortNameValue(value)
+		if err != nil {
+			// We want to report *all* malformed entries to avoid a
+			// whack-a-mole for the user.
+			errs = append(errs, err)
+		} else {
+			res.namedAliases[name] = alias{named, path}
+		}
+	}
+	if len(errs) > 0 {
+		err := errs[0]
+		for i := 1; i < len(errs); i++ {
+			err = fmt.Errorf("%v\n: %w", errs[i], err)
+		}
+		return nil, err
+	}
+	return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+// In case of conflict, updates is preferred.
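+//
+// For example, if both caches map "fedora", the entry from updates wins;
+// entries present only in c are left untouched.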
+func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) { + maps.Copy(c.namedAliases, updates.namedAliases) +} + +func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { + conf := shortNameAliasConf{} + + meta, err := toml.DecodeFile(confPath, &conf) + if err != nil && !os.IsNotExist(err) { + // It's okay if the config doesn't exist. Other errors are not. + return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) + } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) + } + + // Even if we don’t always need the cache, doing so validates the machine-generated config. The + // file could still be corrupted by another process or user. + cache, err := newShortNameAliasCache(confPath, &conf) + if err != nil { + return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) + } + + return &conf, cache, nil +} + +func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) { + shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx) + if err != nil { + return "", nil, err + } + // Make sure the path to file exists. + if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil { + return "", nil, err + } + + lockPath := shortNameAliasesConfPath + ".lock" + locker, err := lockfile.GetLockFile(lockPath) + return shortNameAliasesConfPath, locker, err +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go new file mode 100644 index 00000000..f45fd9de --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -0,0 +1,1056 @@ +package sysregistriesv2 + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/regexp" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" +) + +// systemRegistriesConfPath is the path to the system-wide registry +// configuration file and is used to add/subtract potential registries for +// obtaining images. You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' +var systemRegistriesConfPath = builtinRegistriesConfPath + +// systemRegistriesConfDirPath is the path to the system-wide registry +// configuration directory and is used to add/subtract potential registries for +// obtaining images. You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' +var systemRegistriesConfDirPath = builtinRegistriesConfDirPath + +// AuthenticationFileHelper is a special key for credential helpers indicating +// the usage of consulting containers-auth.json files instead of a credential +// helper. 
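+//
+// For example, a registries.conf that consults auth files first and then a
+// hypothetical "pass" helper would contain:
+//
+//	credential-helpers = ["containers-auth.json", "pass"]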
+const AuthenticationFileHelper = "containers-auth.json"
+
+const (
+	// configuration values for "pull-from-mirror"
+	// mirrors will be used for both digest pulls and tag pulls
+	MirrorAll = "all"
+	// mirrors will only be used for digest pulls
+	MirrorByDigestOnly = "digest-only"
+	// mirrors will only be used for tag pulls
+	MirrorByTagOnly = "tag-only"
+)
+
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+	// The endpoint's remote location. Can be empty iff Prefix contains
+	// wildcard in the format: "*.example.com" for subdomain matching.
+	// Please refer to FindRegistry / PullSourcesFromReference instead
+	// of accessing/interpreting `Location` directly.
+	Location string `toml:"location,omitempty"`
+	// If true, certs verification will be skipped and HTTP (non-TLS)
+	// connections will be allowed.
+	Insecure bool `toml:"insecure,omitempty"`
+	// PullFromMirror is used for adding restrictions to image pull through the mirror.
+	// Set to "all", "digest-only", or "tag-only".
+	// If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+	// tag can potentially yield different images, depending on which endpoint
+	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
+	// If "tag-only", mirrors will only be used for tag pulls. A mirror that is more
+	// up to date, but also more expensive to reach, is unlikely to be out of sync
+	// if tags move, so it should not be used unnecessarily for digest references.
+	// The default is "all" (or left empty): mirrors will be used for both digest
+	// pulls and tag pulls unless mirror-by-digest-only is set for the primary registry.
+	// This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+	// This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+	PullFromMirror string `toml:"pull-from-mirror,omitempty"`
+}
+
+// userRegistriesFile is the path to the per user registry configuration file.
+var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
+
+// userRegistriesDir is the path to the per user registry configuration directory.
+var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
+
+// rewriteReference substitutes the provided `prefix` of `ref` with the
+// endpoint's `location` and creates a new named reference from it.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+	refString := ref.String()
+	var newNamedRef string
+	// refMatchingPrefix returns the length of the match. Everything that
+	// follows the match gets appended to the registry's location.
+	prefixLen := refMatchingPrefix(refString, prefix)
+	if prefixLen == -1 {
+		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+	}
+	// In the case of an empty `location` field, simply return the original
+	// input ref as-is.
+	//
+	// FIXME: already validated in postProcessRegistries, so check can probably
+	// be dropped.
+ // https://github.com/containers/image/pull/1191#discussion_r610621608 + if e.Location == "" { + if !strings.HasPrefix(prefix, "*.") { + return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix) + } + return ref, nil + } + newNamedRef = e.Location + refString[prefixLen:] + newParsedRef, err := reference.ParseNamed(newNamedRef) + if err != nil { + return nil, fmt.Errorf("rewriting reference: %w", err) + } + + return newParsedRef, nil +} + +// Registry represents a registry. +type Registry struct { + // Prefix is used for matching images, and to translate one namespace to + // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` + // and we pull from "example.com/bar/myimage:latest", the image will + // effectively be pulled from "example.com/foo/bar/myimage:latest". + // If no Prefix is specified, it defaults to the specified location. + // Prefix can also be in the format: "*.example.com" for matching + // subdomains. The wildcard should only be in the beginning and should also + // not contain any namespaces or special characters: "/", "@" or ":". + // Please refer to FindRegistry / PullSourcesFromReference instead + // of accessing/interpreting `Prefix` directly. + Prefix string `toml:"prefix"` + // A registry is an Endpoint too + Endpoint + // The registry's mirrors. + Mirrors []Endpoint `toml:"mirror,omitempty"` + // If true, pulling from the registry will be blocked. + Blocked bool `toml:"blocked,omitempty"` + // If true, mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Restricting mirrors to pulls by digest avoids that issue. + MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` +} + +// PullSource consists of an Endpoint and a Reference. Note that the reference is +// rewritten according to the registries prefix and the Endpoint's location. +type PullSource struct { + Endpoint Endpoint + Reference reference.Named +} + +// PullSourcesFromReference returns a slice of PullSource's based on the passed +// reference. +func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { + var endpoints []Endpoint + _, isDigested := ref.(reference.Canonical) + if r.MirrorByDigestOnly { + // Only use mirrors when the reference is a digested one. + if isDigested { + endpoints = append(endpoints, r.Mirrors...) 
+			}
+		} else {
+			for _, mirror := range r.Mirrors {
+				// Skip the mirror if a per-mirror setting exists but the
+				// reference does not match the restriction.
+				switch mirror.PullFromMirror {
+				case MirrorByDigestOnly:
+					if !isDigested {
+						continue
+					}
+				case MirrorByTagOnly:
+					if isDigested {
+						continue
+					}
+				}
+				endpoints = append(endpoints, mirror)
+			}
+		}
+		endpoints = append(endpoints, r.Endpoint)
+
+		sources := []PullSource{}
+		for _, ep := range endpoints {
+			rewritten, err := ep.rewriteReference(ref, r.Prefix)
+			if err != nil {
+				return nil, err
+			}
+			sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+		}
+
+		return sources, nil
+}
+
+// V1TOMLregistries is for backwards compatibility with sysregistries v1
+type V1TOMLregistries struct {
+	Registries []string `toml:"registries"`
+}
+
+// V1TOMLConfig is for backwards compatibility with sysregistries v1
+type V1TOMLConfig struct {
+	Search   V1TOMLregistries `toml:"search"`
+	Insecure V1TOMLregistries `toml:"insecure"`
+	Block    V1TOMLregistries `toml:"block"`
+}
+
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+	V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+// Empty arrays are treated as missing entries.
+func (config *V1RegistriesConf) Nonempty() bool {
+	copy := *config // A shallow copy
+	if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
+		copy.V1TOMLConfig.Search.Registries = nil
+	}
+	if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 {
+		copy.V1TOMLConfig.Insecure.Registries = nil
+	}
+	if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
+		copy.V1TOMLConfig.Block.Registries = nil
+	}
+	return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case remains nil), while an explicit [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V1RegistriesConf) hasSetField() bool {
+	return !reflect.DeepEqual(*config, V1RegistriesConf{})
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+	Registries []Registry `toml:"registry"`
+	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+	// An array of global credential helpers to use for authentication
+	// (e.g., ["pass", "secretservice"]). The helpers are consulted in the
+	// specified order. Note that "containers-auth.json" is a reserved
+	// value for consulting auth files as specified in
+	// containers-auth.json(5).
+	//
+	// If empty, CredentialHelpers defaults to ["containers-auth.json"].
+	CredentialHelpers []string `toml:"credential-helpers"`
+
+	// ShortNameMode defines how short-name resolution should be handled by
+	// _consumers_ of this package. Depending on the mode, the user should
+	// be prompted with a choice of using one of the unqualified-search
+	// registries when referring to a short name.
+	//
+	// Valid modes are:
+	//  * "permissive": prompt if stdout is a TTY, otherwise use all
+	//    unqualified-search registries
+	//  * "enforcing": always prompt and error if stdout is not a TTY
+	//  * "disabled": do not prompt and potentially use all
+	//    unqualified-search registries
+	ShortNameMode string `toml:"short-name-mode"`
+
+	shortNameAliasConf
+
+	// If you add any field, make sure to update Nonempty() below.
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+	copy := *config // A shallow copy
+	if copy.Registries != nil && len(copy.Registries) == 0 {
+		copy.Registries = nil
+	}
+	if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
+		copy.UnqualifiedSearchRegistries = nil
+	}
+	if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
+		copy.CredentialHelpers = nil
+	}
+	if !copy.shortNameAliasConf.nonempty() {
+		copy.shortNameAliasConf = shortNameAliasConf{}
+	}
+	return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case remains nil), while an explicit [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V2RegistriesConf) hasSetField() bool {
+	return !reflect.DeepEqual(*config, V2RegistriesConf{})
+}
+
+// parsedConfig is the result of parsing, and possibly merging, configuration files;
+// it is the boundary between the process of reading+ingesting the files, and
+// later interpreting the configuration based on the caller’s requests.
+type parsedConfig struct {
+	// NOTE: Update also parsedConfig.updateWithConfigurationFrom!
+
+	// partialV2 must continue to exist to maintain the return value of TryUpdatingCache
+	// for compatibility with existing callers.
+	// We store the authoritative Registries and UnqualifiedSearchRegistries values there as well.
+	partialV2 V2RegistriesConf
+	// Absolute path to the configuration file that set the UnqualifiedSearchRegistries.
+	unqualifiedSearchRegistriesOrigin string
+	// Result of parsing of partialV2.ShortNameMode.
+	// NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values;
+	// the full configuration in configCache / getConfig() always contains a valid value.
+	shortNameMode types.ShortNameMode
+	aliasCache    *shortNameAliasCache
+}
+
+// InvalidRegistries represents an invalid registry configuration. An example
+// is when "registry.com" is defined multiple times in the configuration but
+// with conflicting security settings.
+type InvalidRegistries struct {
+	s string
+}
+
+// Error returns the error string.
+func (e *InvalidRegistries) Error() string {
+	return e.s
+}
+
+// parseLocation parses the input string, performs some sanity checks and returns
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
+func parseLocation(input string) (string, error) {
+	trimmed := strings.TrimRight(input, "/")
+
+	// FIXME: This check needs to exist but fails for empty Location field with
+	// wildcarded prefix. Removal of this check "only" allows invalid input in,
+	// and does not prevent correct operation.
+ // https://github.com/containers/image/pull/1191#discussion_r610122617 + // + // if trimmed == "" { + // return "", &InvalidRegistries{s: "invalid location: cannot be empty"} + // } + // + + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) + return "", &InvalidRegistries{s: msg} + } + + return trimmed, nil +} + +// ConvertToV2 returns a v2 config corresponding to a v1 one. +func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { + regMap := make(map[string]*Registry) + // The order of the registries is not really important, but make it deterministic (the same for the same config file) + // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. + registryOrder := []string{} + + getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object + var err error + location, err = parseLocation(location) + if err != nil { + return nil, err + } + reg, exists := regMap[location] + if !exists { + reg = &Registry{ + Endpoint: Endpoint{Location: location}, + Mirrors: []Endpoint{}, + Prefix: location, + } + regMap[location] = reg + registryOrder = append(registryOrder, location) + } + return reg, nil + } + + for _, blocked := range config.V1TOMLConfig.Block.Registries { + reg, err := getRegistry(blocked) + if err != nil { + return nil, err + } + reg.Blocked = true + } + for _, insecure := range config.V1TOMLConfig.Insecure.Registries { + reg, err := getRegistry(insecure) + if err != nil { + return nil, err + } + reg.Insecure = true + } + + res := &V2RegistriesConf{ + UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, + } + for _, location := range registryOrder { + reg := regMap[location] + res.Registries = append(res.Registries, *reg) + } + return res, nil +} + +// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. +var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") + +// postProcess checks the consistency of all the configuration, looks for conflicts, +// and normalizes the configuration (e.g., sets the Prefix to Location if not set). +func (config *V2RegistriesConf) postProcessRegistries() error { + regMap := make(map[string][]*Registry) + + for i := range config.Registries { + reg := &config.Registries[i] + // make sure Location and Prefix are valid + var err error + reg.Location, err = parseLocation(reg.Location) + if err != nil { + return err + } + + if reg.Prefix == "" { + if reg.Location == "" { + return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"} + } + reg.Prefix = reg.Location + } else { + reg.Prefix, err = parseLocation(reg.Prefix) + if err != nil { + return err + } + // FIXME: allow config authors to always use Prefix. 
+			// https://github.com/containers/image/pull/1191#discussion_r610622495
+			if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
+				return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
+			}
+		}
+
+		// Validate that the mirror usage setting is not applied to the primary registry.
+		if reg.PullFromMirror != "" {
+			return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+		}
+		// make sure mirrors are valid
+		for _, mir := range reg.Mirrors {
+			mir.Location, err = parseLocation(mir.Location)
+			if err != nil {
+				return err
+			}
+
+			// FIXME: unqualifiedSearchRegistries now also accepts empty values,
+			// and it shouldn't.
+			// https://github.com/containers/image/pull/1191#discussion_r610623216
+			if mir.Location == "" {
+				return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
+			}
+
+			if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+				return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
+			}
+			if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+				mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+				return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+			}
+		}
+		if reg.Location == "" {
+			regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
+		} else {
+			regMap[reg.Location] = append(regMap[reg.Location], reg)
+		}
+	}
+
+	// Given that a registry can be mentioned multiple times (e.g., to have
+	// multiple prefixes backed by different mirrors), we need to make sure
+	// there are no conflicts among them.
+	//
+	// Note: we need to iterate over the registries array to ensure
+	// deterministic behavior, which is not guaranteed by maps.
+	for _, reg := range config.Registries {
+		var others []*Registry
+		var ok bool
+		if reg.Location == "" {
+			others, ok = regMap[reg.Prefix]
+		} else {
+			others, ok = regMap[reg.Location]
+		}
+		if !ok {
+			return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing")
+		}
+		for _, other := range others {
+			if reg.Insecure != other.Insecure {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+
+			if reg.Blocked != other.Blocked {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+		}
+	}
+
+	for i := range config.UnqualifiedSearchRegistries {
+		registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+		if err != nil {
+			return err
+		}
+		if !anchoredDomainRegexp.MatchString(registry) {
+			return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+		}
+		config.UnqualifiedSearchRegistries[i] = registry
+	}
+
+	// Registries are ordered and the first longest prefix always wins,
+	// rendering later items with the same prefix non-existent. We cannot error
+	// out anymore as this might break existing users, so let's just ignore them
+	// to guarantee that the same prefix exists only once.
+	//
+	// As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice
+	// is always sorted. To be consistent in situations where it is not called (no drop-ins),
+	// sort it here as well.
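+	// Worked example: given prefixes ["example.com", "example.com",
+	// "a.example.com"], only the first "example.com" entry is kept and the
+	// final order is ["a.example.com", "example.com"].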
+ prefixes := []string{} + uniqueRegistries := make(map[string]Registry) + for i := range config.Registries { + // TODO: should we warn if we see the same prefix being used multiple times? + prefix := config.Registries[i].Prefix + if _, exists := uniqueRegistries[prefix]; !exists { + uniqueRegistries[prefix] = config.Registries[i] + prefixes = append(prefixes, prefix) + } + } + sort.Strings(prefixes) + config.Registries = []Registry{} + for _, prefix := range prefixes { + config.Registries = append(config.Registries, uniqueRegistries[prefix]) + } + + return nil +} + +// ConfigPath returns the path to the system-wide registry configuration file. +// Deprecated: This API implies configuration is read from files, and that there is only one. +// Please use ConfigurationSourceDescription to obtain a string usable for error messages. +func ConfigPath(ctx *types.SystemContext) string { + return newConfigWrapper(ctx).configPath +} + +// ConfigDirPath returns the path to the directory for drop-in +// registry configuration files. +// Deprecated: This API implies configuration is read from directories, and that there is only one. +// Please use ConfigurationSourceDescription to obtain a string usable for error messages. +func ConfigDirPath(ctx *types.SystemContext) string { + configWrapper := newConfigWrapper(ctx) + if configWrapper.userConfigDirPath != "" { + return configWrapper.userConfigDirPath + } + return configWrapper.configDirPath +} + +// configWrapper is used to store the paths from ConfigPath and ConfigDirPath +// and acts as a key to the internal cache. +type configWrapper struct { + // path to the registries.conf file + configPath string + // path to system-wide registries.conf.d directory, or "" if not used + configDirPath string + // path to user specified registries.conf.d directory, or "" if not used + userConfigDirPath string +} + +// newConfigWrapper returns a configWrapper for the specified SystemContext. +func newConfigWrapper(ctx *types.SystemContext) configWrapper { + return newConfigWrapperWithHomeDir(ctx, homedir.Get()) +} + +// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper, +// it exists only to allow testing it with an artificial home directory. 
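+//
+// A resolution sketch, assuming homeDir = "/home/u" and Linux defaults:
+//
+//	ctx == nil, /home/u/.config/containers/registries.conf exists
+//	  → that file is used, with drop-ins from its sibling registries.conf.d
+//	ctx == nil, no per-user file
+//	  → the system registries.conf is used, and both the system and the
+//	    per-user registries.conf.d directories are consulted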
+func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper { + var wrapper configWrapper + userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile) + userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) + + // decide configPath using per-user path or system file + if ctx != nil && ctx.SystemRegistriesConfPath != "" { + wrapper.configPath = ctx.SystemRegistriesConfPath + } else if _, err := os.Stat(userRegistriesFilePath); err == nil { + // per-user registries.conf exists, not reading system dir + // return config dirs from ctx or per-user one + wrapper.configPath = userRegistriesFilePath + if ctx != nil && ctx.SystemRegistriesConfDirPath != "" { + wrapper.configDirPath = ctx.SystemRegistriesConfDirPath + } else { + wrapper.userConfigDirPath = userRegistriesDirPath + } + + return wrapper + } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { + wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) + } else { + wrapper.configPath = systemRegistriesConfPath + } + + // potentially use both system and per-user dirs if not using per-user config file + if ctx != nil && ctx.SystemRegistriesConfDirPath != "" { + // dir explicitly chosen: use only that one + wrapper.configDirPath = ctx.SystemRegistriesConfDirPath + } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { + wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath) + wrapper.userConfigDirPath = userRegistriesDirPath + } else { + wrapper.configDirPath = systemRegistriesConfDirPath + wrapper.userConfigDirPath = userRegistriesDirPath + } + + return wrapper +} + +// ConfigurationSourceDescription returns a string containers paths of registries.conf and registries.conf.d +func ConfigurationSourceDescription(ctx *types.SystemContext) string { + wrapper := newConfigWrapper(ctx) + configSources := []string{wrapper.configPath} + if wrapper.configDirPath != "" { + configSources = append(configSources, wrapper.configDirPath) + } + if wrapper.userConfigDirPath != "" { + configSources = append(configSources, wrapper.userConfigDirPath) + } + return strings.Join(configSources, ", ") +} + +// configMutex is used to synchronize concurrent accesses to configCache. +var configMutex = sync.Mutex{} + +// configCache caches already loaded configs with config paths as keys and is +// used to avoid redundantly parsing configs. Concurrent accesses to the cache +// are synchronized via configMutex. +var configCache = make(map[configWrapper]*parsedConfig) + +// InvalidateCache invalidates the registry cache. This function is meant to be +// used for long-running processes that need to reload potential changes made to +// the cached registry config files. +func InvalidateCache() { + configMutex.Lock() + defer configMutex.Unlock() + configCache = make(map[configWrapper]*parsedConfig) +} + +// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. +func getConfig(ctx *types.SystemContext) (*parsedConfig, error) { + wrapper := newConfigWrapper(ctx) + configMutex.Lock() + if config, inCache := configCache[wrapper]; inCache { + configMutex.Unlock() + return config, nil + } + configMutex.Unlock() + + return tryUpdatingCache(ctx, wrapper) +} + +// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d +// directory. 
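+//
+// For example, a directory containing 000-shortnames.conf, 050-mirrors.conf,
+// notes.txt and sub/extra.conf yields only the two top-level *.conf files;
+// sub-directories are not descended into.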
+func dropInConfigs(wrapper configWrapper) ([]string, error) { + var ( + configs []string + dirPaths []string + ) + if wrapper.configDirPath != "" { + dirPaths = append(dirPaths, wrapper.configDirPath) + } + if wrapper.userConfigDirPath != "" { + dirPaths = append(dirPaths, wrapper.userConfigDirPath) + } + for _, dirPath := range dirPaths { + err := filepath.WalkDir(dirPath, + // WalkFunc to read additional configs + func(path string, d fs.DirEntry, err error) error { + switch { + case err != nil: + // return error (could be a permission problem) + return err + case d == nil: + // this should only happen when err != nil but let's be sure + return nil + case d.IsDir(): + if path != dirPath { + // make sure to not recurse into sub-directories + return filepath.SkipDir + } + // ignore directories + return nil + default: + // only add *.conf files + if strings.HasSuffix(path, ".conf") { + configs = append(configs, path) + } + return nil + } + }, + ) + + if err != nil && !os.IsNotExist(err) { + // Ignore IsNotExist errors: most systems won't have a registries.conf.d + // directory. + return nil, fmt.Errorf("reading registries.conf.d: %w", err) + } + } + + return configs, nil +} + +// TryUpdatingCache loads the configuration from the provided `SystemContext` +// without using the internal cache. On success, the loaded configuration will +// be added into the internal registry cache. +// It returns the resulting configuration; this is DEPRECATED and may not correctly +// reflect any future data handled by this package. +func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { + config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx)) + if err != nil { + return nil, err + } + return &config.partialV2, err +} + +// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper +// argument to avoid redundantly calculating the config paths. +func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) { + configMutex.Lock() + defer configMutex.Unlock() + + // load the config + config, err := loadConfigFile(wrapper.configPath, false) + if err != nil { + // Continue with an empty []Registry if we use the default config, which + // implies that the config path of the SystemContext isn't set. + // + // Note: if ctx.SystemRegistriesConfPath points to the default config, + // we will still return an error. + if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { + config = &parsedConfig{} + config.partialV2 = V2RegistriesConf{Registries: []Registry{}} + config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{}) + if err != nil { + return nil, err // Should never happen + } + } else { + return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err) + } + } + + // Load the configs from the conf directory path. + dinConfigs, err := dropInConfigs(wrapper) + if err != nil { + return nil, err + } + for _, path := range dinConfigs { + // Enforce v2 format for drop-in-configs. 
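+		// (loadConfigFile rejects v1-format content with an InvalidRegistries
+		// error when forceV2 is true.)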
+		dropIn, err := loadConfigFile(path, true)
+		if err != nil {
+			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
+		}
+		config.updateWithConfigurationFrom(dropIn)
+	}
+
+	if config.shortNameMode == types.ShortNameModeInvalid {
+		config.shortNameMode = defaultShortNameMode
+	}
+
+	if len(config.partialV2.CredentialHelpers) == 0 {
+		config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
+	}
+
+	// populate the cache
+	configCache[wrapper] = config
+	return config, nil
+}
+
+// GetRegistries has been deprecated. Use FindRegistry instead.
+//
+// GetRegistries loads and returns the registries specified in the config.
+// Note that the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.partialV2.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+	registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
+	return registries, err
+}
+
+// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
+// to try for unqualified image search, in the returned order. It also returns
+// a human-readable description of where these entries are specified (e.g., a
+// registries.conf file).
+func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, "", err
+	}
+	return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
+}
+
+// parseShortNameMode translates the string into well-typed
+// types.ShortNameMode.
+func parseShortNameMode(mode string) (types.ShortNameMode, error) {
+	switch mode {
+	case "disabled":
+		return types.ShortNameModeDisabled, nil
+	case "enforcing":
+		return types.ShortNameModeEnforcing, nil
+	case "permissive":
+		return types.ShortNameModePermissive, nil
+	default:
+		return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
+	}
+}
+
+// GetShortNameMode returns the configured types.ShortNameMode.
+func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
+	if ctx != nil && ctx.ShortNameMode != nil {
+		return *ctx.ShortNameMode, nil
+	}
+	config, err := getConfig(ctx)
+	if err != nil {
+		return -1, err
+	}
+	return config.shortNameMode, err
+}
+
+// CredentialHelpers returns the global top-level credential helpers.
+func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
+	config, err := getConfig(sys)
+	if err != nil {
+		return nil, err
+	}
+	return config.partialV2.CredentialHelpers, nil
+}
+
+// refMatchingSubdomainPrefix returns the length of ref
+// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value containing wildcarded subdomains in the
+// format: *.example.com. Wildcards are only accepted at the beginning, so
+// other formats like example.*.com will not work. Wildcarded prefixes also
+// cannot contain port numbers or namespaces in them.
+func refMatchingSubdomainPrefix(ref, prefix string) int {
+	index := strings.Index(ref, prefix[1:])
+	if index == -1 {
+		return -1
+	}
+	if strings.Contains(ref[:index], "/") {
+		return -1
+	}
+	index += len(prefix[1:])
+	if index == len(ref) {
+		return index
+	}
+	switch ref[index] {
+	case ':', '/', '@':
+		return index
+	default:
+		return -1
+	}
+}
+
+// refMatchingPrefix returns the length of the prefix iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchingPrefix(ref, prefix string) int {
+	switch {
+	case strings.HasPrefix(prefix, "*."):
+		return refMatchingSubdomainPrefix(ref, prefix)
+	case len(ref) < len(prefix):
+		return -1
+	case len(ref) == len(prefix):
+		if ref == prefix {
+			return len(prefix)
+		}
+		return -1
+	case len(ref) > len(prefix):
+		if !strings.HasPrefix(ref, prefix) {
+			return -1
+		}
+		c := ref[len(prefix)]
+		// This allows "example.com:5000" to match "example.com",
+		// which is unintended; that will get fixed eventually, DON'T RELY
+		// ON THE CURRENT BEHAVIOR.
+		if c == ':' || c == '/' || c == '@' {
+			return len(prefix)
+		}
+		return -1
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return findRegistryWithParsedConfig(config, ref)
+}
+
+// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
+// parsedConfig.
+func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
+	reg := Registry{}
+	prefixLen := 0
+	for _, r := range config.partialV2.Registries {
+		if refMatchingPrefix(ref, r.Prefix) != -1 {
+			length := len(r.Prefix)
+			if length > prefixLen {
+				reg = r
+				prefixLen = length
+			}
+		}
+	}
+	if prefixLen != 0 {
+		return &reg, nil
+	}
+	return nil, nil
+}
+
+// loadConfigFile loads and unmarshals a single config file.
+// Use forceV2 if the config must be in the v2 format.
+func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
+	logrus.Debugf("Loading registries configuration %q", path)
+
+	// tomlConfig allows us to unmarshal either V1 or V2 simultaneously.
+	type tomlConfig struct {
+		V2RegistriesConf
+		V1RegistriesConf // for backwards compatibility with sysregistries v1
+	}
+
+	// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
+	var combinedTOML tomlConfig
+	meta, err := toml.DecodeFile(path, &combinedTOML)
+	if err != nil {
+		return nil, err
+	}
+	if keys := meta.Undecoded(); len(keys) > 0 {
+		logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+	}
+
+	if combinedTOML.V1RegistriesConf.hasSetField() {
+		// Enforce the v2 format if requested.
+		if forceV2 {
+			return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
+		}
+
+		// Convert a v1 config into a v2 config.
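+		// (Files mixing v1 and v2 fields are rejected below rather than guessed at.)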
+		if combinedTOML.V2RegistriesConf.hasSetField() {
+			return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)}
+		}
+		converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
+		if err != nil {
+			return nil, err
+		}
+		combinedTOML.V1RegistriesConf = V1RegistriesConf{}
+		combinedTOML.V2RegistriesConf = *converted
+	}
+
+	res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf}
+
+	// Post process registries, set the correct prefixes, sanity checks, etc.
+	if err := res.partialV2.postProcessRegistries(); err != nil {
+		return nil, err
+	}
+
+	res.unqualifiedSearchRegistriesOrigin = path
+
+	if len(res.partialV2.ShortNameMode) > 0 {
+		mode, err := parseShortNameMode(res.partialV2.ShortNameMode)
+		if err != nil {
+			return nil, err
+		}
+		res.shortNameMode = mode
+	} else {
+		res.shortNameMode = types.ShortNameModeInvalid
+	}
+
+	// Valid wildcarded prefixes must be in the format: *.example.com
+	// FIXME: Move to postProcessRegistries
+	// https://github.com/containers/image/pull/1191#discussion_r610623829
+	for i := range res.partialV2.Registries {
+		prefix := res.partialV2.Registries[i].Prefix
+		if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
+			msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
+			return nil, &InvalidRegistries{s: msg}
+		}
+	}
+
+	// Parse and validate short-name aliases.
+	cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
+	if err != nil {
+		return nil, fmt.Errorf("validating short-name aliases: %w", err)
+	}
+	res.aliasCache = cache
+	// Clear res.partialV2.shortNameAliasConf to make it available for garbage collection and
+	// reduce memory consumption. We're consulting aliasCache for lookups.
+	res.partialV2.shortNameAliasConf = shortNameAliasConf{}
+
+	return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+//
+// Fields present in updates will typically replace already set fields in c.
+// The [[registry]] and alias tables are merged.
+func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
+	// == Merge Registries:
+	registryMap := make(map[string]Registry)
+	for i := range c.partialV2.Registries {
+		registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i]
+	}
+	// Merge the freshly loaded registries.
+	for i := range updates.partialV2.Registries {
+		registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
+	}
+
+	// Go maps have a non-deterministic order when iterating the keys, so
+	// we dump them in a slice and sort it to enforce some order in the
+	// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
+	// configuration where a non-deterministic order could easily cause
+	// confusion.
+	prefixes := maps.Keys(registryMap)
+	sort.Strings(prefixes)
+
+	c.partialV2.Registries = []Registry{}
+	for _, prefix := range prefixes {
+		c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
+	}
+
+	// == Merge UnqualifiedSearchRegistries:
+	// This depends on a subtlety of the behavior of the TOML decoder, where a missing array field
+	// is not modified while unmarshaling (in our case remains nil), while an explicit [] is unmarshaled
+	// as a non-nil []string{}.
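+	// For example (illustrative): a drop-in containing
+	//	unqualified-search-registries = []
+	// deliberately clears the search list, while a drop-in that omits the key
+	// leaves the base configuration's list untouched.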
+ if updates.partialV2.UnqualifiedSearchRegistries != nil { + c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries + c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin + } + + // == Merge credential helpers: + if updates.partialV2.CredentialHelpers != nil { + c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers + } + + // == Merge shortNameMode: + // We don’t maintain c.partialV2.ShortNameMode. + if updates.shortNameMode != types.ShortNameModeInvalid { + c.shortNameMode = updates.shortNameMode + } + + // == Merge aliasCache: + // We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf. + c.aliasCache.updateWithConfigurationFrom(updates.aliasCache) +} diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go new file mode 100644 index 00000000..56b0d493 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -0,0 +1,103 @@ +package tlsclientconfig + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc +func SetupCertificates(dir string, tlsc *tls.Config) error { + logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) + fs, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + if os.IsPermission(err) { + logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) + return nil + } + return err + } + + for _, f := range fs { + fullPath := filepath.Join(dir, f.Name()) + if strings.HasSuffix(f.Name(), ".crt") { + logrus.Debugf(" crt: %s", fullPath) + data, err := os.ReadFile(fullPath) + if err != nil { + if os.IsNotExist(err) { + // Dangling symbolic link? + // Race with someone who deleted the + // file after we read the directory's + // list of contents? + logrus.Warnf("error reading certificate %q: %v", fullPath, err) + continue + } + return err + } + if tlsc.RootCAs == nil { + systemPool, err := x509.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %w", err) + } + tlsc.RootCAs = systemPool + } + tlsc.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf(" cert: %s", fullPath) + if !hasFile(fs, keyName) { + return fmt.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+			if err != nil {
+				return err
+			}
+			tlsc.Certificates = append(tlsc.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf(" key: %s", fullPath)
+			if !hasFile(fs, certName) {
+				return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+	return nil
+}
+
+func hasFile(files []os.DirEntry, name string) bool {
+	return slices.ContainsFunc(files, func(f os.DirEntry) bool {
+		return f.Name() == name
+	})
+}
+
+// NewTransport creates a default transport
+func NewTransport() *http.Transport {
+	direct := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}
+	tr := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		DialContext:         direct.DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+		IdleConnTimeout:     90 * time.Second,
+		MaxIdleConns:        100,
+	}
+	return tr
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
new file mode 100644
index 00000000..d6075f81
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -0,0 +1,102 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/opencontainers/go-digest"
+	"golang.org/x/exp/slices"
+)
+
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return nil, err
+	}
+	sig := newUntrustedSignature(manifestDigest, dockerReference)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
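+// An illustrative call from client code (values hypothetical):
+//
+//	sig, err := signature.VerifyDockerManifestSignature(sigBlob, manifestBlob,
+//		"docker.io/library/busybox", mech, "0123456789ABCDEF0123456789ABCDEF01234567")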
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+	sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
+	return sig, err
+}
+
+// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
+// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
+// was used to verify it.
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
+	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+	if err != nil {
+		return nil, "", err
+	}
+	var matchedKeyIdentity string
+	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			if !slices.Contains(expectedKeyIdentities, keyIdentity) {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
+			}
+			matchedKeyIdentity = keyIdentity
+			return nil
+		},
+		validateSignedDockerReference: func(signedDockerReference string) error {
+			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+			if err != nil {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
+			}
+			if signedRef.String() != expectedRef.String() {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+					signedDockerReference, expectedDockerReference))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+			if err != nil {
+				return err
+			}
+			if !matches {
+				return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return nil, "", err
+	}
+	return sig, matchedKeyIdentity, err
+}
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
new file mode 100644
index 00000000..ef5d3df6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go
@@ -0,0 +1,204 @@
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/sigstore/fulcio/pkg/certificate"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"golang.org/x/exp/slices"
+)
+
+// fulcioTrustRoot contains a policy for validating Fulcio-issued certificates.
+// Users should call validate() on the policy before using it.
+type fulcioTrustRoot struct {
+	caCertificates *x509.CertPool
+	oidcIssuer     string
+	subjectEmail   string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+	if f.oidcIssuer == "" {
+		return errors.New("Internal inconsistency: Fulcio trust root set up without an OIDC issuer")
+	}
+	if f.subjectEmail == "" {
+		return errors.New("Internal inconsistency: Fulcio trust root set up without a subject email")
+	}
+	return nil
+}
+
+// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate;
+// it fails if the extension is not present in the certificate, or on any inconsistency.
+func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) {
+	// == Validate the recorded OIDC issuer
+	gotOIDCIssuer1 := false
+	gotOIDCIssuer2 := false
+	var oidcIssuer1, oidcIssuer2 string
+	// certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies
+	// between certificate.OIDIssuer and certificate.OIDIssuerV2.
+	// Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19,
+	// reject duplicates manually.
+	for _, untrustedExt := range untrustedCertificate.Extensions {
+		if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it.
+			if gotOIDCIssuer1 {
+				// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+				// already in ParseCertificate.
+				return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension")
+			}
+			oidcIssuer1 = string(untrustedExt.Value)
+			gotOIDCIssuer1 = true
+		} else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) {
+			if gotOIDCIssuer2 {
+				// Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions
+				// already in ParseCertificate.
+				return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension")
+			}
+			rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2)
+			if err != nil {
+				return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err))
+			}
+			if len(rest) != 0 {
+				return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data")
+			}
+			gotOIDCIssuer2 = true
+		}
+	}
+	switch {
+	case gotOIDCIssuer1 && gotOIDCIssuer2:
+		if oidcIssuer1 != oidcIssuer2 {
+			return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v",
+				oidcIssuer1, oidcIssuer2))
+		}
+		return oidcIssuer1, nil
+	case gotOIDCIssuer1:
+		return oidcIssuer1, nil
+	case gotOIDCIssuer2:
+		return oidcIssuer2, nil
+	default:
+		return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension")
+	}
+}
+
+func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) {
+	// == Verify the certificate is correctly signed
+	var untrustedIntermediatePool *x509.CertPool // = nil
+	// untrustedCertificateChainPool.AppendCertsFromPEM does something broadly similar,
+	// but it seems to optimize for memory usage at the cost of larger CPU usage (i.e. to load
+	// the hundreds of trusted CAs). Golang’s TLS code similarly calls individual AddCert
+	// for intermediate certificates.
+	if len(untrustedIntermediateChainBytes) > 0 {
+		untrustedIntermediateChain, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedIntermediateChainBytes)
+		if err != nil {
+			return nil, internal.NewInvalidSignatureError(fmt.Sprintf("loading certificate chain: %v", err))
+		}
+		untrustedIntermediatePool = x509.NewCertPool()
+		if len(untrustedIntermediateChain) > 1 {
+			for _, untrustedIntermediateCert := range untrustedIntermediateChain[:len(untrustedIntermediateChain)-1] {
+				untrustedIntermediatePool.AddCert(untrustedIntermediateCert)
+			}
+		}
+	}
+
+	untrustedLeafCerts, err := cryptoutils.UnmarshalCertificatesFromPEM(untrustedCertificateBytes)
+	if err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("parsing leaf certificate: %v", err))
+	}
+	switch len(untrustedLeafCerts) {
+	case 0:
+		return nil, internal.NewInvalidSignatureError("no certificate found in signature certificate data")
+	case 1:
+		break // OK
+	default:
+		return nil, internal.NewInvalidSignatureError("unexpected multiple certificates present in signature certificate data")
+	}
+	untrustedCertificate := untrustedLeafCerts[0]
+
+	// Go rejects a Subject Alternative Name that has no DNSNames, EmailAddresses, IPAddresses or URIs;
+	// we match SAN ourselves, so override that.
+	if len(untrustedCertificate.UnhandledCriticalExtensions) > 0 {
+		var remaining []asn1.ObjectIdentifier
+		for _, oid := range untrustedCertificate.UnhandledCriticalExtensions {
+			if !oid.Equal(cryptoutils.SANOID) {
+				remaining = append(remaining, oid)
+			}
+		}
+		untrustedCertificate.UnhandledCriticalExtensions = remaining
+	}
+
+	if _, err := untrustedCertificate.Verify(x509.VerifyOptions{
+		Intermediates: untrustedIntermediatePool,
+		Roots:         f.caCertificates,
+		// NOTE: Cosign uses untrustedCertificate.NotBefore here (i.e. uses _that_ time for intermediate certificate validation),
+		// and validates the leaf certificate against relevantTime manually.
+		// We verify the full certificate chain against relevantTime instead.
+		// Assuming the certificate is fulcio-generated and very short-lived, that should make little difference.
+		CurrentTime: relevantTime,
+		KeyUsages:   []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
+	}); err != nil {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("verifying leaf certificate failed: %v", err))
+	}
+
+	// Cosign verifies an SCT of the certificate (either embedded, or even, probably irrelevant, externally-supplied).
+	//
+	// We don’t currently do that.
+	//
+	// At the very least, with Fulcio we require Rekor SETs to prove Rekor contains a log of the signature, and that
+	// already contains the full certificate; so an SCT of the certificate is superfluous (assuming Rekor allowed searching by
+	// certificate subject, which, well…). That argument might go away if we add support for RFC 3161 timestamps instead of Rekor.
+	//
+	// Secondarily, assuming a trusted Fulcio server (which, to be fair, might not be the case for the public one) an SCT is not clearly
+	// better than the Fulcio server maintaining an audit log; an SCT can only reveal a misissuance if there is some other authoritative
+	// log of approved Fulcio invocations, and it’s not clear where that would come from, especially since human users manually
+	// logging in using OpenID are not going to maintain a record of those actions.
+	//
+	// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
+	// by correctly-issued certificates.
+	//
+	// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
+	// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
+	// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
+
+	// == Validate the recorded OIDC issuer
+	oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate)
+	if err != nil {
+		return nil, err
+	}
+	if oidcIssuer != f.oidcIssuer {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer))
+	}
+
+	// == Validate the OIDC subject
+	if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
+		return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
+			f.subjectEmail,
+			untrustedCertificate.EmailAddresses))
+	}
+	// FIXME: Match more subject types? Cosign does:
+	// - .DNSNames (can’t be issued by Fulcio)
+	// - .IPAddresses (can’t be issued by Fulcio)
+	// - .URIs (CAN be issued by Fulcio)
+	// - OtherName values in SAN (CAN be issued by Fulcio)
+	// - Various values about GitHub workflows (CAN be issued by Fulcio)
+	// What does it… mean to get an OAuth2 identity for an IP address?
+	// FIXME: How far into Turing-completeness for the issuer/subject do we need to get? Simultaneously accepted alternatives, for
+	// issuers and/or subjects and/or combinations? Regexps? More?
+
+	return untrustedCertificate.PublicKey, nil
+}
+
+func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
+	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
+	rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
+		untrustedBase64Signature, untrustedPayloadBytes)
+	if err != nil {
+		return nil, err
+	}
+	return fulcioTrustRoot.verifyFulcioCertificateAtTime(rekorSETTime, untrustedCertificateBytes, untrustedIntermediateChainBytes)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go
new file mode 100644
index 00000000..7872f0f4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go
@@ -0,0 +1,15 @@
+package internal
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+// This is publicly visible as signature.InvalidSignatureError
+type InvalidSignatureError struct {
+	msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+func NewInvalidSignatureError(msg string) InvalidSignatureError {
+	return InvalidSignatureError{msg: msg}
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go
new file mode 100644
index 00000000..a9d127e6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/json.go
@@ -0,0 +1,90 @@
+package internal
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/set"
+)
+
+// JSONFormatError is returned when JSON does not match expected format.
+type JSONFormatError string
+
+func (err JSONFormatError) Error() string {
+	return string(err)
+}
+
+// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, but fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). It uses fieldResolver to
+// determine the destination for a field value; fieldResolver should return a pointer to the destination if the key is valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy;
+// we could use reflection to automate this. Later?
+func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
+	seenKeys := set.New[string]()
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return JSONFormatError(err.Error())
+	}
+	if t != json.Delim('{') {
+		return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+	}
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			return JSONFormatError(err.Error())
+		}
+		if t == json.Delim('}') {
+			break
+		}
+
+		key, ok := t.(string)
+		if !ok {
+			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+			return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+		}
+		if seenKeys.Contains(key) {
+			return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+		}
+		seenKeys.Add(key)
+
+		valuePtr := fieldResolver(key)
+		if valuePtr == nil {
+			return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+		}
+		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+		if err := dec.Decode(valuePtr); err != nil {
+			return JSONFormatError(err.Error())
+		}
+	}
+	if _, err := dec.Token(); err != io.EOF {
+		return JSONFormatError("Unexpected data after JSON object")
+	}
+	return nil
+}
+
+// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
+	seenKeys := set.New[string]()
+	if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
+		if valuePtr, ok := exactFields[key]; ok {
+			seenKeys.Add(key)
+			return valuePtr
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	for key := range exactFields {
+		if !seenKeys.Contains(key) {
+			return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
new file mode 100644
index 00000000..d439b5f7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
@@ -0,0 +1,237 @@
+package internal
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"time"
+
+	"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
+	"github.com/sigstore/rekor/pkg/generated/models"
+)
+
+// This is the github.com/sigstore/rekor/pkg/generated/models.Hashedrekord.APIVersion for github.com/sigstore/rekor/pkg/generated/models.HashedrekordV001Schema.
+// We could alternatively use github.com/sigstore/rekor/pkg/types/hashedrekord.APIVERSION, but that subpackage adds too many dependencies.
+const HashedRekordV001APIVersion = "0.0.1"
+
+// UntrustedRekorSET is a parsed content of the sigstore-signature Rekor SET
+// (note that this is a signature-specific format, not a format directly used by the Rekor API).
+// This corresponds to github.com/sigstore/cosign/bundle.RekorBundle, but we impose a stricter decoder.
+type UntrustedRekorSET struct {
+	UntrustedSignedEntryTimestamp []byte // A signature over some canonical JSON form of UntrustedPayload
+	UntrustedPayload              json.RawMessage
+}
+
+type UntrustedRekorPayload struct {
+	Body           []byte // In cosign, this is an any, but only a string works
+	IntegratedTime int64
+	LogIndex       int64
+	LogID          string
+}
+
+// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+	err := s.strictUnmarshalJSON(data)
+	if err != nil {
+		if formatErr, ok := err.(JSONFormatError); ok {
+			err = NewInvalidSignatureError(formatErr.Error())
+		}
+	}
+	return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
+		"Payload":              &s.UntrustedPayload,
+	})
+}
+
+// A compile-time check that UntrustedRekorSET and *UntrustedRekorSET implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorSET{}
+var _ json.Marshaler = (*UntrustedRekorSET)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
+		"Payload":              s.UntrustedPayload,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
+	err := p.strictUnmarshalJSON(data)
+	if err != nil {
+		if formatErr, ok := err.(JSONFormatError); ok {
+			err = NewInvalidSignatureError(formatErr.Error())
+		}
+	}
+	return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
+	return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"body":           &p.Body,
+		"integratedTime": &p.IntegratedTime,
+		"logIndex":       &p.LogIndex,
+		"logID":          &p.LogID,
+	})
+}
+
+// A compile-time check that UntrustedRekorPayload and *UntrustedRekorPayload implement json.Marshaler
+var _ json.Marshaler = UntrustedRekorPayload{}
+var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
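+// The emitted object mirrors the Rekor bundle payload shape; an illustrative
+// (made-up) result:
+//
+//	{"body":"eyJh...","integratedTime":1671100000,"logIndex":42,"logID":"c0d23d..."}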
+func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"body":           p.Body,
+		"integratedTime": p.IntegratedTime,
+		"logIndex":       p.LogIndex,
+		"logID":          p.LogID,
+	})
+}
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	// FIXME: Should the publicKey parameter hard-code ecdsa?
+
+	// == Parse SET bytes
+	var untrustedSET UntrustedRekorSET
+	// Sadly, we need to parse and transform untrusted data before verifying a cryptographic signature...
+	if err := json.Unmarshal(unverifiedRekorSET, &untrustedSET); err != nil {
+		return time.Time{}, NewInvalidSignatureError(err.Error())
+	}
+	// == Verify SET signature
+	// Cosign unmarshals and re-marshals UntrustedPayload; that seems unnecessary,
+	// assuming jsoncanonicalizer is designed to operate on untrusted data.
+	untrustedSETPayloadCanonicalBytes, err := jsoncanonicalizer.Transform(untrustedSET.UntrustedPayload)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
+	}
+	untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
+	if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+		return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
+	}
+
+	// == Parse SET payload
+	// Parse the cryptographically-verified canonicalized variant, NOT the originally-delivered representation,
+	// to decrease risk of exploiting the JSON parser. Note that if there were an arbitrary execution vulnerability, the attacker
+	// could have exploited the parsing of unverifiedRekorSET above already; so this, at best, ensures more consistent processing
+	// of the SET payload.
+	var rekorPayload UntrustedRekorPayload
+	if err := json.Unmarshal(untrustedSETPayloadCanonicalBytes, &rekorPayload); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("parsing Rekor SET payload: %v", err.Error()))
+	}
+	// FIXME: Use a different decoder implementation? The Swagger-generated code is kinda ridiculous, with the need to re-marshal
+	// hashedRekor.Spec and so on.
+	// Especially if we anticipate needing to decode different data formats…
+	// That would also allow being much more strict about JSON.
+	//
+	// Alternatively, rely on the existing .Validate() methods instead of manually checking for nil all over the place.
+	var hashedRekord models.Hashedrekord
+	if err := json.Unmarshal(rekorPayload.Body, &hashedRekord); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding the body of a Rekor SET payload: %v", err))
+	}
+	// The decode of models.HashedRekord validates the "kind": "hashedrekord" field, which is otherwise invisible to us.
+	if hashedRekord.APIVersion == nil {
+		return time.Time{}, NewInvalidSignatureError("missing Rekor SET Payload API version")
+	}
+	if *hashedRekord.APIVersion != HashedRekordV001APIVersion {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("unsupported Rekor SET Payload hashedrekord version %#v", hashedRekord.APIVersion))
+	}
+	hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
+	if err != nil {
+		// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
+		// so this should never fail.
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
+	}
+	var hashedRekordV001 models.HashedrekordV001Schema
+	if err := json.Unmarshal(hashedRekordV001Bytes, &hashedRekordV001); err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding hashedrekord spec: %v", err))
+	}
+
+	// == Match unverifiedKeyOrCertBytes
+	if hashedRekordV001.Signature == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature" field in hashedrekord`)
+	}
+	if hashedRekordV001.Signature.PublicKey == nil {
+		return time.Time{}, NewInvalidSignatureError(`Missing "signature.publicKey" field in hashedrekord`)
+	}
+	rekorKeyOrCertPEM, rest := pem.Decode(hashedRekordV001.Signature.PublicKey.Content)
+	if rekorKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET has trailing data")
+	}
+	// FIXME: For public keys, let the caller provide the DER-formatted blob instead
+	// of round-tripping through PEM.
+	unverifiedKeyOrCertPEM, rest := pem.Decode(unverifiedKeyOrCertBytes)
+	if unverifiedKeyOrCertPEM == nil {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET is not in PEM format")
+	}
+	if len(rest) != 0 {
+		return time.Time{}, NewInvalidSignatureError("public key or cert to be matched against publicKey in Rekor SET has trailing data")
+	}
+	// NOTE: This compares the PEM payload, but not the object type or headers.
+	if !bytes.Equal(rekorKeyOrCertPEM.Bytes, unverifiedKeyOrCertPEM.Bytes) {
+		return time.Time{}, NewInvalidSignatureError("publicKey in Rekor SET does not match")
+	}
+	// == Match unverifiedSignatureBytes
+	unverifiedSignatureBytes, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("decoding signature base64: %v", err))
+	}
+	if !bytes.Equal(hashedRekordV001.Signature.Content, unverifiedSignatureBytes) {
+		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("signature in Rekor SET does not match: %#v vs.
%#v", + string(hashedRekordV001.Signature.Content), string(unverifiedSignatureBytes))) + } + + // == Match unverifiedPayloadBytes + if hashedRekordV001.Data == nil { + return time.Time{}, NewInvalidSignatureError(`Missing "data" field in hashedrekord`) + } + if hashedRekordV001.Data.Hash == nil { + return time.Time{}, NewInvalidSignatureError(`Missing "data.hash" field in hashedrekord`) + } + if hashedRekordV001.Data.Hash.Algorithm == nil { + return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`) + } + if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm)) + } + if hashedRekordV001.Data.Hash.Value == nil { + return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.value" field in hashedrekord`) + } + rekorPayloadHash, err := hex.DecodeString(*hashedRekordV001.Data.Hash.Value) + if err != nil { + return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Invalid "data.hash.value" field in hashedrekord: %v`, err)) + + } + unverifiedPayloadHash := sha256.Sum256(unverifiedPayloadBytes) + if !bytes.Equal(rekorPayloadHash, unverifiedPayloadHash[:]) { + return time.Time{}, NewInvalidSignatureError("payload in Rekor SET does not match") + } + + // == All OK; return the relevant time. + return time.Unix(rekorPayload.IntegratedTime, 0), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go new file mode 100644 index 00000000..f8ec6656 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -0,0 +1,202 @@ +package internal + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +const ( + sigstoreSignatureType = "cosign container image signature" + sigstoreHarcodedHashAlgorithm = crypto.SHA256 +) + +// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature) +type UntrustedSigstorePayload struct { + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + untrustedTimestamp *int64 +} + +// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with +// the specified primary contents and appropriate metadata. +func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. 
+	creatorID := "containers/image " + version.Version
+	timestamp := time.Now().Unix()
+	return UntrustedSigstorePayload{
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
+	}
+}
+
+// A compile-time check that UntrustedSigstorePayload and *UntrustedSigstorePayload implement json.Marshaler
+var _ json.Marshaler = UntrustedSigstorePayload{}
+var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]any{
+		"type":     sigstoreSignatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+	}
+	optional := map[string]any{}
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
+	}
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
+	}
+	signature := map[string]any{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler
+var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
+	err := s.strictUnmarshalJSON(data)
+	if err != nil {
+		if formatErr, ok := err.(JSONFormatError); ok {
+			err = NewInvalidSignatureError(formatErr.Error())
+		}
+	}
+	return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
+// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
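+	// For orientation, a payload this parser accepts looks roughly like
+	// (illustrative values only):
+	//
+	//	{"critical":{"type":"cosign container image signature",
+	//	  "image":{"docker-manifest-digest":"sha256:..."},
+	//	  "identity":{"docker-reference":"example.com/ns/repo"}},
+	//	 "optional":{"creator":"...","timestamp":1671100000}}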
+	if !bytes.Equal(optional, []byte("null")) {
+		if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
+			switch key {
+			case "creator":
+				gotCreatorID = true
+				return &creatorID
+			case "timestamp":
+				gotTimestamp = true
+				return &timestamp
+			default:
+				var ignore any
+				return &ignore
+			}
+		}); err != nil {
+			return err
+		}
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != sigstoreSignatureType {
+		return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.untrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable.
+// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type SigstorePayloadAcceptanceRules struct {
+	ValidateSignedDockerReference      func(string) error
+	ValidateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
+// match expected values, both as specified by rules, and returns it.
+// We return an *UntrustedSigstorePayload, although nothing actually uses it,
+// just to double-check against stupid typos.
+func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
+	verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
+	if err != nil {
+		return nil, fmt.Errorf("creating verifier: %w", err)
+	}
+
+	unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
+	if err != nil {
+		return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
+	}
+	// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+	// which does not seem to be used by anything, so we don’t bother.
+ if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err)) + } + + var unmatchedPayload UntrustedSigstorePayload + if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { + return nil, NewInvalidSignatureError(err.Error()) + } + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil { + return nil, err + } + // SigstorePayloadAcceptanceRules have accepted this value. + return &unmatchedPayload, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go new file mode 100644 index 00000000..1d3fe0fd --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -0,0 +1,97 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above + "golang.org/x/crypto/openpgp" //nolint:staticcheck +) + +// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. +// Each mechanism should eventually be closed by calling Close(). +type SigningMechanism interface { + // Close removes resources associated with the mechanism, if any. + Close() error + // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. + SupportsSigning() error + // Sign creates a (non-detached) signature of input using keyIdentity. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + Sign(input []byte, keyIdentity string) ([]byte, error) + // Verify parses unverifiedSignature and returns the content and the signer's identity + Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) + // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, + // along with a short identifier of the key used for signing. + // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) + // is NOT the same as a "key identity" used in other calls to this interface, and + // the values may have no recognizable relationship if the public key is not available. + UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) +} + +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + +// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. 
+type SigningNotSupportedError string + +func (err SigningNotSupportedError) Error() string { + return string(err) +} + +// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default +// GPG configuration ($GNUPGHOME / ~/.gnupg) +// The caller must call .Close() on the returned SigningMechanism. +func NewGPGSigningMechanism() (SigningMechanism, error) { + return newGPGSigningMechanismInDirectory("") +} + +// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + return newEphemeralGPGSigningMechanism([][]byte{blob}) +} + +// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. + md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) + if err != nil { + return nil, "", err + } + if !md.IsSigned { + return nil, "", errors.New("The input is not a signature") + } + content, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + // Coverage: An error during reading the body can happen only if + // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key + // to decrypt the contents anyway), or + // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. + return nil, "", err + } + + // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints + // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)! + return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go new file mode 100644 index 00000000..2b2a7ad8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -0,0 +1,207 @@ +//go:build !containers_image_openpgp +// +build !containers_image_openpgp + +package signature + +import ( + "bytes" + "errors" + "fmt" + "os" + + "github.com/containers/image/v5/signature/internal" + "github.com/proglottis/gpgme" +) + +// A GPG/OpenPGP signing mechanism, implemented using gpgme. +type gpgmeSigningMechanism struct { + ctx *gpgme.Context + ephemeralDir string // If not "", a directory to be removed on Close() +} + +// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. +// The caller must call .Close() on the returned SigningMechanism. 
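+// Illustrative use via the public wrapper (sketch, error handling elided):
+//
+//	mech, err := NewGPGSigningMechanism()
+//	if err != nil { … }
+//	defer mech.Close()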
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blobs, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { + key, err := m.ctx.GetKey(keyIdentity, true) + if err != nil { + return nil, err + } + inputData, err := gpgme.NewDataBytes(input) + if err != nil { + return nil, err + } + var sigBuffer bytes.Buffer + sigData, err := gpgme.NewDataWriter(&sigBuffer) + if err != nil { + return nil, err + } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. + callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { + return nil, err + } + return sigBuffer.Bytes(), nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + +// Verify parses unverifiedSignature and returns the content and the signer's identity +func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { + signedBuffer := bytes.Buffer{} + signedData, err := gpgme.NewDataWriter(&signedBuffer) + if err != nil { + return nil, "", err + } + unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) + if err != nil { + return nil, "", err + } + _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) + if err != nil { + return nil, "", err + } + if len(sigs) != 1 { + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))) + } + sig := sigs[0] + // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves + if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { + // FIXME: Better error reporting eventually + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig)) + } + return signedBuffer.Bytes(), sig.Fingerprint, nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. 
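A hedged signing sketch to contrast with Verify. It assumes the public SigningMechanism interface exposes SupportsSigning and Sign as the gpgme implementation above suggests, and keyIdentity is a hypothetical fingerprint of a key already present in the default GPG home ($GNUPGHOME / ~/.gnupg).

// import "github.com/containers/image/v5/signature"
func signPayload(payload []byte, keyIdentity string) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, err
	}
	defer mech.Close()
	// Returns a SigningNotSupportedError under the containers_image_openpgp build tag.
	if err := mech.SupportsSigning(); err != nil {
		return nil, err
	}
	return mech.Sign(payload, keyIdentity)
}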
+func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
new file mode 100644
index 00000000..5d6c1ac8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -0,0 +1,179 @@
+//go:build containers_image_openpgp
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/signature/internal"
+ "github.com/containers/storage/pkg/homedir"
+ // This is fallback code; the primary recommendation is to use the gpgme mechanism
+ // implementation, which is out-of-process and more appropriate for handling long-term private key material
+ // than any Go implementation.
+ // For this verify-only fallback, we haven't reviewed any of the
+ // existing alternatives to choose; so, for now, continue to
+ // use this frozen deprecated implementation.
+ //lint:ignore SA1019 See above
+ "golang.org/x/crypto/openpgp" //nolint:staticcheck
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+ keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+
+ gpgHome := optionalDir
+ if gpgHome == "" {
+ gpgHome = os.Getenv("GNUPGHOME")
+ if gpgHome == "" {
+ gpgHome = path.Join(homedir.Get(), ".gnupg")
+ }
+ }
+
+ pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ } else {
+ _, err := m.importKeysFromBytes(pubring)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blobs, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+ keyIdentities := []string{}
+ for _, blob := range blobs {
+ ki, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyIdentities = append(keyIdentities, ki...)
+ }
+
+ return m, keyIdentities, nil
+}
+
+func (m *openpgpSigningMechanism) Close() error {
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
+ if err != nil {
+ k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
+ if e2 != nil {
+ return nil, err // The original error -- FIXME: is this better?
+ }
+ keyring = k
+ }
+
+ keyIdentities := []string{}
+ for _, entity := range keyring {
+ if entity.PrimaryKey == nil {
+ // Coverage: This should never happen, openpgp.ReadEntity fails with a
+ // openpgp.errors.StructuralError instead of returning an entity with this
+ // field set to nil.
+ continue
+ }
+ // Uppercase the fingerprint to be compatible with gpgme
+ keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
+ m.keyring = append(m.keyring, entity)
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+func (m *openpgpSigningMechanism) SupportsSigning() error {
+ return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
+ return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("not signed")
+ }
+ content, err := io.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+ // (and possibly also signed, but it _must_ be encrypted) and the signing
+ // “modification detection code” detects a mismatch. But in that case,
+ // we would expect the signature verification to fail as well, and that is checked
+ // first. Besides, we are not supplying any decryption keys, so we really
+ // can never reach this “encrypted data MDC mismatch” path.
+ return nil, "", err
+ }
+ if md.SignatureError != nil {
+ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+ }
+ if md.SignedBy == nil {
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
+ }
+ if md.Signature != nil {
+ if md.Signature.SigLifetimeSecs != nil {
+ expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+ if time.Now().After(expiry) {
+ return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry))
+ }
+ }
+ } else if md.SignatureV3 == nil {
+ // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+ // or sets md.SignatureError.
+ return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set") + } + + // Uppercase the fingerprint to be compatible with gpgme + return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil +} + +// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, +// along with a short identifier of the key used for signing. +// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) +// is NOT the same as a "key identity" used in other calls to this interface, and +// the values may have no recognizable relationship if the public key is not available. +func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { + return gpgUntrustedSignatureContents(untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go new file mode 100644 index 00000000..7eb5cab7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -0,0 +1,799 @@ +// policy_config.go handles creation of policy objects, either by parsing JSON +// or by programs building them programmatically. + +// The New* constructors are intended to be a stable API. FIXME: after an independent review. + +// Do not invoke the internals of the JSON marshaling/unmarshaling directly. + +// We can't just blindly call json.Unmarshal because that would silently ignore +// typos, and that would just not do for security policy. + +// FIXME? This is by no means an user-friendly parser: No location information in error messages, no other context. +// But at least it is not worse than blind json.Unmarshal()… + +package signature + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/signature/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/regexp" +) + +// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). +// You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' +var systemDefaultPolicyPath = builtinDefaultPolicyPath + +// userPolicyFile is the path to the per user policy path. +var userPolicyFile = filepath.FromSlash(".config/containers/policy.json") + +// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. +type InvalidPolicyFormatError string + +func (err InvalidPolicyFormatError) Error() string { + return string(err) +} + +// DefaultPolicy returns the default policy of the system. +// Most applications should be using this method to get the policy configured +// by the system administrator. +// sys should usually be nil, can be set to override the default. +// NOTE: When this function returns an error, report it to the user and abort. +// DO NOT hard-code fallback policies in your application. +func DefaultPolicy(sys *types.SystemContext) (*Policy, error) { + return NewPolicyFromFile(defaultPolicyPath(sys)) +} + +// defaultPolicyPath returns a path to the default policy of the system. 
+func defaultPolicyPath(sys *types.SystemContext) string { + return defaultPolicyPathWithHomeDir(sys, homedir.Get()) +} + +// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath, +// it exists only to allow testing it with an artificial home directory. +func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string { + if sys != nil && sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath + } + userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) + if _, err := os.Stat(userPolicyFilePath); err == nil { + return userPolicyFilePath + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) + } + return systemDefaultPolicyPath +} + +// NewPolicyFromFile returns a policy configured in the specified file. +func NewPolicyFromFile(fileName string) (*Policy, error) { + contents, err := os.ReadFile(fileName) + if err != nil { + return nil, err + } + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err) + } + return policy, nil +} + +// NewPolicyFromBytes returns a policy parsed from the specified blob. +// Use this function instead of calling json.Unmarshal directly. +func NewPolicyFromBytes(data []byte) (*Policy, error) { + p := Policy{} + if err := json.Unmarshal(data, &p); err != nil { + return nil, InvalidPolicyFormatError(err.Error()) + } + return &p, nil +} + +// Compile-time check that Policy implements json.Unmarshaler. +var _ json.Unmarshaler = (*Policy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (p *Policy) UnmarshalJSON(data []byte) error { + *p = Policy{} + transports := policyTransportsMap{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "default": + return &p.Default + case "transports": + return &transports + default: + return nil + } + }); err != nil { + return err + } + + if p.Default == nil { + return InvalidPolicyFormatError("Default policy is missing") + } + p.Transports = map[string]PolicyTransportScopes(transports) + return nil +} + +// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. +type policyTransportsMap map[string]PolicyTransportScopes + +// Compile-time check that policyTransportsMap implements json.Unmarshaler. +var _ json.Unmarshaler = (*policyTransportsMap)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { + // We can't unmarshal directly into map values because it is not possible to take an address of a map value. + // So, use a temporary map of pointers-to-slices and convert. + tmpMap := map[string]*PolicyTransportScopes{} + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + // transport can be nil + transport := transports.Get(key) + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if _, ok := tmpMap[key]; ok { + return nil + } + ptsWithTransport := policyTransportScopesWithTransport{ + transport: transport, + dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. 
+ }
+ tmpMap[key] = ptsWithTransport.dest
+ return &ptsWithTransport
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+ return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+ transport types.ImageTransport
+ dest *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyRequirements{}
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ if key != "" && m.transport != nil {
+ if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+ return nil
+ }
+ }
+ ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr
+ return ptr
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m.dest)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyRequirements implements json.Unmarshaler.
+var _ json.Unmarshaler = (*PolicyRequirements)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
+ reqJSONs := []json.RawMessage{}
+ if err := json.Unmarshal(data, &reqJSONs); err != nil {
+ return err
+ }
+ if len(reqJSONs) == 0 {
+ return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
+ }
+ res := make([]PolicyRequirement, len(reqJSONs))
+ for i, reqJSON := range reqJSONs {
+ req, err := newPolicyRequirementFromJSON(reqJSON)
+ if err != nil {
+ return err
+ }
+ res[i] = req
+ }
+ *m = res
+ return nil
+}
+
+// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
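A hedged sketch of the JSON shape these strict unmarshalers accept, fed through NewPolicyFromBytes (defined above). The registry scope is hypothetical, and the "type" strings are assumed to be the usual policy.json values dispatched on just below.

// import "github.com/containers/image/v5/signature"
func parseExamplePolicy() (*signature.Policy, error) {
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com/trusted": [{"type": "insecureAcceptAnything"}]
	        }
	    }
	}`)
	// Typos, duplicate keys, and unknown fields fail with an InvalidPolicyFormatError
	// instead of being silently ignored.
	return signature.NewPolicyFromBytes(policyJSON)
}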
+func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + case prTypeSigstoreSigned: + res = &prSigstoreSigned{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. 
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + if !keyType.IsValid() { + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType)) + } + keySources := 0 + if keyPath != "" { + keySources++ + } + if keyPaths != nil { + keySources++ + } + if keyData != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified") + } + if signedIdentity == nil { + return nil, InvalidPolicyFormatError("signedIdentity not specified") + } + return &prSignedBy{ + prCommon: prCommon{Type: prTypeSignedBy}, + KeyType: keyType, + KeyPath: keyPath, + KeyPaths: keyPaths, + KeyData: keyData, + SignedIdentity: signedIdentity, + }, nil +} + +// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. +func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity) +} + +// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath +func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) +} + +// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type. +func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity) +} + +// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths +func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity) +} + +// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. +func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", nil, keyData, signedIdentity) +} + +// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData +func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyData(keyType, keyData, signedIdentity) +} + +// Compile-time check that prSignedBy implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBy)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
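A short sketch of building the same requirement programmatically via the constructors above; the key path is hypothetical, and NewPRMMatchRepoDigestOrExact (defined further below) supplies the same default identity matching that the JSON path falls back to.

// import "github.com/containers/image/v5/signature"
func buildGPGRequirement() (signature.PolicyRequirement, error) {
	return signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/etc/pki/containers/registry-key.gpg", // hypothetical key path
		signature.NewPRMMatchRepoDigestOrExact(),
	)
}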
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error { + *pr = prSignedBy{} + var tmp prSignedBy + var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyType": + return &tmp.KeyType + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBy { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSignedBy + var err error + switch { + case gotKeyPath && !gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && gotKeyData: + res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && !gotKeyData: + return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present") + default: + return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present") + } + if err != nil { + return err + } + *pr = *res + + return nil +} + +// IsValid returns true iff kt is a recognized value +func (kt sbKeyType) IsValid() bool { + switch kt { + case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys, + SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + return true + default: + return false + } +} + +// Compile-time check that sbKeyType implements json.Unmarshaler. +var _ json.Unmarshaler = (*sbKeyType)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (kt *sbKeyType) UnmarshalJSON(data []byte) error { + *kt = sbKeyType("") + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if !sbKeyType(s).IsValid() { + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) + } + *kt = sbKeyType(s) + return nil +} + +// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type. +func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) { + if baseLayerIdentity == nil { + return nil, InvalidPolicyFormatError("baseLayerIdentity not specified") + } + return &prSignedBaseLayer{ + prCommon: prCommon{Type: prTypeSignedBaseLayer}, + BaseLayerIdentity: baseLayerIdentity, + }, nil +} + +// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement. +func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedBaseLayer(baseLayerIdentity) +} + +// Compile-time check that prSignedBaseLayer implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSignedBaseLayer)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { + *pr = prSignedBaseLayer{} + var tmp prSignedBaseLayer + var baseLayerIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "baseLayerIdentity": &baseLayerIdentity, + }); err != nil { + return err + } + + if tmp.Type != prTypeSignedBaseLayer { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) + if err != nil { + return err + } + res, err := newPRSignedBaseLayer(bli) + if err != nil { + // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid. + return err + } + *pr = *res + return nil +} + +// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. +func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { + var typeField prmCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyReferenceMatch + switch typeField.Type { + case prmTypeMatchExact: + res = &prmMatchExact{} + case prmTypeMatchRepoDigestOrExact: + res = &prmMatchRepoDigestOrExact{} + case prmTypeMatchRepository: + res = &prmMatchRepository{} + case prmTypeExactReference: + res = &prmExactReference{} + case prmTypeExactRepository: + res = &prmExactRepository{} + case prmTypeRemapIdentity: + res = &prmRemapIdentity{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRMMatchExact is NewPRMMatchExact, except it returns the private type. +func newPRMMatchExact() *prmMatchExact { + return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}} +} + +// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch. +func NewPRMMatchExact() PolicyReferenceMatch { + return newPRMMatchExact() +} + +// Compile-time check that prmMatchExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchExact{} + var tmp prmMatchExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchExact() + return nil +} + +// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type. +func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact { + return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}} +} + +// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch. +func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch { + return newPRMMatchRepoDigestOrExact() +} + +// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepoDigestOrExact{} + var tmp prmMatchRepoDigestOrExact + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepoDigestOrExact { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchRepoDigestOrExact() + return nil +} + +// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. +func newPRMMatchRepository() *prmMatchRepository { + return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} +} + +// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch. +func NewPRMMatchRepository() PolicyReferenceMatch { + return newPRMMatchRepository() +} + +// Compile-time check that prmMatchRepository implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmMatchRepository)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { + *prm = prmMatchRepository{} + var tmp prmMatchRepository + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prmTypeMatchRepository { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *prm = *newPRMMatchRepository() + return nil +} + +// newPRMExactReference is NewPRMExactReference, except it returns the private type. +func newPRMExactReference(dockerReference string) (*prmExactReference, error) { + ref, err := reference.ParseNormalizedNamed(dockerReference) + if err != nil { + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) + } + if reference.IsNameOnly(ref) { + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) + } + return &prmExactReference{ + prmCommon: prmCommon{Type: prmTypeExactReference}, + DockerReference: dockerReference, + }, nil +} + +// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch. +func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) { + return newPRMExactReference(dockerReference) +} + +// Compile-time check that prmExactReference implements json.Unmarshaler. +var _ json.Unmarshaler = (*prmExactReference)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (prm *prmExactReference) UnmarshalJSON(data []byte) error { + *prm = prmExactReference{} + var tmp prmExactReference + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ + "type": &tmp.Type, + "dockerReference": &tmp.DockerReference, + }); err != nil { + return err + } + + if tmp.Type != prmTypeExactReference { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + + res, err := newPRMExactReference(tmp.DockerReference) + if err != nil { + return err + } + *prm = *res + return nil +} + +// newPRMExactRepository is NewPRMExactRepository, except it returns the private type. 
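A usage sketch for the exactReference constructor above; the references are hypothetical, and the tag-or-digest check is the IsNameOnly rejection shown in newPRMExactReference.

// import "github.com/containers/image/v5/signature"
func buildExactReferenceMatch() (signature.PolicyReferenceMatch, error) {
	// Accepted: the reference carries a tag. A bare
	// "registry.example.com/myorg/app" would instead be rejected with an
	// InvalidPolicyFormatError (name only, neither tag nor digest).
	return signature.NewPRMExactReference("registry.example.com/myorg/app:1.2.3")
}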
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "dockerRepository": &tmp.DockerRepository,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+// Private objects for validateIdentityRemappingPrefix
+var (
+ // remapIdentityDomainRegexp matches exactly a reference domain (name[:port])
+ remapIdentityDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$")
+ // remapIdentityDomainPrefixRegexp matches a reference that starts with a domain;
+ // we need this because reference.NameRegexp accepts short names with docker.io implied.
+ remapIdentityDomainPrefixRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "/")
+ // remapIdentityNameRegexp matches exactly a reference.Named name (possibly unnormalized)
+ remapIdentityNameRegexp = regexp.Delayed("^" + reference.NameRegexp.String() + "$")
+)
+
+// validateIdentityRemappingPrefix returns an InvalidPolicyFormatError if s is detected to be invalid
+// for the Prefix or SignedPrefix values of prmRemapIdentity.
+// Note that it may not recognize _all_ invalid values.
+func validateIdentityRemappingPrefix(s string) error {
+ if remapIdentityDomainRegexp.MatchString(s) ||
+ (remapIdentityNameRegexp.MatchString(s) && remapIdentityDomainPrefixRegexp.MatchString(s)) {
+ // FIXME? This does not reject "shortname" nor "ns/shortname", because docker/reference
+ // does not provide an API for the short vs. long name logic.
+ // It will either not match, or fail in the ParseNamed call of
+ // prmRemapIdentity.remapReferencePrefix when trying to use such a prefix.
+ return nil
+ }
+ return InvalidPolicyFormatError(fmt.Sprintf("prefix %q is not valid", s))
+}
+
+// newPRMRemapIdentity is NewPRMRemapIdentity, except it returns the private type.
+func newPRMRemapIdentity(prefix, signedPrefix string) (*prmRemapIdentity, error) {
+ if err := validateIdentityRemappingPrefix(prefix); err != nil {
+ return nil, err
+ }
+ if err := validateIdentityRemappingPrefix(signedPrefix); err != nil {
+ return nil, err
+ }
+ return &prmRemapIdentity{
+ prmCommon: prmCommon{Type: prmTypeRemapIdentity},
+ Prefix: prefix,
+ SignedPrefix: signedPrefix,
+ }, nil
+}
+
+// NewPRMRemapIdentity returns a new "remapIdentity" PolicyReferenceMatch.
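A hedged sketch of the mirror use case for the prefix rules above (NewPRMRemapIdentity itself follows just below); the mirror host is hypothetical.

// import "github.com/containers/image/v5/signature"
func buildMirrorRemap() (signature.PolicyReferenceMatch, error) {
	// Treat images pulled from a local mirror as if they carried the docker.io
	// identities the signatures were actually made for; both prefixes must
	// pass validateIdentityRemappingPrefix.
	return signature.NewPRMRemapIdentity("mirror.example.com", "docker.io")
}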
+func NewPRMRemapIdentity(prefix, signedPrefix string) (PolicyReferenceMatch, error) {
+ return newPRMRemapIdentity(prefix, signedPrefix)
+}
+
+// Compile-time check that prmRemapIdentity implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
+ *prm = prmRemapIdentity{}
+ var tmp prmRemapIdentity
+ if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+ "type": &tmp.Type,
+ "prefix": &tmp.Prefix,
+ "signedPrefix": &tmp.SignedPrefix,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeRemapIdentity {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
new file mode 100644
index 00000000..d8c6a97f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go
@@ -0,0 +1,343 @@
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/containers/image/v5/signature/internal"
+)
+
+// PRSigstoreSignedOption is a way to pass values to NewPRSigstoreSigned
+type PRSigstoreSignedOption func(*prSigstoreSigned) error
+
+// PRSigstoreSignedWithKeyPath specifies a value for the "keyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyPath != "" {
+ return errors.New(`"keyPath" already specified`)
+ }
+ pr.KeyPath = keyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.KeyData != nil {
+ return errors.New(`"keyData" already specified`)
+ }
+ pr.KeyData = keyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.Fulcio != nil {
+ return errors.New(`"fulcio" already specified`)
+ }
+ pr.Fulcio = fulcio
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyPath specifies a value for the "rekorPublicKeyPath" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyPath != "" {
+ return errors.New(`"rekorPublicKeyPath" already specified`)
+ }
+ pr.RekorPublicKeyPath = rekorPublicKeyPath
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.RekorPublicKeyData != nil {
+ return errors.New(`"rekorPublicKeyData" already specified`)
+ }
+ pr.RekorPublicKeyData = rekorPublicKeyData
+ return nil
+ }
+}
+
+// PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
+ return func(pr *prSigstoreSigned) error {
+ if pr.SignedIdentity != nil {
+ return errors.New(`"signedIdentity" already specified`)
+ }
+ pr.SignedIdentity = signedIdentity
+ return nil
+ }
+}
+
+// newPRSigstoreSigned is NewPRSigstoreSigned, except it returns the private type.
+func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, error) {
+ res := prSigstoreSigned{
+ prCommon: prCommon{Type: prTypeSigstoreSigned},
+ }
+ for _, o := range options {
+ if err := o(&res); err != nil {
+ return nil, err
+ }
+ }
+
+ keySources := 0
+ if res.KeyPath != "" {
+ keySources++
+ }
+ if res.KeyData != nil {
+ keySources++
+ }
+ if res.Fulcio != nil {
+ keySources++
+ }
+ if keySources != 1 {
+ return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
+ }
+
+ if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
+ return nil, InvalidPolicyFormatError("rekorPublicKeyPath and rekorPublicKeyData cannot be used simultaneously")
+ }
+ if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
+ return nil, InvalidPolicyFormatError("At least one of rekorPublicKeyPath and rekorPublicKeyData must be specified if fulcio is used")
+ }
+
+ if res.SignedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+
+ return &res, nil
+}
+
+// NewPRSigstoreSigned returns a new "sigstoreSigned" PolicyRequirement based on options.
+func NewPRSigstoreSigned(options ...PRSigstoreSignedOption) (PolicyRequirement, error) {
+ return newPRSigstoreSigned(options...)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyPath(keyPath),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return NewPRSigstoreSigned(
+ PRSigstoreSignedWithKeyData(keyData),
+ PRSigstoreSignedWithSignedIdentity(signedIdentity),
+ )
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
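A sketch of assembling a key-based sigstoreSigned requirement from the options above; the cosign public-key path is hypothetical. Per the validation in newPRSigstoreSigned, exactly one of keyPath, keyData, and fulcio may be set, and a Fulcio configuration additionally needs a Rekor public key.

// import "github.com/containers/image/v5/signature"
func buildSigstoreRequirement() (signature.PolicyRequirement, error) {
	return signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithKeyPath("/etc/pki/containers/cosign.pub"),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMMatchRepoDigestOrExact()),
	)
}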
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool + var fulcio prSigstoreSignedFulcio + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "fulcio": + gotFulcio = true + return &fulcio + case "rekorPublicKeyPath": + gotRekorPublicKeyPath = true + return &tmp.RekorPublicKeyPath + case "rekorPublicKeyData": + gotRekorPublicKeyData = true + return &tmp.RekorPublicKeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var opts []PRSigstoreSignedOption + if gotKeyPath { + opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath)) + } + if gotKeyData { + opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) + } + if gotFulcio { + opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) + } + if gotRekorPublicKeyPath { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) + } + if gotRekorPublicKeyData { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) + } + opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) + + res, err := newPRSigstoreSigned(opts...) 
+ if err != nil { + return err + } + *pr = *res + return nil +} + +// PRSigstoreSignedFulcioOption is a way to pass values to NewPRSigstoreSignedFulcio +type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error + +// PRSigstoreSignedFulcioWithCAPath specifies a value for the "caPath" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAPath != "" { + return errors.New(`"caPath" already specified`) + } + f.CAPath = caPath + return nil + } +} + +// PRSigstoreSignedFulcioWithCAData specifies a value for the "caData" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.CAData != nil { + return errors.New(`"caData" already specified`) + } + f.CAData = caData + return nil + } +} + +// PRSigstoreSignedFulcioWithOIDCIssuer specifies a value for the "oidcIssuer" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.OIDCIssuer != "" { + return errors.New(`"oidcIssuer" already specified`) + } + f.OIDCIssuer = oidcIssuer + return nil + } +} + +// PRSigstoreSignedFulcioWithSubjectEmail specifies a value for the "subjectEmail" field when calling NewPRSigstoreSignedFulcio +func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { + return func(f *prSigstoreSignedFulcio) error { + if f.SubjectEmail != "" { + return errors.New(`"subjectEmail" already specified`) + } + f.SubjectEmail = subjectEmail + return nil + } +} + +// newPRSigstoreSignedFulcio is NewPRSigstoreSignedFulcio, except it returns the private type +func newPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (*prSigstoreSignedFulcio, error) { + res := prSigstoreSignedFulcio{} + for _, o := range options { + if err := o(&res); err != nil { + return nil, err + } + } + + if res.CAPath != "" && res.CAData != nil { + return nil, InvalidPolicyFormatError("caPath and caData cannot be used simultaneously") + } + if res.CAPath == "" && res.CAData == nil { + return nil, InvalidPolicyFormatError("At least one of caPath and caData must be specified") + } + if res.OIDCIssuer == "" { + return nil, InvalidPolicyFormatError("oidcIssuer not specified") + } + if res.SubjectEmail == "" { + return nil, InvalidPolicyFormatError("subjectEmail not specified") + } + + return &res, nil +} + +// NewPRSigstoreSignedFulcio returns a PRSigstoreSignedFulcio based on options. +func NewPRSigstoreSignedFulcio(options ...PRSigstoreSignedFulcioOption) (PRSigstoreSignedFulcio, error) { + return newPRSigstoreSignedFulcio(options...) +} + +// Compile-time check that prSigstoreSignedFulcio implements json.Unmarshaler. +var _ json.Unmarshaler = (*prSigstoreSignedFulcio)(nil) + +func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { + *f = prSigstoreSignedFulcio{} + var tmp prSigstoreSignedFulcio + var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... 
+ if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
+ switch key {
+ case "caPath":
+ gotCAPath = true
+ return &tmp.CAPath
+ case "caData":
+ gotCAData = true
+ return &tmp.CAData
+ case "oidcIssuer":
+ gotOIDCIssuer = true
+ return &tmp.OIDCIssuer
+ case "subjectEmail":
+ gotSubjectEmail = true
+ return &tmp.SubjectEmail
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ var opts []PRSigstoreSignedFulcioOption
+ if gotCAPath {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAPath(tmp.CAPath))
+ }
+ if gotCAData {
+ opts = append(opts, PRSigstoreSignedFulcioWithCAData(tmp.CAData))
+ }
+ if gotOIDCIssuer {
+ opts = append(opts, PRSigstoreSignedFulcioWithOIDCIssuer(tmp.OIDCIssuer))
+ }
+ if gotSubjectEmail {
+ opts = append(opts, PRSigstoreSignedFulcioWithSubjectEmail(tmp.SubjectEmail))
+ }
+
+ res, err := newPRSigstoreSignedFulcio(opts...)
+ if err != nil {
+ return err
+ }
+
+ *f = *res
+ return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go
new file mode 100644
index 00000000..4f8d0da3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go
@@ -0,0 +1,293 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/internal/unparsedimage"
+ "github.com/containers/image/v5/types"
+ "github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+ // - sarRejected if the signature has not been verified;
+ // in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // - sarUnknown if this PolicyRequirement does not deal with signatures.
+ // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+ // Returning sarUnknown and a non-nil error value is invalid.
+ // WARNING: This makes the signature contents acceptable for further processing,
+ // but it does not necessarily mean that the contents of the signature are
+ // consistent with local policy.
+ // For example:
+ // - Do not use a true value to determine whether to run
+ // a container based on this image; use IsRunningImageAllowed instead.
+ // - Just because a signature is accepted does not automatically mean the contents of the
+ // signature are authorized to run code as root, or to affect system or cluster configuration.
+ isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+ // isRunningImageAllowed returns true if the requirement allows running an image.
+ // If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the
+ // layers. Users must validate that the layers match their expected digests.
+ isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+ // matchesDockerReference decides whether a specific image identity is accepted for an image
+ // (or, usually, for the image's Reference().DockerReference()). Note that
+ // image.Reference().DockerReference() may be nil.
+ matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+ Policy *Policy
+ state policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+ pcInitializing policyContextState = "Initializing"
+ pcReady policyContextState = "Ready"
+ pcInUse policyContextState = "InUse"
+ pcDestroying policyContextState = "Destroying"
+ pcDestroyed policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+ if pc.state != expected {
+ return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ }
+ pc.state = new
+ return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+ pc := &PolicyContext{Policy: policy, state: pcInitializing}
+ // FIXME: initialize
+ if err := pc.changeState(pcInitializing, pcReady); err != nil {
+ // Huh?! This should never fail, we didn't give the pointer to anybody.
+ // Just give up and leave unclean state around.
+ return nil, err
+ }
+ return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
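Pulling the pieces together, a sketch of the intended PolicyContext lifecycle; image stands for a types.UnparsedImage obtained elsewhere, and Destroy/IsRunningImageAllowed follow just below.

// import (
//	"context"
//	"github.com/containers/image/v5/signature"
//	"github.com/containers/image/v5/types"
// )
func checkImage(ctx context.Context, image types.UnparsedImage) (bool, error) {
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		return false, err
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // releases the context; it must not be reused afterwards
	return pc.IsRunningImageAllowed(ctx, image)
}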
+func (pc *PolicyContext) Destroy() error {
+ if err := pc.changeState(pcReady, pcDestroying); err != nil {
+ return err
+ }
+ // FIXME: destroy
+ return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+ // Do we have a PolicyTransportScopes for this transport?
+ transportName := ref.Transport().Name()
+ if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if req, ok := transportScopes[identity]; ok {
+ logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ return req
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if req, ok := transportScopes[name]; ok {
+ logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ return req
+ }
+ }
+
+ // Look for a default match for the transport.
+ if req, ok := transportScopes[""]; ok {
+ logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ return req
+ }
+ }
+
+ logrus.Debugf(" Using default policy section")
+ return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ sigs = nil
+ finalErr = err
+ }
+ }()
+
+ image := unparsedimage.FromPublic(publicImage)
+
+ logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!)
+ unverifiedSignatures, err := image.Signatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*Signature, 0, len(unverifiedSignatures))
+ for sigNumber, sig := range unverifiedSignatures {
+ var acceptedSig *Signature // non-nil if accepted
+ rejected := false
+ // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber) + interpretingReqs: + for reqNumber, req := range reqs { + // FIXME: Log the requirement itself? For now, we use just the number. + // FIXME: supply state + switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res { + case sarAccepted: + if as == nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature accepted", reqNumber) + if acceptedSig == nil { + acceptedSig = as + } else if *as != *acceptedSig { // Coverage: this should never happen + // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? + logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) + rejected = true + acceptedSig = nil + break interpretingReqs + } + case sarRejected: + logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + case sarUnknown: + if err != nil { // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) + rejected = true + break interpretingReqs + } + logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) + default: // Coverage: this should never happen + logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) + rejected = true + break interpretingReqs + } + } + // This also handles the (invalid) case of empty reqs, by rejecting the signature. + if acceptedSig != nil && !rejected { + logrus.Debugf(" Overall: OK, signature accepted") + res = append(res, acceptedSig) + } else { + logrus.Debugf(" Overall: Signature not accepted") + } + } + return res, nil +} + +// IsRunningImageAllowed returns true iff the policy allows running the image. +// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation +// succeeded but the result was rejection. +// WARNING: This validates signatures and the manifest, but does not download or validate the +// layers. Users must validate that the layers match their expected digests. +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + image := unparsedimage.FromPublic(publicImage) + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. 
+ logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go new file mode 100644 index 00000000..a8bc0130 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go new file mode 100644 index 00000000..a4187735 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -0,0 +1,145 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" + digest "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? 
+ return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType)) + } + + // FIXME: move this to per-context initialization + var data [][]byte + keySources := 0 + if pr.KeyPath != "" { + keySources++ + d, err := os.ReadFile(pr.KeyPath) + if err != nil { + return sarRejected, nil, err + } + data = [][]byte{d} + } + if pr.KeyPaths != nil { + keySources++ + data = [][]byte{} + for _, path := range pr.KeyPaths { + d, err := os.ReadFile(path) + if err != nil { + return sarRejected, nil, err + } + data = append(data, d) + } + } + if pr.KeyData != nil { + keySources++ + data = [][]byte{pr.KeyData} + } + if keySources != 1 { + return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`) + } + + // FIXME: move this to per-context initialization + mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data) + if err != nil { + return sarRejected, nil, err + } + defer mech.Close() + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if slices.Contains(trustedIdentities, keyIdentity) { + return nil + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. + return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME: Use image.UntrustedSignatures, use that to improve error messages + // (needs tests!) + sigs, err := image.Signatures(ctx) + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. 
+ fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go new file mode 100644 index 00000000..dcf5592a --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -0,0 +1,281 @@ +// Policy evaluation for prSigstoreSigned. + +package signature + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + digest "github.com/opencontainers/go-digest" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set, +// and returns the referenced data, or nil if neither is set. +func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) { + switch { + case data != nil && path != "": + return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix) + case path != "": + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return d, nil + case data != nil: + return data, nil + default: // Nothing + return nil, nil + } +} + +// prepareTrustRoot creates a fulcioTrustRoot from the input data. +// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.) 
+func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { + caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath) + if err != nil { + return nil, err + } + if caCertBytes == nil { + return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`) + } + certs := x509.NewCertPool() + if ok := certs.AppendCertsFromPEM(caCertBytes); !ok { + return nil, errors.New("error loading Fulcio CA certificates") + } + fulcio := fulcioTrustRoot{ + caCertificates: certs, + oidcIssuer: f.OIDCIssuer, + subjectEmail: f.SubjectEmail, + } + if err := fulcio.validate(); err != nil { + return nil, err + } + return &fulcio, nil +} + +// sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy +type sigstoreSignedTrustRoot struct { + publicKey crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKey *ecdsa.PublicKey +} + +func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { + res := sigstoreSignedTrustRoot{} + + publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath) + if err != nil { + return nil, err + } + if publicKeyPEM != nil { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM) + if err != nil { + return nil, fmt.Errorf("parsing public key: %w", err) + } + res.publicKey = pk + } + + if pr.Fulcio != nil { + f, err := pr.Fulcio.prepareTrustRoot() + if err != nil { + return nil, err + } + res.fulcio = f + } + + rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath) + if err != nil { + return nil, err + } + if rekorPublicKeyPEM != nil { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM) + if err != nil { + return nil, fmt.Errorf("parsing Rekor public key: %w", err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key is not using ECDSA") + + } + res.rekorPublicKey = pkECDSA + } + + return &res, nil +} + +func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // We don’t know of a single user of this API, and we might return unexpected values in Signature. + // For now, just punt. + return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore") +} + +func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) { + // FIXME: move this to per-context initialization + trustRoot, err := pr.prepareTrustRoot() + if err != nil { + return sarRejected, err + } + + untrustedAnnotations := sig.UntrustedAnnotations() + untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey) + } + untrustedPayload := sig.UntrustedPayload() + + var publicKey crypto.PublicKey + switch { + case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified") + case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. 
+ return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified") + + case trustRoot.publicKey != nil: + if trustRoot.rekorPublicKey != nil { + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + + } + // We don’t care about the Rekor timestamp, just about log presence. + if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil { + return sarRejected, err + } + } + publicKey = trustRoot.publicKey + + case trustRoot.fulcio != nil: + if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations. + return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") + } + untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) + } + untrustedCert, ok := untrustedAnnotations[signature.SigstoreCertificateAnnotationKey] + if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should correctly reject it anyway. + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreCertificateAnnotationKey) + } + var untrustedIntermediateChainBytes []byte + if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { + untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) + } + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio, + []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) + if err != nil { + return sarRejected, err + } + publicKey = pk + } + + if publicKey == nil { + // Coverage: This should never happen, we have already excluded the possibility in the switch above. 
+ return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") + } + signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + ValidateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + ValidateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, err + } + if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values + return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen. + } + + return sarAccepted, nil +} + +func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + sigs, err := image.UntrustedSignatures(ctx) + if err != nil { + return false, err + } + var rejections []error + foundNonSigstoreSignatures := 0 + foundSigstoreNonAttachments := 0 + for _, s := range sigs { + sigstoreSig, ok := s.(signature.Sigstore) + if !ok { + foundNonSigstoreSignatures++ + continue + } + if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType { + foundSigstoreNonAttachments++ + continue + } + + var reason error + switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 { + // A nice message for the most common case. + summary = PolicyRequirementError("A signature was required, but no signature exists") + } else { + summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)", + foundNonSigstoreSignatures, foundSigstoreNonAttachments)) + } + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go new file mode 100644 index 00000000..031866f0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. 
+ +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go new file mode 100644 index 00000000..290fc245 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go @@ -0,0 +1,8 @@ +//go:build !freebsd +// +build !freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go new file mode 100644 index 00000000..702b7171 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go @@ -0,0 +1,8 @@ +//go:build freebsd +// +build freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go new file mode 100644 index 00000000..4e70c0f2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -0,0 +1,154 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/transports" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
+func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchRepoDigestOrExactReferenceValues implements prmMatchRepoDigestOrExact.matchesDockerReference
+// using reference.Named values.
+func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) bool {
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, in UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+		return signature.Name() == intended.Name()
+	default: // !reference.IsNameOnly(intended)
+		return false
+	}
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+	r1, err := reference.ParseNormalizedNamed(s1)
+	if err != nil {
+		return nil, nil, err
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// refMatchesPrefix returns true if ref matches prm.Prefix.
+func (prm *prmRemapIdentity) refMatchesPrefix(ref reference.Named) bool {
+	name := ref.Name()
+	switch {
+	case len(name) < len(prm.Prefix):
+		return false
+	case len(name) == len(prm.Prefix):
+		return name == prm.Prefix
+	case len(name) > len(prm.Prefix):
+		// We are matching only ref.Name(), not ref.String(), so the only separator we are
+		// expecting is '/':
+		// - '@' is only valid to separate a digest, i.e. not a part of ref.Name()
+		// - similarly ':' to mark a tag would not be a part of ref.Name(); it can be a part of a
+		//   host:port domain syntax, but we don't treat that specially and require an exact match
+		//   of the domain.
+		return strings.HasPrefix(name, prm.Prefix) && name[len(prm.Prefix)] == '/'
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
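The '/' boundary check in refMatchesPrefix is what keeps a prefix like example.com/vendor from matching example.com/vendored. A small standalone illustration of that logic on plain strings (the real code operates on reference.Named values):

package main

import (
	"fmt"
	"strings"
)

// prefixMatches mirrors the refMatchesPrefix comparison above for plain strings.
func prefixMatches(name, prefix string) bool {
	switch {
	case len(name) < len(prefix):
		return false
	case len(name) == len(prefix):
		return name == prefix
	default:
		return strings.HasPrefix(name, prefix) && name[len(prefix)] == '/'
	}
}

func main() {
	fmt.Println(prefixMatches("example.com/vendor/app", "example.com/vendor"))   // true
	fmt.Println(prefixMatches("example.com/vendored/app", "example.com/vendor")) // false: no '/' boundary
	fmt.Println(prefixMatches("example.com/vendor", "example.com/vendor"))       // true: exact match
}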
+// remapReferencePrefix returns the result of remapping ref, if it matches prm.Prefix,
+// or the original ref if it does not.
+func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (reference.Named, error) {
+	if !prm.refMatchesPrefix(ref) {
+		return ref, nil
+	}
+	refString := ref.String()
+	newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
+	}
+	return newParsedRef, nil
+}
+
+func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	intended, err = prm.remapReferencePrefix(intended)
+	if err != nil {
+		return false
+	}
+	return matchRepoDigestOrExactReferenceValues(intended, signature)
+}
diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go
new file mode 100644
index 00000000..96e91a0a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -0,0 +1,215 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/containers-policy.json.5.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
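Because each element of a PolicyRequirements list must be satisfied independently, listing several requirements ANDs them. A hedged JSON sketch (scope and key paths illustrative), using the requirement types defined below:

// Two requirements in one list are ANDed: the image must carry a matching
// GPG "signedBy" signature AND a matching "sigstoreSigned" signature.
var andedRequirements = []byte(`{
	"default": [{"type": "reject"}],
	"transports": {
		"docker": {
			"registry.example.com/critical": [
				{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/pki/release.gpg"},
				{"type": "sigstoreSigned", "keyPath": "/etc/pki/cosign.pub"}
			]
		}
	}
}`)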
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+	prTypeSigstoreSigned         prTypeIdentifier = "sigstoreSigned"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+	prCommon
+
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+	// FIXME: Multiple public keys?
+
+	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
+	Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`
+
+	// RekorPublicKeyPath is a pathname to a local file containing a public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
+	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	// Note that /usr/bin/cosign interoperability might require using repo-only matching.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// PRSigstoreSignedFulcio contains Fulcio configuration options for a "sigstoreSigned" PolicyRequirement.
+// This is a public type with a single private implementation.
+type PRSigstoreSignedFulcio interface {
+	// prepareTrustRoot creates a fulcioTrustRoot from the input data.
+	// (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
+	prepareTrustRoot() (*fulcioTrustRoot, error)
+}
+
+// prSigstoreSignedFulcio collects Fulcio configuration options for prSigstoreSigned
+type prSigstoreSignedFulcio struct {
+	// CAPath is a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CAPath and CAData must be specified.
+	CAPath string `json:"caPath,omitempty"`
+	// CAData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CAPath and CAData must be specified.
+	CAData []byte `json:"caData,omitempty"`
+	// OIDCIssuer specifies the expected OIDC issuer, recorded by Fulcio into the generated certificates.
+	OIDCIssuer string `json:"oidcIssuer,omitempty"`
+	// SubjectEmail specifies the expected email address of the authenticated OIDC identity, recorded by Fulcio into the generated certificates.
+	SubjectEmail string `json:"subjectEmail,omitempty"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
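The field pairings above (a public key versus Fulcio, and the Rekor requirement whenever Fulcio is used) map onto JSON as in this hedged sketch; the issuer, email, scope, and paths are hypothetical placeholders:

// Illustrative "sigstoreSigned" requirement using Fulcio plus Rekor.
var sigstorePolicy = []byte(`{
	"default": [{"type": "reject"}],
	"transports": {
		"docker": {
			"registry.example.com": [{
				"type": "sigstoreSigned",
				"fulcio": {
					"caPath": "/etc/pki/fulcio_v1.crt.pem",
					"oidcIssuer": "https://oidc.example.com",
					"subjectEmail": "release@example.com"
				},
				"rekorPublicKeyPath": "/etc/pki/rekor.pub",
				"signedIdentity": {"type": "matchRepoDigestOrExact"}
			}]
		}
	}
}`)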
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+	prmTypeRemapIdentity          prmTypeIdentifier = "remapIdentity"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
+
+// prmRemapIdentity is a PolicyReferenceMatch with type = prmRemapIdentity: like prmMatchRepoDigestOrExact,
+// except that a namespace (at least a host:port, at most a single repository) is substituted before matching the two references.
+type prmRemapIdentity struct {
+	prmCommon
+	Prefix       string `json:"prefix"`
+	SignedPrefix string `json:"signedPrefix"`
+	// Possibly let the users make a choice for tag/digest matching behavior
+	// similar to prmMatchExact/prmMatchRepository?
+}
diff --git a/vendor/github.com/containers/image/v5/signature/signer/signer.go b/vendor/github.com/containers/image/v5/signature/signer/signer.go
new file mode 100644
index 00000000..73ae550a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/signer/signer.go
@@ -0,0 +1,9 @@
+package signer
+
+import "github.com/containers/image/v5/internal/signer"
+
+// Signer is an object, possibly carrying state, that can be used by copy.Image to sign one or more container images.
+// It can only be created from within the containers/image package; it can’t be implemented externally.
+//
+// The owner of a Signer must call Close() when done.
+type Signer = signer.Signer
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
new file mode 100644
index 00000000..2e510f60
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,103 @@
+package sigstore
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+
+	"github.com/secure-systems-lab/go-securesystemslib/encrypted"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/sigstore/sigstore/pkg/signature"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+//     http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+	// from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
+	cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+	// from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
+	sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats?
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+	// Decrypt first
+	p, _ := pem.Decode(key)
+	if p == nil {
+		return nil, errors.New("invalid pem block")
+	}
+	if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
+		return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+	}
+
+	x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+	if err != nil {
+		return nil, fmt.Errorf("decrypt: %w", err)
+	}
+
+	pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+	if err != nil {
+		return nil, fmt.Errorf("parsing private key: %w", err)
+	}
+	switch pk := pk.(type) {
+	case *rsa.PrivateKey:
+		return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+	case *ecdsa.PrivateKey:
+		return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+	case ed25519.PrivateKey:
+		return signature.LoadED25519SignerVerifier(pk)
+	default:
+		return nil, errors.New("unsupported key type")
+	}
+}
+
+// simplified from sigstore/cosign/pkg/cosign.marshalKeyPair
+// loadPrivateKey always requires encryption, so this always requires a passphrase.
+func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, password []byte) (_privateKey []byte, _publicKey []byte, err error) {
+	x509Encoded, err := x509.MarshalPKCS8PrivateKey(privateKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("x509 encoding private key: %w", err)
+	}
+
+	encBytes, err := encrypted.Encrypt(x509Encoded, password)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// store in PEM format
+	privBytes := pem.EncodeToMemory(&pem.Block{
+		Bytes: encBytes,
+		// Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types,
+		// but a version of cosign that can accept them has not yet been released.
+		Type: cosignPrivateKeyPemType,
+	})
+
+	// Now do the public key
+	pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return privBytes, pubBytes, nil
+}
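Taken together, marshalKeyPair and loadPrivateKey round-trip an encrypted PKCS#8 key through PEM. A hedged sketch of that round trip, reusing the unexported helpers above (ECDSA-P256 chosen to mirror GenerateKeyPair in the next file):

package sigstore // illustrative placement alongside the helpers above

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

// roundTrip encrypts a fresh key with marshalKeyPair and loads it back
// with loadPrivateKey; the passphrase must match on both sides.
func roundTrip(passphrase []byte) error {
	raw, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	privPEM, _, err := marshalKeyPair(raw, raw.Public(), passphrase)
	if err != nil {
		return err
	}
	if _, err := loadPrivateKey(privPEM, passphrase); err != nil {
		return fmt.Errorf("reloading key: %w", err)
	}
	return nil
}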
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/generate.go b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
new file mode 100644
index 00000000..77520c12
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/generate.go
@@ -0,0 +1,35 @@
+package sigstore
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+)
+
+// GenerateKeyPairResult is a struct to ensure the private and public parts can not be confused by the caller.
+type GenerateKeyPairResult struct {
+	PublicKey  []byte
+	PrivateKey []byte
+}
+
+// GenerateKeyPair generates a public/private key pair usable for signing images using the sigstore format,
+// and returns key representations suitable for storing in long-term files (with the private key encrypted using the provided passphrase).
+// The specific key kind (e.g. algorithm, size), as well as the file format, are unspecified by this API,
+// and can change with best practices over time.
+func GenerateKeyPair(passphrase []byte) (*GenerateKeyPairResult, error) {
+	// https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes
+	// only requires ECDSA-P256 to be supported, so that’s what we must use.
+	rawKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		// Coverage: This can fail only if the randomness source fails
+		return nil, err
+	}
+	private, public, err := marshalKeyPair(rawKey, rawKey.Public(), passphrase)
+	if err != nil {
+		return nil, err
+	}
+	return &GenerateKeyPairResult{
+		PublicKey:  public,
+		PrivateKey: private,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
new file mode 100644
index 00000000..c6258f40
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/internal/signer.go
@@ -0,0 +1,95 @@
+package internal
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature/internal"
+	sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+type Option func(*SigstoreSigner) error
+
+// SigstoreSigner is a signer.SignerImplementation implementation for sigstore signatures.
+// It is initialized using various closures that implement Option, sadly over several subpackages, to decrease the
+// dependency impact.
+type SigstoreSigner struct {
+	PrivateKey       sigstoreSignature.Signer // May be nil during initialization
+	SigningKeyOrCert []byte                   // For possible Rekor upload; always initialized together with PrivateKey
+
+	// Fulcio results to include
+	FulcioGeneratedCertificate      []byte // Or nil
+	FulcioGeneratedCertificateChain []byte // Or nil
+
+	// Rekor state
+	RekorUploader func(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) // Or nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *SigstoreSigner) ProgressMessage() string {
+	return "Signing image using a sigstore signature"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *SigstoreSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (signature.Signature, error) {
+	if s.PrivateKey == nil {
+		return nil, errors.New("internal error: nothing to sign with, should have been detected in NewSigner")
+	}
+
+	if reference.IsNameOnly(dockerReference) {
+		return nil, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String())
+	}
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return nil, err
+	}
+	// sigstore/cosign completely ignores dockerReference for actual policy decisions.
+	// They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks.
+	// So, just do what simple signing does, and cosign won’t mind.
+	payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String())
+	payloadBytes, err := json.Marshal(payloadData)
+	if err != nil {
+		return nil, err
+	}
+
+	// github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(),
+	// which seems to be not used by anything. So we don’t bother.
+	signatureBytes, err := s.PrivateKey.SignMessage(bytes.NewReader(payloadBytes))
+	if err != nil {
+		return nil, fmt.Errorf("creating signature: %w", err)
+	}
+	base64Signature := base64.StdEncoding.EncodeToString(signatureBytes)
+	var rekorSETBytes []byte // = nil
+	if s.RekorUploader != nil {
+		set, err := s.RekorUploader(ctx, s.SigningKeyOrCert, signatureBytes, payloadBytes)
+		if err != nil {
+			return nil, err
+		}
+		rekorSETBytes = set
+	}
+
+	annotations := map[string]string{
+		signature.SigstoreSignatureAnnotationKey: base64Signature,
+	}
+	if s.FulcioGeneratedCertificate != nil {
+		annotations[signature.SigstoreCertificateAnnotationKey] = string(s.FulcioGeneratedCertificate)
+	}
+	if s.FulcioGeneratedCertificateChain != nil {
+		annotations[signature.SigstoreIntermediateCertificateChainAnnotationKey] = string(s.FulcioGeneratedCertificateChain)
+	}
+	if rekorSETBytes != nil {
+		annotations[signature.SigstoreSETAnnotationKey] = string(rekorSETBytes)
+	}
+	return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, payloadBytes, annotations), nil
+}
+
+func (s *SigstoreSigner) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/signer.go b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
new file mode 100644
index 00000000..fb825ada
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/signer.go
@@ -0,0 +1,60 @@
+package sigstore
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	internalSigner "github.com/containers/image/v5/internal/signer"
+	"github.com/containers/image/v5/signature/signer"
+	"github.com/containers/image/v5/signature/sigstore/internal"
+	"github.com/sigstore/sigstore/pkg/cryptoutils"
+)
+
+type Option = internal.Option
+
+func WithPrivateKeyFile(file string, passphrase []byte) Option {
+	return func(s *internal.SigstoreSigner) error {
+		if s.PrivateKey != nil {
+			return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
+		}
+
+		if passphrase == nil {
+			return errors.New("private key passphrase not provided")
+		}
+
+		privateKeyPEM, err := os.ReadFile(file)
+		if err != nil {
+			return fmt.Errorf("reading private key from %s: %w", file, err)
+		}
+		signerVerifier, err := loadPrivateKey(privateKeyPEM, passphrase)
+		if err != nil {
+			return fmt.Errorf("initializing private key: %w", err)
+		}
+		publicKey, err := signerVerifier.PublicKey()
+		if err != nil {
+			return fmt.Errorf("getting public key from private key: %w", err)
+		}
+		publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(publicKey)
+		if err != nil {
+			return fmt.Errorf("converting public key to PEM: %w", err)
+		}
+		s.PrivateKey = signerVerifier
+		s.SigningKeyOrCert = publicKeyPEM
+		return nil
+	}
+}
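A hedged sketch of wiring the resulting Signer into an image copy, assuming the vendored containers/image version accepts signers via copy.Options.Signers; the key path and passphrase handling are illustrative:

package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature/signer"
	"github.com/containers/image/v5/signature/sigstore"
)

// buildSignedCopyOptions prepares copy options that sign with a sigstore key.
func buildSignedCopyOptions(keyPath string, passphrase []byte) (*copy.Options, error) {
	s, err := sigstore.NewSigner(sigstore.WithPrivateKeyFile(keyPath, passphrase))
	if err != nil {
		return nil, fmt.Errorf("creating sigstore signer: %w", err)
	}
	// The caller should s.Close() after the copy completes.
	return &copy.Options{Signers: []*signer.Signer{s}}, nil
}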
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+	s := internal.SigstoreSigner{}
+	for _, o := range opts {
+		if err := o(&s); err != nil {
+			return nil, err
+		}
+	}
+	if s.PrivateKey == nil {
+		return nil, errors.New("no private key source provided (neither a private key nor Fulcio) when preparing to create sigstore signatures")
+	}
+
+	return internalSigner.NewSigner(&s), nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go
new file mode 100644
index 00000000..56b222ed
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/simple.go
@@ -0,0 +1,283 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/containers/image/v5/signature/internal"
+	"github.com/containers/image/v5/version"
+	digest "github.com/opencontainers/go-digest"
+)
+
+const (
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError = internal.InvalidSignatureError
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+	untrustedDockerManifestDigest digest.Digest
+	untrustedDockerReference      string // FIXME: more precise type?
+	untrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	untrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+type UntrustedSignatureInformation struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	UntrustedTimestamp            *time.Time
+	UntrustedShortKeyIdentifier   string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+	// Use intermediate variables for these values so that we can take their addresses.
+	// Golang guarantees that they will have a new address on every execution.
+	creatorID := "atomic " + version.Version
+	timestamp := time.Now().Unix()
+	return untrustedSignature{
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
+	}
+}
+
+// A compile-time check that untrustedSignature and *untrustedSignature implement json.Marshaler
+var _ json.Marshaler = untrustedSignature{}
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]any{
+		"type":     signatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
+	}
+	optional := map[string]any{}
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
+	}
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
+	}
+	signature := map[string]any{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+	err := s.strictUnmarshalJSON(data)
+	if err != nil {
+		if formatErr, ok := err.(internal.JSONFormatError); ok {
+			err = internal.NewInvalidSignatureError(formatErr.Error())
+		}
+	}
+	return err
+}
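MarshalJSON above pins down the wire format; for reference, a payload produced this way looks like the following sketch (digest, reference, creator version, and timestamp are placeholders):

// Illustrative output of untrustedSignature.MarshalJSON.
const exampleAtomicSignature = `{
	"critical": {
		"type": "atomic container signature",
		"image": {"docker-manifest-digest": "sha256:e6f2e6f2..."},
		"identity": {"docker-reference": "docker.io/library/busybox:latest"}
	},
	"optional": {
		"creator": "atomic <version>",
		"timestamp": 1699999999
	}
}`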
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore any
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.untrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
+		}
+		s.untrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
+	}
+
+	var digestString string
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.untrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
+		"docker-reference": &s.untrustedDockerReference,
+	})
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity.
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn't a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key; actually, the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+		return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+	}
+
+	if passphrase != "" {
+		return nil, errors.New("signing mechanism does not support passphrases")
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
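+//
+// An illustrative sketch of how a caller might populate this (the permissive
+// validators below are placeholders, not a recommendation):
+//
+//	rules := signatureAcceptanceRules{
+//		validateKeyIdentity:                func(keyIdentity string) error { return nil },
+//		validateSignedDockerReference:      func(ref string) error { return nil },
+//		validateSignedDockerManifestDigest: func(d digest.Digest) error { return nil },
+//	}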
+type signatureAcceptanceRules struct {
+	validateKeyIdentity                func(string) error
+	validateSignedDockerReference      func(string) error
+	validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it.
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+	if err != nil {
+		return nil, err
+	}
+	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+		return nil, err
+	}
+
+	var unmatchedSignature untrustedSignature
+	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+		return nil, internal.NewInvalidSignatureError(err.Error())
+	}
+	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
+		return nil, err
+	}
+	if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
+		return nil, err
+	}
+	// signatureAcceptanceRules have accepted this value.
+	return &Signature{
+		DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
+		DockerReference:      unmatchedSignature.untrustedDockerReference,
+	}, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values "are probably" reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like "✅ Verified by $authority").
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
+	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+	if err != nil {
+		return nil, err
+	}
+	defer mech.Close()
+
+	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+	if err != nil {
+		return nil, err
+	}
+	var untrustedDecodedContents untrustedSignature
+	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+		return nil, internal.NewInvalidSignatureError(err.Error())
+	}
+
+	var timestamp *time.Time // = nil
+	if untrustedDecodedContents.untrustedTimestamp != nil {
+		ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
+		timestamp = &ts
+	}
+	return &UntrustedSignatureInformation{
+		UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
+		UntrustedDockerReference:      untrustedDecodedContents.untrustedDockerReference,
+		UntrustedCreatorID:            untrustedDecodedContents.untrustedCreatorID,
+		UntrustedTimestamp:            timestamp,
+		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
new file mode 100644
index 00000000..983bbb10
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/simplesigning/signer.go
@@ -0,0 +1,105 @@
+package simplesigning
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	internalSig "github.com/containers/image/v5/internal/signature"
+	internalSigner "github.com/containers/image/v5/internal/signer"
+	"github.com/containers/image/v5/signature"
+	"github.com/containers/image/v5/signature/signer"
+)
+
+// simpleSigner is a signer.SignerImplementation implementation for simple signing signatures.
+type simpleSigner struct {
+	mech           signature.SigningMechanism
+	keyFingerprint string
+	passphrase     string // "" if not provided.
+}
+
+// Option is a modifier for NewSigner.
+type Option func(*simpleSigner) error
+
+// WithKeyFingerprint returns an Option for NewSigner, specifying a key to sign with, using the provided GPG key fingerprint.
+func WithKeyFingerprint(keyFingerprint string) Option {
+	return func(s *simpleSigner) error {
+		s.keyFingerprint = keyFingerprint
+		return nil
+	}
+}
+
+// WithPassphrase returns an Option for NewSigner, specifying a passphrase for the private key.
+// If this is not specified, the system may interactively prompt using a gpg-agent / pinentry.
+func WithPassphrase(passphrase string) Option {
+	return func(s *simpleSigner) error {
+		// The gpgme implementation can't use a passphrase that contains \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return errors.New("invalid passphrase: must not contain a line break")
+		}
+		s.passphrase = passphrase
+		return nil
+	}
+}
+
+// NewSigner returns a signature.Signer which creates "simple signing" signatures using the user's default
+// GPG configuration ($GNUPGHOME / ~/.gnupg).
+//
+// The set of options must identify a key to sign with, typically using WithKeyFingerprint.
+//
+// The caller must call Close() on the returned Signer.
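+//
+// A minimal usage sketch (illustrative; the fingerprint is a placeholder and error
+// handling is elided):
+//
+//	s, err := simplesigning.NewSigner(
+//		simplesigning.WithKeyFingerprint("0123456789ABCDEF0123456789ABCDEF01234567"),
+//	)
+//	if err != nil { ... }
+//	defer s.Close()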
+func NewSigner(opts ...Option) (*signer.Signer, error) {
+	mech, err := signature.NewGPGSigningMechanism()
+	if err != nil {
+		return nil, fmt.Errorf("initializing GPG: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			mech.Close()
+		}
+	}()
+	if err := mech.SupportsSigning(); err != nil {
+		return nil, fmt.Errorf("Signing not supported: %w", err)
+	}
+
+	s := simpleSigner{
+		mech: mech,
+	}
+	for _, o := range opts {
+		if err := o(&s); err != nil {
+			return nil, err
+		}
+	}
+	if s.keyFingerprint == "" {
+		return nil, errors.New("no key identity provided for simple signing")
+	}
+	// Ideally, we should look up (and unlock?) the key at this point already, but our current SigningMechanism API does not allow that.
+
+	succeeded = true
+	return internalSigner.NewSigner(&s), nil
+}
+
+// ProgressMessage returns a human-readable sentence that makes sense to write before starting to create a single signature.
+func (s *simpleSigner) ProgressMessage() string {
+	return "Signing image using simple signing"
+}
+
+// SignImageManifest creates a new signature for manifest m as dockerReference.
+func (s *simpleSigner) SignImageManifest(ctx context.Context, m []byte, dockerReference reference.Named) (internalSig.Signature, error) {
+	if reference.IsNameOnly(dockerReference) {
+		return nil, fmt.Errorf("reference %s can't be signed, it has neither a tag nor a digest", dockerReference.String())
+	}
+	simpleSig, err := signature.SignDockerManifestWithOptions(m, dockerReference.String(), s.mech, s.keyFingerprint, &signature.SignOptions{
+		Passphrase: s.passphrase,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return internalSig.SimpleSigningFromBlob(simpleSig), nil
+}
+
+func (s *simpleSigner) Close() error {
+	return s.mech.Close()
+}
diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go
new file mode 100644
index 00000000..2c186a90
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with "the transport $name: is not supported in this build".
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with "the transport $name: is not supported in this build".
+func NewStubTransport(name string) types.ImageTransport {
+	return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+	return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
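+//
+// For illustration, a build with a transport compiled out might register a stub so
+// that policy files naming that transport still load (hypothetical wiring):
+//
+//	transports.Register(transports.NewStubTransport("docker-daemon"))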
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Allowing any reference in here allows tools with some transports stubbed-out to still
+	// use signature verification policies which refer to these stubbed-out transports.
+	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
new file mode 100644
index 00000000..834f33b4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/containers/image/v5/internal/set"
+	"github.com/containers/image/v5/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+	transports map[string]types.ImageTransport
+	mu         sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+	kt.mu.Lock()
+	t := kt.transports[k]
+	kt.mu.Unlock()
+	return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+	kt.mu.Lock()
+	delete(kt.transports, k)
+	kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	name := t.Name()
+	if t := kt.transports[name]; t != nil {
+		panic(fmt.Sprintf("Duplicate image transport name %s", name))
+	}
+	kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+	kt = &knownTransports{
+		transports: make(map[string]types.ImageTransport),
+	}
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+	return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+	kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+	kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+var deprecatedTransports = set.NewWithValues("atomic")
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
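+//
+// An illustrative use (the exact names depend on which transports this build registers):
+//
+//	for _, name := range transports.ListNames() {
+//		fmt.Println(name)
+//	}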
+func ListNames() []string {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	var names []string
+	for _, transport := range kt.transports {
+		if !deprecatedTransports.Contains(transport.Name()) {
+			names = append(names, transport.Name())
+		}
+	}
+	sort.Strings(names)
+	return names
+}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
new file mode 100644
index 00000000..33adb5f1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -0,0 +1,713 @@
+package types
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	compression "github.com/containers/image/v5/pkg/compression/types"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should, apart from versions of the image format, be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+	// Name returns the name of the transport, which must be unique among other transports.
+	Name() string
+	// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+	ParseReference(reference string) (ImageReference, error)
+	// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+	// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+	// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+	// scope passed to this function will not be "", that value is always allowed.
+	ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+//
+// WARNING: While this design freezes the content of the reference within this process, it cannot freeze the outside
+// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on.
+type ImageReference interface {
+	Transport() ImageTransport
+	// StringWithinTransport returns a string representation of the reference, which MUST be such that
+	// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+	// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+	// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
+	// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+	// instead, see transports.ImageName().
+	StringWithinTransport() string
+
+	// DockerReference returns a Docker reference associated with this reference
+	// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+	// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+	DockerReference() reference.Named
+
+	// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+	// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+	// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+	// (i.e. various references with exactly the same semantics should return the same configuration identity)
+	// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+	// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+	// Returns "" if configuration identities for these references are not supported.
+	PolicyConfigurationIdentity() string
+
+	// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+	// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+	// in order, terminating on first match, and an implicit "" is always checked at the end.
+	// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+	// and each following element to be a prefix of the element preceding it.
+	PolicyConfigurationNamespaces() []string
+
+	// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+	// The caller must call .Close() on the returned ImageCloser.
+	// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+	// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+	// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+	NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
+	// NewImageSource returns a types.ImageSource for this reference.
+	// The caller must call .Close() on the returned ImageSource.
+	NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
+	// NewImageDestination returns a types.ImageDestination for this reference.
+	// The caller must call .Close() on the returned ImageDestination.
+	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+	// DeleteImage deletes the named image from the registry, if supported.
+	DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+	// PreserveOriginal indicates the layer must be preserved, ie
+	// no compression or decompression.
+	PreserveOriginal LayerCompression = iota
+	// Decompress indicates the layer must be decompressed
+	Decompress
+	// Compress indicates the layer must be compressed
+	Compress
+)
+
+// LayerCrypto indicates if layers have been encrypted or decrypted or none
+type LayerCrypto int
+
+const (
+	// PreserveOriginalCrypto indicates the layer must be preserved, ie
+	// no encryption/decryption
+	PreserveOriginalCrypto LayerCrypto = iota
+	// Encrypt indicates the layer is encrypted
+	Encrypt
+	// Decrypt indicates the layer is decrypted
+	Decrypt
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an "unknown" value here does not override that.
+type BlobInfo struct {
+	Digest      digest.Digest // "" if unknown.
+	Size        int64         // -1 if unknown
+	URLs        []string
+	Annotations map[string]string
+	MediaType   string
+
+	// NOTE: The following fields contain desired _edits_ to blob infos.
+	// Conceptually they don't belong in the BlobInfo object at all;
+	// the edits should be provided specifically as parameters to the edit implementation.
+	// We can't remove the fields without breaking compatibility, but don't
+	// add any more.
+
+	// CompressionOperation is used in Image.UpdateLayerInfos to instruct
+	// whether the original layer's "compressed or not" should be preserved,
+	// possibly while changing the compression algorithm from one to another,
+	// or if it should be compressed or decompressed. The field defaults to
+	// preserve the original layer's compressedness.
+	// TODO: To remove together with CryptoOperation in re-design to remove
+	// field out of BlobInfo.
+	CompressionOperation LayerCompression
+	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+	// set when `CompressionOperation == Compress` and MAY be set when
+	// `CompressionOperation == PreserveOriginal` and the compression type is
+	// being changed for an already-compressed layer.
+	CompressionAlgorithm *compression.Algorithm
+	// CryptoOperation is used in Image.UpdateLayerInfos to instruct
+	// whether the original layer was encrypted/decrypted
+	// TODO: To remove together with CompressionOperation in re-design to
+	// remove field out of BlobInfo.
+	CryptoOperation LayerCrypto
+	// Before adding any fields to this struct, read the NOTE above.
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a "scope" where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+	Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+	Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+	Digest   digest.Digest
+	Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+//
+//   - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//     One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//     This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//     or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+//     It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//     to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//     This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//     compress/decompress blobs for their own purposes.
+//
+//   - Known blob locations, managed by individual transports:
+//     The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//     recording transport-specific information that allows the transport to reuse the blob in the future;
+//     then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//     Each transport defines its own "scopes" within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+//     can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
+type BlobInfoCache interface {
+	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+	// May return anyDigest if it is known to be uncompressed.
+	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+	UncompressedDigest(anyDigest digest.Digest) digest.Digest
+	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+	// It's allowed for anyDigest == uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+	// and can be reused given the opaque location data.
+	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+	// uncompressed digest.
+	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller's responsibility.
+type ImageSource interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageSource, if any.
+	Close() error
+	// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+	// It may use a remote (= slow) service.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+	// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+	GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
+	// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
+	// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+	GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
+	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+	HasThreadSafeGetBlob() bool
+	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+	// (e.g. if the source never returns manifest lists).
+	GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+	// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+	// to read the image's layers.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+	// (e.g. if the source never returns manifest lists).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
+}
+
+// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
+//
+// There is a specific required order for some of the calls:
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+	// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageDestination, if any.
+	Close() error
+
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+	SupportedManifestMIMETypes() []string
+	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+	SupportsSignatures(ctx context.Context) error
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression() LayerCompression
+	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs() bool
+	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+	MustMatchRuntimeOS() bool
+	// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference() bool
+
+	// PutBlob writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// May update cache.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+	HasThreadSafePutBlob() bool
+	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+	// reflected in the manifest that will be written.
+	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	// May use and/or update cache.
+	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+	// PutManifest writes manifest to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+	// by `manifest.Digest()`.
+	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+	// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+	// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+	PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+	// PutSignatures writes a set of signatures to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// MUST be called after PutManifest (signatures may reference manifest contents).
+	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+	// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+	// original manifest list digest, if desired.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before Commit() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+	Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+	return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage → Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+	Manifest(ctx context.Context) ([]byte, string, error)
+	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+	Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+	// Note that Reference may return nil in the return value of UpdatedImage!
+	UnparsedImage
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*v1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []BlobInfo
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+	// This does not change the state of the original Image object.
+	// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
+	// manifests of type options.ManifestMIMEType can not include layers that are compressed
+	// in accordance with the CompressionOperation and CompressionAlgorithm specified in one
+	// or more options.LayerInfos items, though retrying with a different
+	// options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
+	// values might succeed.
+	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+	// SupportsEncryption returns an indicator that the image supports encryption
+	//
+	// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+	// the process of updating a manifest between different manifest types was to update then convert.
+	// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+	SupportsEncryption(ctx context.Context) bool
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+	Image
+	// Close removes resources associated with an initialized ImageCloser.
+	Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage.
+type ManifestUpdateOptions struct {
+	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+	EmbeddedDockerReference reference.Named
+	ManifestMIMEType        string
+	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+	InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+	Tag           string
+	Created       *time.Time
+	DockerVersion string
+	Labels        map[string]string
+	Architecture  string
+	Variant       string
+	Os            string
+	Layers        []string
+	LayersData    []ImageInspectLayer
+	Env           []string
+	Author        string
+}
+
+// ImageInspectLayer is a set of metadata describing an image layer's details.
+type ImageInspectLayer struct {
+	MIMEType    string // "" if unknown.
+	Digest      digest.Digest
+	Size        int64 // -1 if unknown.
+	Annotations map[string]string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The value of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+	Username string
+	Password string
+	// IdentityToken can be used as a refresh_token in place of username and
+	// password to obtain the bearer/access token in oauth2 flow. If identity
+	// token is set, password should not be set.
+	// Ref: https://docs.docker.com/registry/spec/auth/oauth/
+	IdentityToken string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+	OptionalBoolUndefined OptionalBool = iota
+	// OptionalBoolTrue represents the boolean true.
+	OptionalBoolTrue
+	// OptionalBoolFalse represents the boolean false.
+	OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code of users.
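+//
+// An illustrative use, setting an OptionalBool field of the SystemContext defined
+// later in this file:
+//
+//	sys := &types.SystemContext{}
+//	sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true)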
+func NewOptionalBool(b bool) OptionalBool {
+	o := OptionalBoolFalse
+	if b {
+		o = OptionalBoolTrue
+	}
+	return o
+}
+
+// ShortNameMode defines the mode of short-name resolution.
+//
+// The use of unqualified-search registries entails an ambiguity as it's
+// unclear from which registry a given image, referenced by a short name, may
+// be pulled.
+//
+// The ShortNameMode type defines how short names should resolve.
+type ShortNameMode int
+
+const (
+	ShortNameModeInvalid ShortNameMode = iota
+	// Use all configured unqualified-search registries without prompting
+	// the user.
+	ShortNameModeDisabled
+	// If stdout and stdin are a TTY, prompt the user to select a configured
+	// unqualified-search registry. Otherwise, use all configured
+	// unqualified-search registries.
+	//
+	// Note that if only one unqualified-search registry is set, it will be
+	// used without prompting.
+	ShortNameModePermissive
+	// Always prompt the user to select a configured unqualified-search
+	// registry. Throw an error if stdout or stdin is not a TTY as
+	// prompting isn't possible.
+	//
+	// Note that if only one unqualified-search registry is set, it will be
+	// used without prompting.
+	ShortNameModeEnforcing
+)
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics are exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+	// Not used for any of the more specific path overrides available in this struct.
+	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
+	// just set RootForImplicitAbsolutePaths and there is no need to worry about the environment.)
+	// NOTE: This does NOT affect paths starting by $HOME.
+	RootForImplicitAbsolutePaths string
+
+	// === Global configuration overrides ===
+	// If not "", overrides the system's default path for signature.Policy configuration.
+	SignaturePolicyPath string
+	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+	RegistriesDirPath string
+	// Path to the system-wide registries configuration file
+	SystemRegistriesConfPath string
+	// Path to the system-wide registries configuration directory
+	SystemRegistriesConfDirPath string
+	// Path to the user-specific short-names configuration file
+	UserShortNameAliasConfPath string
+	// If set, short-name resolution in pkg/shortnames must follow the specified mode
+	ShortNameMode *ShortNameMode
+	// If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
+	// short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
+	// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
+	// specific context.
+	PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
+	// If not "", overrides the default path for the registry authentication file, but only new format files
+	AuthFilePath string
+	// if not "", overrides the default path for the registry authentication file, but with the legacy format;
+	// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
+	// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+	// in locations other than the home dir; openshift components should then set this field in those cases;
+	// this field is ignored if `AuthFilePath` is set (we favor the newer format);
+	// only reading of this data is supported;
+	LegacyFormatAuthFilePath string
+	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+	ArchitectureChoice string
+	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+	OSChoice string
+	// If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
+	VariantChoice string
+	// If not "", overrides the system's default directory containing a blob info cache.
+	BlobInfoCacheDir string
+	// Additional tags when creating or copying a docker-archive.
+	DockerArchiveAdditionalTags []reference.NamedTagged
+	// If not "", overrides the temporary directory to use for storing big files
+	BigFilesTemporaryDir string
+
+	// === OCI.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when downloading OCI image layers.
+	OCICertPath string
+	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	OCIInsecureSkipTLSVerify bool
+	// If not "", use a shared directory for storing blobs rather than within OCI layouts
+	OCISharedBlobDirPath string
+	// Allow uncompressed image layers for OCI images
+	OCIAcceptUncompressedLayers bool
+
+	// === docker.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a container registry.
+	DockerCertPath string
+	// If not "", overrides the system's default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+	// Ignored if DockerCertPath is non-empty.
+	DockerPerHostCertDirPath string
+	// Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	DockerInsecureSkipTLSVerify OptionalBool
+	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+	// Ignored if DockerBearerRegistryToken is non-empty.
+	DockerAuthConfig *DockerAuthConfig
+	// if not "", the library uses this registry token to authenticate to the registry
+	DockerBearerRegistryToken string
+	// if not "", a User-Agent header is added to each request when contacting a registry.
+	DockerRegistryUserAgent string
+	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
+	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
+	// in order to not break any existing docker integration tests.
+	DockerDisableV1Ping bool
+	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
+	DockerDisableDestSchema1MIMETypes bool
+	// If true, the physical pull source of docker transport images is logged at info level
+	DockerLogMirrorChoice bool
+	// Directory to use for OSTree temporary files
+	OSTreeTmpDirPath string
+	// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
+	// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
+	// when the digest for a blob is unknown.
+	DockerRegistryPushPrecomputeDigests bool
+
+	// === docker/daemon.Transport overrides ===
+	// A directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+	// (ending with ".key") used when talking to a Docker daemon.
+	DockerDaemonCertPath string
+	// The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+	DockerDaemonHost string
+	// Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+	DockerDaemonInsecureSkipTLSVerify bool
+
+	// === dir.Transport overrides ===
+	// DirForceCompress compresses the image layers if set to true
+	DirForceCompress bool
+	// DirForceDecompress decompresses the image layers if set to true
+	DirForceDecompress bool
+
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
+}
+
+// ProgressEvent is the type of events a progress reader can produce.
+// Warning: new event types may be added any time.
+type ProgressEvent uint
+
+const (
+	// ProgressEventNewArtifact will be fired on progress reader setup
+	ProgressEventNewArtifact ProgressEvent = iota
+
+	// ProgressEventRead indicates that the artifact download is currently in
+	// progress
+	ProgressEventRead
+
+	// ProgressEventDone is fired when the data transfer has been finished for
+	// the specific artifact
+	ProgressEventDone
+
+	// ProgressEventSkipped is fired when the artifact has been skipped because
+	// it's already available at the destination
+	ProgressEventSkipped
+)
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+	// The event type, indicating what happened
+	Event ProgressEvent
+
+	// The artifact which has been updated in this interval
+	Artifact BlobInfo
+
+	// The currently downloaded size in bytes
+	// Increases from 0 to the final Artifact size
+	Offset uint64
+
+	// The additional offset which has been downloaded inside the last update
+	// interval. Will be reset after each ProgressEventRead event.
+ OffsetUpdate uint64
+}
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
new file mode 100644
index 00000000..27d034dc
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+ // VersionMajor is for API-incompatible changes
+ VersionMajor = 5
+ // VersionMinor is for functionality added in a backwards-compatible manner
+ VersionMinor = 28
+ // VersionPatch is for backwards-compatible bug fixes
+ VersionPatch = 0
+
+ // VersionDev indicates a development branch; it is the empty string for releases.
+ VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
new file mode 100644
index 00000000..a7d8acbf
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The libtrust Project Community Code of Conduct
+
+The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
new file mode 100644
index 00000000..05be0f8a
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to libtrust
+
+Want to hack on libtrust? Awesome! Here are instructions to get you
+started.
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contribution guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/containers/libtrust/LICENSE
new file mode 100644
index 00000000..27448585
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/containers/libtrust/MAINTAINERS
new file mode 100644
index 00000000..9768175f
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/containers/libtrust/README.md
new file mode 100644
index 00000000..dcffb31a
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> into the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control are managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
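+
+## Example
+
+A minimal sketch of generating a key and issuing a self-signed TLS server
+certificate with the helpers defined in this library (illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/containers/libtrust"
+)
+
+func main() {
+	// Generate a new EC P-256 key pair.
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		panic(err)
+	}
+
+	// Issue a self-signed server certificate for localhost.
+	cert, err := libtrust.GenerateSelfSignedServerCert(
+		key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(key.KeyID(), cert.NotAfter)
+}
+```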
+ diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 00000000..966f4f05 --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/containers/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/vendor/github.com/containers/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. 
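+// The certificate is self-issued: the same key acts as both subject and issuer.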
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/containers/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/vendor/github.com/containers/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. 
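+
+For illustration, a minimal sign/verify round trip using the helpers in this
+package might look like the following sketch (it assumes the crypto and
+strings packages are imported by the caller):
+
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		// handle error
+	}
+	sig, alg, err := key.Sign(strings.NewReader("payload"), crypto.SHA256)
+	if err != nil {
+		// handle error
+	}
+	// Re-read the same bytes to verify; Sign consumed the reader.
+	err = key.PublicKey().Verify(strings.NewReader("payload"), alg, sig)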
+*/
+package libtrust
diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/containers/libtrust/ec_key.go
new file mode 100644
index 00000000..0ee1b911
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/ec_key.go
@@ -0,0 +1,422 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to verify: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. 
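+ // (KeyID is derived deterministically from the public key coordinates, so
+ // a mismatched "kid" indicates an inconsistent JWK.)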
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashID is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ r, s, err := k.sign(data, hashID)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
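+ // make(len, cap) yields octetLength-len(dBytes) leading zero bytes; the
+ // append below then fills the buffer to exactly octetLength bytes.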
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to a DER-encoded "EC PRIVATE KEY" PEM block.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go new file mode 100644 index 00000000..d6cdaca3 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go @@ -0,0 +1,23 @@ +// +build !libtrust_openssl + +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + return ecdsa.Sign(rand.Reader, k.PrivateKey, hash) +} diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go new file mode 100644 index 00000000..4137511f --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_openssl.go @@ -0,0 +1,24 @@ +// +build libtrust_openssl + +package libtrust + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hId := k.signatureAlgorithm.HashID() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data: %s", err) + } + + return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hId) +} diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/containers/libtrust/filter.go new file mode 100644 index 00000000..5b2b4fca --- /dev/null +++ b/vendor/github.com/containers/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
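+// Matching uses filepath.Match, so a pattern such as "*.example.com"
+// matches any host under example.com.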
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern; a key is added at most once
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ break
+ }
+ }
+ }
+
+ return filtered, nil
+}
diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/containers/libtrust/hash.go
new file mode 100644
index 00000000..a2df787d
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+ _ "crypto/sha256" // Register SHA224 and SHA256
+ _ "crypto/sha512" // Register SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/containers/libtrust/jsonsign.go
new file mode 100644
index 00000000..cb2ca9a7
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
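+// Each certificate in the chain is embedded in the signature's x5c header as
+// standard base64-encoded DER.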
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
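+ // At this point the chain has been verified against the CA pool;
+ // the JWS signature itself is checked next against the leaf
+ // certificate's public key.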
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
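+// The payload keeps the formatting recorded in its protected headers, and the
+// signatures are embedded under signatureKey.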
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var sb [][]byte
+ for _, jsig := range js.signatures {
+ p, err := json.Marshal(jsig)
+ if err != nil {
+ return nil, err
+ }
+
+ sb = append(sb, p)
+ }
+
+ return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/containers/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+ // Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey.
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and an identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
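+//
+// A minimal usage sketch (hypothetical path, error handling elided; not part
+// of the original source):
+//
+//	pemBytes, _ := os.ReadFile("/etc/trust/server.pem") // hypothetical path
+//	pubKey, err := UnmarshalPublicKeyPEM(pemBytes)
+//	if err == nil {
+//		fmt.Println(pubKey.KeyID())
+//	}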
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/containers/libtrust/key_files.go new file mode 100644 index 00000000..c526de54 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
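+//
+// For illustration (hypothetical paths): a .json or .jwk suffix selects the
+// JWK decoder, any other name is treated as PEM:
+//
+//	jwkKey, _ := LoadPublicKeyFile("/etc/trust/client.jwk") // parsed as JWK
+//	pemKey, _ := LoadPublicKeyFile("/etc/trust/client.pem") // parsed as PEM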
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/containers/libtrust/key_manager.go
new file mode 100644
index 00000000..9a98ae35
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
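+//
+// A sketch of typical wiring (paths and serverKey are hypothetical):
+//
+//	km, err := NewClientKeyManager(serverKey, "/etc/trust/clients.json", "/etc/trust/clients.d")
+//	if err != nil {
+//		return err
+//	}
+//	tlsCfg := &tls.Config{}
+//	err = km.RegisterTLSConfig(tlsCfg) // tlsCfg.ClientCAs now covers the loaded keys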
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/containers/libtrust/rsa_key.go
new file mode 100644
index 00000000..dac4cacf
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data; return a non-nil error if invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to verify: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we choose not to accept it without the extra fields. Only the 'oth'
+	// field will be optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Assign directly into the slice; rsa.CRTValue is a struct, so
+			// writing through a local copy would be lost.
+			crtValues[i] = rsa.CRTValue{
+				Exp:   otherFactorCRT,
+				Coeff: otherCrtCoeff,
+				R:     productOfPrimes,
+			}
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/containers/libtrust/util.go new file mode 100644 index 00000000..a5a101d3 --- /dev/null +++ b/vendor/github.com/containers/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
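+//
+// For example (URL and paths are hypothetical):
+//
+//	cfg, err := NewIdentityAuthTLSClientConfig(
+//		"tcp://docker.example.com:2376", // dockerUrl
+//		true,                            // trust and record unknown hosts
+//		"/home/user/.docker/trust",      // rootConfigPath
+//		"docker.example.com",            // serverName
+//	)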
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
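+	// For example, 65537 (0x00010001) first serializes big-endian as the
+	// four octets 0x00 0x01 0x00 0x01; stripping the leading zero octet
+	// below yields the minimal three-octet form 0x01 0x00 0x01.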
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Scan at most the first three octets so that at least one remains;
+	// a bound past len(buf) would index out of range for a zero exponent.
+	for i = 0; i < 3; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	if byteLen > 4 {
+		return 0, errors.New("invalid public exponent: must be no longer than 4 octets")
+	}
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Silently skip []string values other than "hosts".
+			}
+		default:
+			// Silently skip values of non-encodable types.
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/.gitignore b/vendor/github.com/containers/ocicrypt/.gitignore
new file mode 100644
index 00000000..b25c15b8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml
new file mode 100644
index 00000000..d3800d1e
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/.golangci.yml
@@ -0,0 +1,35 @@
+linters:
+  enable:
+    - depguard
+    - staticcheck
+    - unconvert
+    - gofmt
+    - goimports
+    - revive
+    - ineffassign
+    - vet
+    - unused
+    - misspell
+
+linters-settings:
+  depguard:
+    rules:
+      main:
+        files:
+          - $all
+        deny:
+          - pkg: "io/ioutil"
+
+  revive:
+    severity: error
+    rules:
+      - name: indent-error-flow
+        severity: warning
+        disabled: false
+
+      - name: error-strings
+        disabled: false
+
+  staticcheck:
+    # Suppress reports of deprecated packages
+    checks: ["-SA1019"]
diff --git a/vendor/github.com/containers/ocicrypt/ADOPTERS.md b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
new file mode 100644
index
00000000..fa4b03bb
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/ADOPTERS.md
@@ -0,0 +1,10 @@
+Below is a list of adopters of the `ocicrypt` library, and of projects that support use of OCI encrypted images:
+- [skopeo](https://github.com/containers/skopeo)
+- [buildah](https://github.com/containers/buildah)
+- [containerd](https://github.com/containerd/imgcrypt)
+- [nerdctl](https://github.com/containerd/nerdctl)
+- [distribution](https://github.com/distribution/distribution)
+
+Below is a list of projects that are in the process of adopting support:
+- [quay](https://github.com/quay/quay)
+- [kata-containers](https://github.com/kata-containers/kata-containers)
diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
new file mode 100644
index 00000000..d68f8dbd
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The OCIcrypt Library Project Community Code of Conduct
+
+The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/ocicrypt/LICENSE b/vendor/github.com/containers/ocicrypt/LICENSE
new file mode 100644
index 00000000..95356353
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/LICENSE
@@ -0,0 +1,189 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS new file mode 100644 index 00000000..af38d03b --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS @@ -0,0 +1,6 @@ +# ocicrypt maintainers +# +# Github ID, Name, Email Address +lumjjb, Brandon Lum, lumjjb@gmail.com +stefanberger, Stefan Berger, stefanb@linux.ibm.com +arronwy, Arron Wang, arron.wang@intel.com diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile new file mode 100644 index 00000000..dc9d9853 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/Makefile @@ -0,0 +1,34 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: check build decoder generate-protobuf + +all: build + +FORCE: + +check: + golangci-lint run + +build: vendor + go build ./... + +vendor: + go mod tidy + +test: + go test ./... 
-test.v
+
+generate-protobuf:
+	protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider
diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md
new file mode 100644
index 00000000..b69d14e3
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/README.md
@@ -0,0 +1,50 @@
+# OCIcrypt Library
+
+The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as to provide a consistent implementation of image encryption across container runtimes and build tools.
+
+Consumers of OCIcrypt:
+
+- [containerd/imgcrypt](https://github.com/containerd/imgcrypt)
+- [cri-o](https://github.com/cri-o/cri-o)
+- [skopeo](https://github.com/containers/skopeo)
+
+
+## Usage
+
+There are various levels of usage for this library. The main consumers would be runtime/build tools, and a more specific use is the ability to extend its cryptographic functionality.
+
+### Runtime/Build tool usage
+
+The general exposed interface a runtime/build tool would use is to perform encryption or decryption of layers:
+
+```
+package "github.com/containers/ocicrypt"
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
+func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
+```
+
+The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it remains the responsibility of the caller to ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.). A worked example is shown at the end of this README.
+
+
+### Crypto Agility and Extensibility
+
+The implementations of both symmetric and asymmetric encryption used in this library sit behind two main interfaces, which users can extend if need be. These are in the following packages:
+- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
+- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
+
+We note that adding interfaces here outside the OCI spec is risky and not recommended, except for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.
+
+
+#### Keyprovider interface
+
+As part of the keywrap interface, there is a [keyprovider](https://github.com/containers/ocicrypt/blob/main/docs/keyprovider.md) implementation that allows one to call out to a binary or service.
+
+
+## Security Issues
+
+We consider security issues related to this library critical. Please report any security-related issues by emailing the maintainers listed in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt Pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
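+
+## Example: Encrypting and Decrypting a Layer
+
+A minimal, illustrative sketch of the two calls above (error handling elided;
+the key file paths, layer contents, and descriptor are placeholders you would
+replace with real data):
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"os"
+
+	"github.com/containers/ocicrypt"
+	"github.com/containers/ocicrypt/config"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func main() {
+	pubKey, _ := os.ReadFile("recipient-pub.pem")   // placeholder path
+	privKey, _ := os.ReadFile("recipient-priv.pem") // placeholder path
+
+	// Build an encryption config for a single JWE recipient.
+	cc, _ := config.EncryptWithJwe([][]byte{pubKey})
+
+	plainLayer := bytes.NewReader([]byte("layer data")) // stand-in for a real layer
+	desc := ocispec.Descriptor{}                        // stand-in layer descriptor
+
+	// Encrypt: read the returned stream fully, then run the finalizer to
+	// obtain the annotations that must be set on the layer descriptor.
+	encReader, finalizer, _ := ocicrypt.EncryptLayer(cc.EncryptConfig, plainLayer, desc)
+	encData, _ := io.ReadAll(encReader)
+	annotations, _ := finalizer()
+	desc.Annotations = annotations
+
+	// Decrypt with the matching private key (empty password here).
+	dcc, _ := config.DecryptWithPrivKeys([][]byte{privKey}, [][]byte{[]byte("")})
+	plainReader, _, _ := ocicrypt.DecryptLayer(dcc.DecryptConfig, bytes.NewReader(encData), desc, false)
+	_, _ = io.ReadAll(plainReader)
+}
+```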
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md
new file mode 100644
index 00000000..ea98cb12
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the OCIcrypt Library Project
+
+The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
new file mode 100644
index 00000000..0c485d51
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go
@@ -0,0 +1,161 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// LayerCipherType is the cipher type as specified in the layer metadata
+type LayerCipherType string
+
+// TODO: Should be obtained from OCI spec once included
+const (
+	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
+)
+
+// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is sensitive and therefore should not be stored in plaintext
+type PrivateLayerBlockCipherOptions struct {
+	// SymmetricKey represents the symmetric key used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	SymmetricKey []byte `json:"symkey"`
+
+	// Digest is the digest of the original data for verification.
+	// This is NOT populated by Encrypt/Decrypt calls
+	Digest digest.Digest `json:"digest"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
+// an image that is public and can be deduplicated in plaintext across multiple
+// recipients
+type PublicLayerBlockCipherOptions struct {
+	// CipherType denotes the cipher type according to the list of OCI supported
+	// cipher types.
+	CipherType LayerCipherType `json:"cipher"`
+
+	// Hmac contains the hmac string to help verify encryption
+	Hmac []byte `json:"hmac"`
+
+	// CipherOptions contains the cipher metadata used for encryption/decryption
+	// This field should be populated by Encrypt/Decrypt calls
+	CipherOptions map[string][]byte `json:"cipheroptions"`
+}
+
+// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
+// required to encrypt/decrypt an image
+type LayerBlockCipherOptions struct {
+	Public  PublicLayerBlockCipherOptions
+	Private PrivateLayerBlockCipherOptions
+}
+
+// LayerBlockCipher returns a provider for encrypt/decrypt functionality
+// for handling the layer data for a specific algorithm
+type LayerBlockCipher interface {
+	// GenerateKey creates a symmetric key
+	GenerateKey() ([]byte, error)
+	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
+	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
+}
+
+// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
+type LayerBlockCipherHandler struct {
+	cipherMap map[LayerCipherType]LayerBlockCipher
+}
+
+// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
+type Finalizer func() (LayerBlockCipherOptions, error)
+
+// GetOpt returns the value of the cipher option and whether the option exists
+func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
+	if v, ok := lbco.Public.CipherOptions[key]; ok {
+		return v, ok
+	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
+		return v, ok
+	} else {
+		return nil, false
+	}
+}
+
+func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
+	return func() (LayerBlockCipherOptions, error) {
+		lbco, err := fin()
+		if err != nil {
+			return LayerBlockCipherOptions{}, err
+		}
+		lbco.Public.CipherType = typ
+		return lbco, err
+	}
+}
+
+// Encrypt is the handler for the layer encryption routine
+func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
+	if c, ok := h.cipherMap[typ]; ok {
+		sk, err := c.GenerateKey()
+		if err != nil {
+			return nil, nil, err
+		}
+		opt := LayerBlockCipherOptions{
+			Private: PrivateLayerBlockCipherOptions{
+				SymmetricKey: sk,
+			},
+		}
+		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
+		if err == nil {
+			fin = wrapFinalizerWithType(fin, typ)
+		}
+		return encDataReader, fin, err
+	}
+	return nil, nil, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// Decrypt is the handler for the layer decryption routine
+func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	typ := opt.Public.CipherType
+	if typ == "" {
+		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
+	}
+	if c, ok := h.cipherMap[typ]; ok {
+		return c.Decrypt(encDataReader, opt)
+	}
+	return nil, LayerBlockCipherOptions{}, fmt.Errorf("unsupported cipher type: %s", typ)
+}
+
+// NewLayerBlockCipherHandler returns a new default handler
+func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
+	h := LayerBlockCipherHandler{
+		cipherMap: map[LayerCipherType]LayerBlockCipher{},
+	}
+
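+	// Register the built-in ciphers. AES-256-CTR with HMAC-SHA256 is currently
+	// the only supported layer cipher type (see the AES256CTR constant above).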
+	var err error
+	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
+	if err != nil {
+		return nil, fmt.Errorf("unable to set up Cipher AES-256-CTR: %w", err)
+	}
+
+	return &h, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
new file mode 100644
index 00000000..7db03e2e
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go
@@ -0,0 +1,193 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package blockcipher
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/containers/ocicrypt/utils"
+)
+
+// AESCTRLayerBlockCipher implements the AES CTR stream cipher
+type AESCTRLayerBlockCipher struct {
+	keylen         int // in bytes
+	reader         io.Reader
+	encrypt        bool
+	stream         cipher.Stream
+	err            error
+	hmac           hash.Hash
+	expHmac        []byte
+	doneEncrypting bool
+}
+
+type aesctrcryptor struct {
+	bc *AESCTRLayerBlockCipher
+}
+
+// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher; only a key
+// length of 256 bits is currently supported
+func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
+	if bits != 256 {
+		return nil, errors.New("AES CTR bit count not supported")
+	}
+	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
+}
+
+func (r *aesctrcryptor) Read(p []byte) (int, error) {
+	var (
+		o int
+	)
+
+	if r.bc.err != nil {
+		return 0, r.bc.err
+	}
+
+	o, err := utils.FillBuffer(r.bc.reader, p)
+	if err != nil {
+		if err == io.EOF {
+			r.bc.err = err
+		} else {
+			return 0, err
+		}
+	}
+
+	if !r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Before we return EOF we let the HMAC comparison
+			// provide a verdict
+			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
+				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
+				return 0, r.bc.err
+			}
+		}
+	}
+
+	r.bc.stream.XORKeyStream(p[:o], p[:o])
+
+	if r.bc.encrypt {
+		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
+			r.bc.err = fmt.Errorf("could not write to hmac: %w", err)
+			return 0, r.bc.err
+		}
+
+		if r.bc.err == io.EOF {
+			// Final data encrypted; do the 'then-MAC' part
+			r.bc.doneEncrypting = true
+		}
+	}
+
+	return o, r.bc.err
+}
+
+// init initializes an instance
+func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
+	var (
+		err error
+	)
+
+	key := opts.Private.SymmetricKey
+	if len(key) != bc.keylen {
+		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
+	}
+
+	nonce, ok := opts.GetOpt("nonce")
+	if !ok {
+		nonce = make([]byte, aes.BlockSize)
+		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+			return LayerBlockCipherOptions{}, fmt.Errorf("unable to generate random nonce: %w", err)
+		}
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return LayerBlockCipherOptions{}, fmt.Errorf("aes.NewCipher failed: %w", err)
+	}
+
+	bc.reader = reader
+	bc.encrypt = encrypt
+	bc.stream = cipher.NewCTR(block, nonce)
+	bc.err = nil
+	bc.hmac = hmac.New(sha256.New, key)
+	bc.expHmac = opts.Public.Hmac
+	bc.doneEncrypting = false
+
+	if !encrypt && len(bc.expHmac) == 0 {
+		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
+	}
+
+	lbco := LayerBlockCipherOptions{
+		Private: PrivateLayerBlockCipherOptions{
+			SymmetricKey: key,
+			CipherOptions: map[string][]byte{
+				"nonce": nonce,
+			},
+		},
+	}
+
+	return lbco, nil
+}
+
+// GenerateKey creates a symmetric key
+func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
+	key := make([]byte, bc.keylen)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
+	lbco, err := bc.init(true, plainDataReader, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	finalizer := func() (LayerBlockCipherOptions, error) {
+		if !bc.doneEncrypting {
+			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
+		}
+		if lbco.Public.CipherOptions == nil {
+			lbco.Public.CipherOptions = map[string][]byte{}
+		}
+		lbco.Public.Hmac = bc.hmac.Sum(nil)
+		return lbco, nil
+	}
+	return &aesctrcryptor{bc}, finalizer, nil
+}
+
+// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
+func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
+	lbco, err := bc.init(false, encDataReader, opt)
+	if err != nil {
+		return nil, LayerBlockCipherOptions{}, err
+	}
+
+	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/config.go b/vendor/github.com/containers/ocicrypt/config/config.go
new file mode 100644
index 00000000..d960766e
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/config.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package config
+
+// EncryptConfig is the container image PGP encryption configuration holding
+// the identifiers of those that will be able to decrypt the container and
+// the PGP public keyring file data that contains their public keys.
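+//
+// Despite the PGP-centric wording above, the same structure carries the
+// parameters for all supported schemes (see the Parameters keys below); for
+// example, JWE recipient public keys are added under Parameters["pubkeys"].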
+type EncryptConfig struct {
+	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
+	Parameters map[string][][]byte
+
+	DecryptConfig DecryptConfig
+}
+
+// DecryptConfig wraps the Parameters map that holds the decryption key
+type DecryptConfig struct {
+	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
+	Parameters map[string][][]byte
+}
+
+// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
+// be passed through functions that share much code for encryption and decryption
+type CryptoConfig struct {
+	EncryptConfig *EncryptConfig
+	DecryptConfig *DecryptConfig
+}
+
+// InitDecryption initializes a CryptoConfig object with parameters used for decryption
+func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparameters,
+		},
+	}
+}
+
+// InitEncryption initializes a CryptoConfig object with parameters used for encryption
+// It also takes dcparameters that may be needed for decryption when adding a recipient
+// to an already encrypted image
+func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: parameters,
+			DecryptConfig: DecryptConfig{
+				Parameters: dcparameters,
+			},
+		},
+	}
+}
+
+// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
+// containing the crypto configuration of all the key bundles
+func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
+	ecparam := map[string][][]byte{}
+	ecdcparam := map[string][][]byte{}
+	dcparam := map[string][][]byte{}
+
+	for _, cc := range ccs {
+		if ec := cc.EncryptConfig; ec != nil {
+			addToMap(ecparam, ec.Parameters)
+			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
+		}
+
+		if dc := cc.DecryptConfig; dc != nil {
+			addToMap(dcparam, dc.Parameters)
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters: ecparam,
+			DecryptConfig: DecryptConfig{
+				Parameters: ecdcparam,
+			},
+		},
+		DecryptConfig: &DecryptConfig{
+			Parameters: dcparam,
+		},
+	}
+}
+
+// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that
+// the decryption parameters can be used to add recipients to an existing image
+// if the user is able to decrypt it.
+func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
+	if dc != nil {
+		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
+	}
+}
+
+func addToMap(orig map[string][][]byte, add map[string][][]byte) {
+	for k, v := range add {
+		if ov, ok := orig[k]; ok {
+			orig[k] = append(ov, v...)
+		} else {
+			orig[k] = v
+		}
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
new file mode 100644
index 00000000..f7f29cd8
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -0,0 +1,246 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package config + +import ( + "errors" + "fmt" + "strings" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "gopkg.in/yaml.v3" +) + +// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys +func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "pubkeys": pubKeys, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs +func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + + ep := map[string][][]byte{ + "x509s": x509s, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters +func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{ + "gpg-recipients": gpgRecipients, + "gpg-pubkeyringfile": {gpgPubRingFile}, + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters +func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := map[string][][]byte{} + + if len(pkcs11Yamls) > 0 { + if pkcs11Config == nil { + return CryptoConfig{}, errors.New("pkcs11Config must not be nil") + } + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc = DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-config": {p11confYaml}, + }, + } + ep["pkcs11-yamls"] = pkcs11Yamls + } + if len(pkcs11Pubkeys) > 0 { + ep["pkcs11-pubkeys"] = pkcs11Pubkeys + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters +func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{} + ep := make(map[string][][]byte) + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled")) + } + } + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters +func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dp := make(map[string][][]byte) + ep := map[string][][]byte{} + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) + } + } + dc := DecryptConfig{ + Parameters: dp, + } + return CryptoConfig{ + 
EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys +func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { + if len(privKeys) != len(privKeysPasswords) { + return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "privkeys": privKeys, + "privkeys-passwords": privKeysPasswords, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs +func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "x509s": x509s, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys +func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) { + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "gpg-privatekeys": gpgPrivKeys, + "gpg-privatekeys-passwords": gpgPrivKeysPwds, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + +// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files +func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, fmt.Errorf("Could not marshal Pkcs11Config to Yaml: %w", err) + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-yamls": pkcs11Yamls, + "pkcs11-config": {p11confYaml}, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go new file mode 100644 index 00000000..4785a831 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -0,0 +1,80 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// Command describes the structure of a command; it consists of path and args, where path defines the location of
+// the binary executable and args are passed on to the binary executable
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of a key provider; it defines the way the key provider is invoked
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file at the location given by the
+// OCICRYPT_KEYPROVIDER_CONFIG environment variable (e.g. "/etc/ocicrypt_keyprovider.yaml").
+// If no configuration file could be found or read, a nil pointer is returned
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, fmt.Errorf("Error while parsing keyprovider config file: %w", err)
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 00000000..072d7fe1
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+       http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v3"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
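+//
+// A typical key file might look like this (illustrative values only):
+//
+//	pkcs11:
+//	    uri: pkcs11:token=mytoken;object=mykey?module-path=/usr/lib64/pkcs11/libsofthsm2.so
+//	module:
+//	    env:
+//	        SOFTHSM2_CONF: /home/user/softhsm2.conf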
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11URI from file: %w", err)
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+// pkcs11:
+//     uri: <pkcs11 URI>
+// An error is returned if the pkcs11 URI is malformed
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal(yamlstr, &p11keyfile)
+	if err != nil {
+		return nil, fmt.Errorf("Could not unmarshal pkcs11 keyfile: %w", err)
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file
+// The file has the following yaml format:
+// module-directories:
+// - /usr/lib64/pkcs11/
+// allowed-module-paths:
+// - /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted for YAML
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
+	p11conf := Pkcs11Config{}
+
+	err := yaml.Unmarshal(yamlstr, &p11conf)
+	if err != nil {
+		return &p11conf, fmt.Errorf("Could not parse Pkcs11Config: %w", err)
+	}
+	return &p11conf, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
new file mode 100644
index
00000000..fe047a1e --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -0,0 +1,485 @@ +//go:build cgo +// +build cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "net/url" + "os" + "strconv" + "strings" + + "github.com/miekg/pkcs11" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +var ( + // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed + OAEPLabel = []byte("") + + // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM + OAEPSha1Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA_1, + MGF: pkcs11.CKG_MGF1_SHA1, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } + // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm + OAEPSha256Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA256, + MGF: pkcs11.CKG_MGF1_SHA256, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } +) + +// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the +// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
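+// (For example, running the client with OCICRYPT_OAEP_HASHALG=sha1 selects SHA-1,
+// while leaving the variable unset, or setting it to sha256, selects SHA-256.)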
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256", "":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", fmt.Errorf("rsa.EncryptOAEP failed: %w", err)
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, fmt.Errorf("No module available in pkcs11 URI: %w", err)
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, fmt.Errorf("slot-id is not a valid number: %w", err)
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key id and label by retrieving the values of the
+// 'id' and 'object' attributes
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(slotid, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, fmt.Errorf("OpenSession to slot %d failed: %w", slotid, err)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, fmt.Errorf("Could not login to device: %w", err)
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx
*pkcs11.Ctx, session pkcs11.SessionHandle, err error) { + pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) + if err != nil { + return nil, 0, err + } + + p11ctx := pkcs11.New(module) + if p11ctx == nil { + return nil, 0, errors.New("Please check module path, input is: " + module) + } + + err = p11ctx.Initialize() + if err != nil { + p11Err := err.(pkcs11.Error) + if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { + return nil, 0, fmt.Errorf("Initialize failed: %w", err) + } + } + + if slotid >= 0 { + session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) + return p11ctx, session, err + } + + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, fmt.Errorf("GetSlotList failed: %w", err) + } + + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } + + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue + } + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err + } + } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") +} + +func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { + _ = ctx.Logout(session) + _ = ctx.CloseSession(session) + _ = ctx.Finalize() + ctx.Destroy() +} + +// findObject finds an object of the given class with the given keyid and/or label +func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { + msg := "" + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + } + if len(label) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + msg = fmt.Sprintf("label '%s'", label) + } + if len(keyid) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) + if len(msg) > 0 { + msg += " and " + } + msg += url.PathEscape(keyid) + } + + if err := p11ctx.FindObjectsInit(session, template); err != nil { + return 0, fmt.Errorf("FindObjectsInit failed: %w", err) + } + + obj, _, err := p11ctx.FindObjects(session, 100) + if err != nil { + return 0, fmt.Errorf("FindObjects failed: %w", err) + } + + if err := p11ctx.FindObjectsFinal(session); err != nil { + return 0, fmt.Errorf("FindObjectsFinal failed: %w", err) + } + if len(obj) > 1 { + return 0, fmt.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + } else if len(obj) == 1 { + return obj[0], nil + } + + return 0, fmt.Errorf("Could not find any object with %s", msg) +} + +// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext +func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { + oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) + if err != nil { + return nil, "", err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) + if err != nil { + return nil, "", err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) + if err != nil { + return nil, "", err + } + + p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) + if err != nil { + return nil, "", err + } + + var hashalg string + + var oaep *pkcs11.OAEPParams + oaephash := 
os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is sha256 (previously was sha1)
+	switch strings.ToLower(oaephash) {
+	case "sha1":
+		oaep = OAEPSha1Params
+		hashalg = "sha1"
+	case "sha256", "":
+		oaep = OAEPSha256Params
+		hashalg = "sha256"
+	default:
+		return nil, "", fmt.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+
+	err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey)
+	if err != nil {
+		return nil, "", fmt.Errorf("EncryptInit error: %w", err)
+	}
+
+	ciphertext, err := p11ctx.Encrypt(session, plaintext)
+	if err != nil {
+		return nil, "", fmt.Errorf("Encrypt failed: %w", err)
+	}
+	return ciphertext, hashalg, nil
+}
+
+// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext
+func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) {
+	oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap())
+	if err != nil {
+		return nil, err
+	}
+	defer restoreEnv(oldenv)
+
+	p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true)
+	if err != nil {
+		return nil, err
+	}
+	defer pkcs11Logout(p11ctx, session)
+
+	keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri)
+	if err != nil {
+		return nil, err
+	}
+
+	p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label)
+	if err != nil {
+		return nil, err
+	}
+
+	var oaep *pkcs11.OAEPParams
+
+	// An empty string from the Hash in the JSON historically defaults to sha1.
+	switch hashalg {
+	case "sha1", "":
+		oaep = OAEPSha1Params
+	case "sha256":
+		oaep = OAEPSha256Params
+	default:
+		return nil, fmt.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg)
+	}
+
+	err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey)
+	if err != nil {
+		return nil, fmt.Errorf("DecryptInit failed: %w", err)
+	}
+	plaintext, err := p11ctx.Decrypt(session, ciphertext)
+	if err != nil {
+		return nil, fmt.Errorf("Decrypt failed: %w", err)
+	}
+	return plaintext, err
+}
+
+//
+// The following part deals with the JSON formatted message for multiple pkcs11 recipients
+//
+
+// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations
+type Pkcs11Blob struct {
+	Version    uint              `json:"version"`
+	Recipients []Pkcs11Recipient `json:"recipients"`
+}
+
+// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient
+type Pkcs11Recipient struct {
+	Version uint   `json:"version"`
+	Blob    string `json:"blob"`
+	Hash    string `json:"hash,omitempty"`
+}
+
+// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function
+// may either be *rsa.PublicKey or *Pkcs11KeyFileObject; the returned byte array is a JSON string of the
+// following format:
+// {
+//   recipients: [  // recipient list
+//     {
+//       "version": 0,
+//       "blob": <base64-encoded encrypted blob>,
+//       "hash": <OAEP hash algorithm>
+//     },
+//     {
+//       "version": 0,
+//       "blob": <base64-encoded encrypted blob>,
+//       "hash": <OAEP hash algorithm>
+//     },
+//     [...]
+//   ]
+// }
+func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
+	var (
+		ciphertext []byte
+		err        error
+		pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0}
+		hashalg    string
+	)
+
+	for _, pubKey := range pubKeys {
+		switch pkey := pubKey.(type) {
+		case *rsa.PublicKey:
+			ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data)
+		case *Pkcs11KeyFileObject:
+			ciphertext, hashalg, err = publicEncryptOAEP(pkey, data)
+		default:
+			err = fmt.Errorf("Unsupported key object type for pkcs11 public key")
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		recipient := Pkcs11Recipient{
+			Version: 0,
+			Blob:    base64.StdEncoding.EncodeToString(ciphertext),
+			Hash:    hashalg,
+		}
+
+		pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient)
+	}
+	return json.Marshal(&pkcs11blob)
+}
+
+// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key.
+// The input pkcs11blobstr is a string with the following format:
+// {
+//   recipients: [  // recipient list
+//     {
+//       "version": 0,
+//       "blob": <base64-encoded encrypted blob>,
+//       "hash": <OAEP hash algorithm>
+//     },
+//     {
+//       "version": 0,
+//       "blob": <base64-encoded encrypted blob>,
+//       "hash": <OAEP hash algorithm>
+//     },
+//     [...]
+//   ]
+// }
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty in case of 'sha1'.
+func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
+	pkcs11blob := Pkcs11Blob{}
+	err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse Pkcs11Blob: %w", err)
+	}
+	switch pkcs11blob.Version {
+	case 0:
+		// latest supported version
+	default:
+		return nil, fmt.Errorf("found Pkcs11Blob with version %d but maximum supported version is 0", pkcs11blob.Version)
+	}
+	// since we do trial and error, collect all encountered errors
+	errs := ""
+
+	for _, recipient := range pkcs11blob.Recipients {
+		switch recipient.Version {
+		case 0:
+			// latest supported version
+		default:
+			return nil, fmt.Errorf("found Pkcs11Recipient with version %d but maximum supported version is 0", recipient.Version)
+		}
+
+		ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob)
+		if err != nil || len(ciphertext) == 0 {
+			// This should never happen... we skip over decoding issues
+			errs += fmt.Sprintf("Base64 decoding failed: %s\n", err)
+			continue
+		}
+		// try all keys until one works
+		for _, privKeyObj := range privKeyObjs {
+			plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash)
+			if err == nil {
+				return plaintext, nil
+			}
+			if uri, err2 := privKeyObj.Uri.Format(); err2 == nil {
+				errs += fmt.Sprintf("%s : %s\n", uri, err)
+			} else {
+				errs += fmt.Sprintf("%s\n", err)
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs)
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
new file mode 100644
index 00000000..6cf0aa2a
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go
@@ -0,0 +1,30 @@
+//go:build !cgo
+// +build !cgo
+
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import "fmt" + +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} + +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + return nil, fmt.Errorf("ocicrypt pkcs11 not supported on this build") +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go new file mode 100644 index 00000000..231da231 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -0,0 +1,115 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "fmt" + "os" + "runtime" + "strings" + "sync" +) + +var ( + envLock sync.Mutex +) + +// setEnvVars sets the environment variables given in the map and locks the environment from +// modification with the same function; if successful, you *must* call restoreEnv with the return +// value from this function +func setEnvVars(env map[string]string) ([]string, error) { + envLock.Lock() + + if len(env) == 0 { + return nil, nil + } + + oldenv := os.Environ() + + for k, v := range env { + err := os.Setenv(k, v) + if err != nil { + restoreEnv(oldenv) + return nil, fmt.Errorf("Could not set environment variable '%s' to '%s': %w", k, v, err) + } + } + + return oldenv, nil +} + +func arrayToMap(elements []string) map[string]string { + o := make(map[string]string) + + for _, element := range elements { + p := strings.SplitN(element, "=", 2) + if len(p) == 2 { + o[p[0]] = p[1] + } + } + + return o +} + +// restoreEnv restores the environment to be exactly as given in the array of strings +// and unlocks the lock +func restoreEnv(envs []string) { + if envs != nil && len(envs) >= 0 { + target := arrayToMap(envs) + curr := arrayToMap(os.Environ()) + + for nc, vc := range curr { + vt, ok := target[nc] + if !ok { + os.Unsetenv(nc) + } else if vc == vt { + delete(target, nc) + } + } + + for nt, vt := range target { + os.Setenv(nt, vt) + } + } + + envLock.Unlock() +} + +func getHostAndOsType() (string, string, string) { + ht := "" + ot := "" + st := "" + switch runtime.GOOS { + case "linux": + ot = "linux" + st = "gnu" + switch runtime.GOARCH { + case "arm": + ht = "arm" + case "arm64": + ht = "aarch64" + case "amd64": + ht = "x86_64" + case "ppc64le": + ht = "powerpc64le" + case "riscv64": + ht = "riscv64" + case "s390x": + ht = "s390x" + } + } + return ht, ot, st +} diff --git a/vendor/github.com/containers/ocicrypt/encryption.go 
b/vendor/github.com/containers/ocicrypt/encryption.go
new file mode 100644
index 00000000..b6fa9db4
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/encryption.go
@@ -0,0 +1,356 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/containers/ocicrypt/blockcipher"
+	"github.com/containers/ocicrypt/config"
+	keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config"
+	"github.com/containers/ocicrypt/keywrap"
+	"github.com/containers/ocicrypt/keywrap/jwe"
+	"github.com/containers/ocicrypt/keywrap/keyprovider"
+	"github.com/containers/ocicrypt/keywrap/pgp"
+	"github.com/containers/ocicrypt/keywrap/pkcs11"
+	"github.com/containers/ocicrypt/keywrap/pkcs7"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	log "github.com/sirupsen/logrus"
+)
+
+// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
+// the encrypted layer
+type EncryptLayerFinalizer func() (map[string]string, error)
+
+func init() {
+	keyWrappers = make(map[string]keywrap.KeyWrapper)
+	keyWrapperAnnotations = make(map[string]string)
+	RegisterKeyWrapper("pgp", pgp.NewKeyWrapper())
+	RegisterKeyWrapper("jwe", jwe.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper())
+	RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper())
+	ic, err := keyproviderconfig.GetConfiguration()
+	if err != nil {
+		log.Error(err)
+	} else if ic != nil {
+		for provider, attrs := range ic.KeyProviderConfig {
+			RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs))
+		}
+	}
+}
+
+var keyWrappers map[string]keywrap.KeyWrapper
+var keyWrapperAnnotations map[string]string
+
+// RegisterKeyWrapper allows registering key wrappers by their encryption scheme
+func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) {
+	keyWrappers[scheme] = iface
+	keyWrapperAnnotations[iface.GetAnnotationID()] = scheme
+}
+
+// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe)
+func GetKeyWrapper(scheme string) keywrap.KeyWrapper {
+	return keyWrappers[scheme]
+}
+
+// GetWrappedKeysMap returns a map with the encryption scheme(s) as keys and the
+// wrapped keys from the descriptor's annotations as values
+func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string {
+	wrappedKeysMap := make(map[string]string)
+
+	for annotationsID, scheme := range keyWrapperAnnotations {
+		if annotation, ok := desc.Annotations[annotationsID]; ok {
+			wrappedKeysMap[scheme] = annotation
+		}
+	}
+	return wrappedKeysMap
+}
+
+// EncryptLayer encrypts the layer by running one encryptor after the other
+func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) {
+	var (
+		encLayerReader io.Reader
+		err            error
+		encrypted      bool
+		bcFin          blockcipher.Finalizer
+		privOptsData   []byte
+		pubOptsData    []byte
+	)
+
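+	// If the descriptor already carries key-wrap annotations, the layer is
+	// already encrypted; its existing options are reused below instead of
+	// encrypting the data a second time.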
if ec == nil { + return nil, nil, errors.New("EncryptConfig must not be nil") + } + + for annotationsID := range keyWrapperAnnotations { + annotation := desc.Annotations[annotationsID] + if annotation != "" { + privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc) + if err != nil { + return nil, nil, err + } + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, nil, err + } + // already encrypted! + encrypted = true + } + } + + if !encrypted { + encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR) + if err != nil { + return nil, nil, err + } + } + + encLayerFinalizer := func() (map[string]string, error) { + // If layer was already encrypted, bcFin should be nil, use existing optsData + if bcFin != nil { + opts, err := bcFin() + if err != nil { + return nil, err + } + privOptsData, err = json.Marshal(opts.Private) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + pubOptsData, err = json.Marshal(opts.Public) + if err != nil { + return nil, fmt.Errorf("could not JSON marshal opts: %w", err) + } + } + + newAnnotations := make(map[string]string) + keysWrapped := false + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotations := desc.Annotations[annotationsID] + keywrapper := GetKeyWrapper(scheme) + b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData) + if err != nil { + return nil, err + } + if b64Annotations != "" { + keysWrapped = true + newAnnotations[annotationsID] = b64Annotations + } + } + + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } + newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) + + if len(newAnnotations) == 0 { + return nil, errors.New("no encryptor found to handle encryption") + } + + return newAnnotations, err + } + + // if nothing was encrypted, we just return encLayer = nil + return encLayerReader, encLayerFinalizer, err + +} + +// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the +// annotation data +func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) { + newAnnotation, err := keywrapper.WrapKeys(ec, optsData) + if err != nil || len(newAnnotation) == 0 { + return b64Annotations, err + } + b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation) + if b64Annotations == "" { + return b64newAnnotation, nil + } + return b64Annotations + "," + b64newAnnotation, nil +} + +// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it +// can apply the provided private key +// If unwrapOnly is set we will only try to decrypt the layer encryption key and return +func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) { + if dc == nil { + return nil, "", errors.New("DecryptConfig must not be nil") + } + privOptsData, err := decryptLayerKeyOptsData(dc, desc) + if err != nil || unwrapOnly { + return nil, "", err + } + + var pubOptsData []byte + pubOptsData, err = getLayerPubOpts(desc) + if err != nil { + return nil, "", err + } + + return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData) +} + +func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc 
ocispec.Descriptor) ([]byte, error) { + privKeyGiven := false + errs := "" + if len(keyWrapperAnnotations) == 0 { + return nil, errors.New("missing Annotations needed for decryption") + } + for annotationsID, scheme := range keyWrapperAnnotations { + b64Annotation := desc.Annotations[annotationsID] + if b64Annotation != "" { + keywrapper := GetKeyWrapper(scheme) + + if keywrapper.NoPossibleKeys(dc.Parameters) { + continue + } + + if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { + privKeyGiven = true + } + optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) + if err != nil { + // try next keywrap.KeyWrapper + errs += fmt.Sprintf("%s\n", err) + continue + } + if optsData == nil { + // try next keywrap.KeyWrapper + continue + } + return optsData, nil + } + } + if !privKeyGiven { + return nil, fmt.Errorf("missing private key needed for decryption:\n%s", errs) + } + return nil, fmt.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) +} + +func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { + pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"] + if pubOptsString == "" { + return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{}) + } + return base64.StdEncoding.DecodeString(pubOptsString) +} + +// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function +// of the given keywrapper with it and returns the result in case the Unwrap functions +// does not return an error. If all attempts fail, an error is returned. +func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) { + if b64Annotations == "" { + return nil, nil + } + errs := "" + for _, b64Annotation := range strings.Split(b64Annotations, ",") { + annotation, err := base64.StdEncoding.DecodeString(b64Annotation) + if err != nil { + return nil, errors.New("could not base64 decode the annotation") + } + optsData, err := keywrapper.UnwrapKey(dc, annotation) + if err != nil { + errs += fmt.Sprintf("- %s\n", err) + continue + } + return optsData, nil + } + return nil, fmt.Errorf("no suitable key found for decrypting layer key:\n%s", errs) +} + +// commonEncryptLayer is a function to encrypt the plain layer using a new random +// symmetric key and return the LayerBlockCipherHandler's JSON in string form for +// later use during decryption +func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) { + lbch, err := blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, nil, err + } + + encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ) + if err != nil { + return nil, nil, err + } + + newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) { + lbco, err := bcFin() + if err != nil { + return blockcipher.LayerBlockCipherOptions{}, err + } + lbco.Private.Digest = d + return lbco, nil + } + + return encLayerReader, newBcFin, err +} + +// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer +// by passing along the optsData +func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) { + privOpts := blockcipher.PrivateLayerBlockCipherOptions{} + err := json.Unmarshal(privOptsData, &privOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal privOptsData: %w", err) + } + + lbch, err := 
blockcipher.NewLayerBlockCipherHandler() + if err != nil { + return nil, "", err + } + + pubOpts := blockcipher.PublicLayerBlockCipherOptions{} + if len(pubOptsData) > 0 { + err := json.Unmarshal(pubOptsData, &pubOpts) + if err != nil { + return nil, "", fmt.Errorf("could not JSON unmarshal pubOptsData: %w", err) + } + } + + opts := blockcipher.LayerBlockCipherOptions{ + Private: privOpts, + Public: pubOpts, + } + + plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts) + if err != nil { + return nil, "", err + } + + return plainLayerReader, opts.Private.Digest, nil +} + +// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace' +// and returns a map with those taken out +func FilterOutAnnotations(annotations map[string]string) map[string]string { + a := make(map[string]string) + if len(annotations) > 0 { + for k, v := range annotations { + if strings.HasPrefix(k, "org.opencontainers.image.enc.") { + continue + } + a[k] = v + } + } + return a +} diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go new file mode 100644 index 00000000..3912e82d --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -0,0 +1,432 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ocicrypt + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/term" +) + +// GPGVersion enum representing the GPG client version to use. +type GPGVersion int + +const ( + // GPGv2 signifies gpgv2+ + GPGv2 GPGVersion = iota + // GPGv1 signifies gpgv1+ + GPGv1 + // GPGVersionUndetermined signifies gpg client version undetermined + GPGVersionUndetermined +) + +// GPGClient defines an interface for wrapping the gpg command line tools +type GPGClient interface { + // ReadGPGPubRingFile gets the byte sequence of the gpg public keyring + ReadGPGPubRingFile() ([]byte, error) + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) + // GetSecretKeyDetails gets the details of a secret key + GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) + // GetKeyDetails gets the details of a public key + GetKeyDetails(keyid uint64) ([]byte, bool, error) + // ResolveRecipients resolves PGP key ids to user names + ResolveRecipients([]string) []string +} + +// gpgClient contains generic gpg client information +type gpgClient struct { + gpgHomeDir string +} + +// gpgv2Client is a gpg2 client +type gpgv2Client struct { + gpgClient +} + +// gpgv1Client is a gpg client +type gpgv1Client struct { + gpgClient +} + +// GuessGPGVersion guesses the version of gpg. Defaults to gpg2 if exists, if +// not defaults to regular gpg. 
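// Illustrative sketch, not part of the vendored source: one way a caller
// might drive the EncryptLayer/DecryptLayer entry points from encryption.go
// above. All inputs (ec, dc, desc, plainLayer) are hypothetical, and the
// sketch assumes bytes, io, config and ocispec imports. One ordering detail
// visible in the finalizer above: the key-wrap annotations are only complete
// once the encrypted stream has been fully drained.
func exampleEncryptDecryptLayer(ec *config.EncryptConfig, dc *config.DecryptConfig, plainLayer io.Reader, desc ocispec.Descriptor) error {
	encReader, finalizer, err := EncryptLayer(ec, plainLayer, desc)
	if err != nil {
		return err
	}
	encrypted, err := io.ReadAll(encReader) // drain the encrypted stream first
	if err != nil {
		return err
	}
	annotations, err := finalizer() // then collect the key-wrap annotations
	if err != nil {
		return err
	}
	if desc.Annotations == nil {
		desc.Annotations = map[string]string{}
	}
	for k, v := range annotations {
		desc.Annotations[k] = v
	}
	// Round-trip: decrypt with the private-key side of the configuration.
	plain, _, err := DecryptLayer(dc, bytes.NewReader(encrypted), desc, false)
	if err != nil {
		return err
	}
	_, err = io.ReadAll(plain)
	return err
}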
+func GuessGPGVersion() GPGVersion { + if err := exec.Command("gpg2", "--version").Run(); err == nil { + return GPGv2 + } else if err := exec.Command("gpg", "--version").Run(); err == nil { + return GPGv1 + } else { + return GPGVersionUndetermined + } +} + +// NewGPGClient creates a new GPGClient object representing the given version +// and using the given home directory +func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) { + v := new(GPGVersion) + switch gpgVersion { + case "v1": + *v = GPGv1 + case "v2": + *v = GPGv2 + default: + v = nil + } + return newGPGClient(v, gpgHomeDir) +} + +func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) { + var gpgVersion GPGVersion + if version != nil { + gpgVersion = *version + } else { + gpgVersion = GuessGPGVersion() + } + + switch gpgVersion { + case GPGv1: + return &gpgv1Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGv2: + return &gpgv2Client{ + gpgClient: gpgClient{gpgHomeDir: homedir}, + }, nil + case GPGVersionUndetermined: + return nil, fmt.Errorf("unable to determine GPG version") + default: + return nil, fmt.Errorf("unhandled case: NewGPGClient") + } +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + + rfile, wfile, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("could not create pipe: %w", err) + } + defer func() { + rfile.Close() + wfile.Close() + }() + // fill pipe in background + go func(passphrase string) { + _, _ = wfile.Write([]byte(passphrase)) + wfile.Close() + }(passphrase) + + args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg2", args...) + cmd.ExtraFiles = []*os.File{rfile} + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg2", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg2", args...) + + keydata, err := runGPGGetOutput(cmd) + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +// ReadGPGPubRingFile reads the GPG public key ring file +func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = append(args, []string{"--homedir", gc.gpgHomeDir}...) + } + args = append(args, []string{"--batch", "--export"}...) + + cmd := exec.Command("gpg", args...) + + return runGPGGetOutput(cmd) +} + +func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) { + var args []string + + if gc.gpgHomeDir != "" { + args = []string{"--homedir", gc.gpgHomeDir} + } + args = append(args, option, fmt.Sprintf("0x%x", keyid)) + + cmd := exec.Command("gpg", args...) + + keydata, err := runGPGGetOutput(cmd) + + return keydata, err == nil, err +} + +// GetSecretKeyDetails retrieves the secret key details of key with keyid. +// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-K", keyid) +} + +// GetKeyDetails retrieves the public key details of key with keyid. 
+// returns a byte array of the details and a bool if the key exists +func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { + return gc.getKeyDetails("-k", keyid) +} + +// ResolveRecipients converts PGP keyids to email addresses, if possible +func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string { + return resolveRecipients(gc, recipients) +} + +// runGPGGetOutput runs the GPG commandline and returns stdout as byte array +// and any stderr in the error +func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) { + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + if err := cmd.Start(); err != nil { + return nil, err + } + + stdoutstr, err2 := io.ReadAll(stdout) + stderrstr, _ := io.ReadAll(stderr) + + if err := cmd.Wait(); err != nil { + return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr)) + } + + return stdoutstr, err2 +} + +// resolveRecipients walks the list of recipients and attempts to convert +// all keyIds to email addresses; if something goes wrong during the +// conversion of a recipient, the original string is returned for that +// recpient +func resolveRecipients(gc GPGClient, recipients []string) []string { + var result []string + + for _, recipient := range recipients { + keyID, err := strconv.ParseUint(recipient, 0, 64) + if err != nil { + result = append(result, recipient) + } else { + details, found, _ := gc.GetKeyDetails(keyID) + if !found { + result = append(result, recipient) + } else { + email := extractEmailFromDetails(details) + if email == "" { + result = append(result, recipient) + } else { + result = append(result, email) + } + } + } + } + return result +} + +var ( + onceRegexp sync.Once + emailPattern *regexp.Regexp +) + +func extractEmailFromDetails(details []byte) string { + onceRegexp.Do(func() { + emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P.+)>`) + }) + loc := emailPattern.FindSubmatchIndex(details) + if len(loc) == 0 { + return "" + } + return string(emailPattern.Expand(nil, []byte("$email"), details, loc)) +} + +// uint64ToStringArray converts an array of uint64's to an array of strings +// by applying a format string to each uint64 +func uint64ToStringArray(format string, in []uint64) []string { + var ret []string + + for _, v := range in { + ret = append(ret, fmt.Sprintf(format, v)) + } + return ret +} + +// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the +// wrapped symmetric keys. For this it determines whether a private key is +// in the GPGVault or on this system and prompts for the passwords for those +// that are available. If we do not find a private key on the system for +// getting to the symmetric key of a layer then an error is generated. 
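// Illustrative sketch, not part of the vendored source: constructing a GPG
// client with the helpers above. The home directory and key id are
// hypothetical; an empty version string makes NewGPGClient fall back to
// GuessGPGVersion, which prefers gpg2 over gpg.
func exampleGPGClient() error {
	client, err := NewGPGClient("", "/tmp/example-gpghome")
	if err != nil {
		return err
	}
	const keyid uint64 = 0x1234567890abcdef // hypothetical key id
	details, found, err := client.GetKeyDetails(keyid)
	if err != nil || !found {
		return fmt.Errorf("public key 0x%x not available: %v", keyid, err)
	}
	_ = details
	// ResolveRecipients maps numeric key ids to e-mail addresses when the key
	// details carry a uid line; unknown ids are passed through unchanged.
	_ = client.ResolveRecipients([]string{fmt.Sprintf("0x%x", keyid)})
	return nil
}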
+func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) { + // PrivateKeyData describes a private key + type PrivateKeyData struct { + KeyData []byte + KeyDataPassword []byte + } + var pkd PrivateKeyData + keyIDPasswordMap := make(map[uint64]PrivateKeyData) + + for _, desc := range descs { + for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) { + if scheme != "pgp" { + continue + } + keywrapper := GetKeyWrapper(scheme) + if keywrapper == nil { + return nil, nil, fmt.Errorf("could not get KeyWrapper for %s", scheme) + } + keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, nil, err + } + + found := false + for _, keyid := range keyIds { + // do we have this key? -- first check the vault + if gpgVault != nil { + _, keydata := gpgVault.GetGPGPrivateKey(keyid) + if len(keydata) > 0 { + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: nil, // password not supported in this case + } + keyIDPasswordMap[keyid] = pkd + found = true + break + } + } else if gpgClient != nil { + // check the local system's gpg installation + keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid) + // this may fail if the key is not here; we ignore the error + if !haveKey { + // key not on this system + continue + } + + _, found = keyIDPasswordMap[keyid] + if !found { + fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo)) + fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid) + + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Printf("\n") + if err != nil { + return nil, nil, err + } + keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password)) + if err != nil { + return nil, nil, err + } + pkd = PrivateKeyData{ + KeyData: keydata, + KeyDataPassword: password, + } + keyIDPasswordMap[keyid] = pkd + found = true + } + break + } else { + return nil, nil, errors.New("no GPGVault or GPGClient passed") + } + } + if !found && len(b64pgpPackets) > 0 && mustFindKey { + ids := uint64ToStringArray("0x%x", keyIds) + + return nil, nil, fmt.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", ")) + } + } + } + + for _, pkd := range keyIDPasswordMap { + gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData) + gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword) + } + + return gpgPrivKeys, gpgPrivKeysPwds, nil +} diff --git a/vendor/github.com/containers/ocicrypt/gpgvault.go b/vendor/github.com/containers/ocicrypt/gpgvault.go new file mode 100644 index 00000000..f1bd0d98 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/gpgvault.go @@ -0,0 +1,100 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ocicrypt + +import ( + "bytes" + "fmt" + "os" + + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +// GPGVault defines an interface for wrapping multiple secret key rings +type GPGVault interface { + // AddSecretKeyRingData adds a secret keyring via its raw byte array + AddSecretKeyRingData(gpgSecretKeyRingData []byte) error + // AddSecretKeyRingDataArray adds secret keyring via its raw byte arrays + AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error + // AddSecretKeyRingFiles adds secret keyrings given their filenames + AddSecretKeyRingFiles(filenames []string) error + // GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase + GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) +} + +// gpgVault wraps an array of gpgSecretKeyRing +type gpgVault struct { + entityLists []openpgp.EntityList + keyDataList [][]byte // the raw data original passed in +} + +// NewGPGVault creates an empty GPGVault +func NewGPGVault() GPGVault { + return &gpgVault{} +} + +// AddSecretKeyRingData adds a secret keyring's to the gpgVault; the raw byte +// array read from the file must be passed and will be parsed by this function +func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error { + // read the private keys + r := bytes.NewReader(gpgSecretKeyRingData) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return fmt.Errorf("could not read keyring: %w", err) + } + g.entityLists = append(g.entityLists, entityList) + g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData) + return nil +} + +// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte +// arrays read from files must be passed +func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error { + for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray { + if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil { + return err + } + } + return nil +} + +// AddSecretKeyRingFiles adds the secret key rings given their filenames +func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error { + for _, filename := range filenames { + gpgSecretKeyRingData, err := os.ReadFile(filename) + if err != nil { + return err + } + err = g.AddSecretKeyRingData(gpgSecretKeyRingData) + if err != nil { + return err + } + } + return nil +} + +// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase +func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) { + for i, el := range g.entityLists { + decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications) + if len(decKeys) > 0 { + return decKeys, g.keyDataList[i] + } + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go new file mode 100644 index 00000000..cd2241cb --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -0,0 +1,137 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package jwe + +import ( + "crypto/ecdsa" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "github.com/go-jose/go-jose/v3" +) + +type jweKeyWrapper struct { +} + +func (kw *jweKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.jwe" +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &jweKeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + var joseRecipients []jose.Recipient + + err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"]) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(joseRecipients) == 0 { + return nil, nil + } + + encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil) + if err != nil { + return nil, fmt.Errorf("jose.NewMultiEncrypter failed: %w", err) + } + jwe, err := encrypter.Encrypt(optsData) + if err != nil { + return nil, fmt.Errorf("JWE Encrypt failed: %w", err) + } + return []byte(jwe.FullSerialize()), nil +} + +func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) { + jwe, err := jose.ParseEncrypted(string(jweString)) + if err != nil { + return nil, errors.New("jose.ParseEncrypted failed") + } + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for JWE decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("Private key password array length must be same as that of private keys") + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE") + if err != nil { + return nil, err + } + _, _, plain, err := jwe.DecryptMulti(key) + if err == nil { + return plain, nil + } + } + return nil, errors.New("JWE: No suitable private key found for decryption") +} + +func (kw *jweKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) { + return nil, nil +} + +func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) { + return []string{"[jwe]"}, nil +} + +func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error { + if len(pubKeys) == 0 { + return nil + } + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "JWE") + if err != nil { + return err + } + + alg := jose.RSA_OAEP + switch key.(type) { + case *ecdsa.PublicKey: + alg = jose.ECDH_ES_A256KW + } + + *joseRecipients = append(*joseRecipients, jose.Recipient{ + Algorithm: alg, + Key: key, + }) + } + return nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go 
b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 00000000..ddb244a8 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,244 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
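// Illustrative sketch, not part of the vendored source: building the
// serialized message that a key-provider binary or gRPC service receives
// (KeyProviderKeyWrapProtocolOutput, just below, is its reply envelope).
// The EncryptConfig and optsData values are hypothetical; KeyWrapParams is
// defined a few lines further down.
func exampleKeyWrapInput(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
	// Marshals to JSON of the form:
	//   {"op":"keywrap","keywrapparams":{"ec":{...},"optsdata":"<base64>"}}
	return json.Marshal(KeyProviderKeyWrapProtocolInput{
		Operation:     OpKeyWrap,
		KeyWrapParams: KeyWrapParams{Ec: ec, OptsData: optsData},
	})
}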
+type KeyProviderKeyWrapProtocolOutput struct { + // KeyWrapResult encodes the results to key wrap if operation is to wrap + KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` + // KeyUnwrapResult encodes the result to key unwrap if operation is to unwrap + KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` +} + +type KeyWrapParams struct { + Ec *config.EncryptConfig `json:"ec"` + OptsData []byte `json:"optsdata"` +} + +type KeyUnwrapParams struct { + Dc *config.DecryptConfig `json:"dc"` + Annotation []byte `json:"annotation"` +} + +type KeyUnwrapResults struct { + OptsData []byte `json:"optsdata"` +} + +type KeyWrapResults struct { + Annotation []byte `json:"annotation"` +} + +var runner utils.CommandExecuter + +func init() { + runner = utils.Runner{} +} + +// WrapKeys calls appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyWrap, + KeyWrapParams: KeyWrapParams{ + Ec: ec, + OptsData: optsData, + }, + }) + + if err != nil { + return nil, err + } + + if _, ok := ec.Parameters[kw.provider]; ok { + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol command output: %w", err) + } + return protocolOuput.KeyWrapResults.Annotation, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) + if err != nil { + return nil, fmt.Errorf("error while retrieving keyprovider protocol grpc output: %w", err) + } + + return protocolOuput.KeyWrapResults.Annotation, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } + } + + return nil, nil +} + +// UnwrapKey calls appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, +// which describe the symmetric key used for decrypting the layer +func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyUnwrap, + KeyUnwrapParams: KeyUnwrapParams{ + Dc: dc, + Annotation: jsonString, + }, + }) + if err != nil { + return nil, err + } + + if kw.attrs.Command != nil { + protocolOuput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else if kw.attrs.Grpc != "" { + protocolOuput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) + if err != nil { + // If err is not nil, then ignore it and continue with rest of the given keyproviders + return nil, err + } + + return protocolOuput.KeyUnwrapResults.OptsData, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation. 
Supported invocation methods are grpc and cmd") + } +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("error while dialing rpc server: %w", err) + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("Error from grpc method: %w", err) + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling grpc method output: %w", err) + } + + return &protocolOuput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOuput KeyProviderKeyWrapProtocolOutput + // Convert interface to command structure + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOuput) + if err != nil { + return nil, fmt.Errorf("Error while unmarshalling binary executable command output: %w", err) + } + return &protocolOuput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go new file mode 100644 index 00000000..ed25e7da --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keywrap.go @@ -0,0 +1,48 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package keywrap
+
+import (
+	"github.com/containers/ocicrypt/config"
+)
+
+// KeyWrapper is the interface used for wrapping keys using
+// a specific encryption technology (pgp, jwe)
+type KeyWrapper interface {
+	WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error)
+	UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error)
+	GetAnnotationID() string
+
+	// NoPossibleKeys returns true if there is no possibility of performing
+	// decryption for the parameters provided.
+	NoPossibleKeys(dcparameters map[string][][]byte) bool
+
+	// GetPrivateKeys (optional) gets the array of private keys. It is optional
+	// since in some key services a private key may not be exportable (e.g. an HSM).
+	// If not implemented, return nil.
+	GetPrivateKeys(dcparameters map[string][][]byte) [][]byte
+
+	// GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional since
+	// some encryption schemes may not have a notion of key IDs.
+	// If not implemented, return the nil slice.
+	GetKeyIdsFromPacket(packet string) ([]uint64, error)
+
+	// GetRecipients (optional) gets a list of recipients. It is optional since
+	// recipients may not be meaningful in a particular encryption scheme.
+	// If not implemented, return the nil slice.
+	GetRecipients(packet string) ([]string, error)
+}
diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
new file mode 100644
index 00000000..4ab9bd97
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go
@@ -0,0 +1,137 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/ + +package pgp + +import ( + "bytes" + "crypto" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net/mail" + "strconv" + "strings" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "golang.org/x/crypto/openpgp" + "golang.org/x/crypto/openpgp/packet" +) + +type gpgKeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface for pgp +func NewKeyWrapper() keywrap.KeyWrapper { + return &gpgKeyWrapper{} +} + +var ( + // GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption + GPGDefaultEncryptConfig = &packet.Config{ + Rand: rand.Reader, + DefaultHash: crypto.SHA256, + DefaultCipher: packet.CipherAES256, + CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression + RSABits: 2048, + } +) + +func (kw *gpgKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pgp" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + ciphertext := new(bytes.Buffer) + el, err := kw.createEntityList(ec) + if err != nil { + return nil, fmt.Errorf("unable to create entity list: %w", err) + } + if len(el) == 0 { + // nothing to do -- not an error + return nil, nil + } + + plaintextWriter, err := openpgp.Encrypt(ciphertext, + el, /*EntityList*/ + nil, /* Sign*/ + nil, /* FileHint */ + GPGDefaultEncryptConfig) + if err != nil { + return nil, err + } + + if _, err = plaintextWriter.Write(optsData); err != nil { + return nil, err + } else if err = plaintextWriter.Close(); err != nil { + return nil, err + } + return ciphertext.Bytes(), err +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PGP payload. 
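// Illustrative sketch, not part of the vendored source: the parameters the
// PGP key wrapper reads. WrapKeys (above) consumes "gpg-pubkeyringfile" and
// "gpg-recipients" from the EncryptConfig, while UnwrapKey (below) consumes
// "gpg-privatekeys" and, optionally, "gpg-privatekeys-passwords" from the
// DecryptConfig. The keyring bytes and recipient name are hypothetical.
func exampleWrapWithPGP(pubKeyring, optsData []byte) ([]byte, error) {
	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{
			"gpg-pubkeyringfile": {pubKeyring},
			"gpg-recipients":     {[]byte("alice@example.com")},
		},
	}
	// Returns nil, nil (no error) when the keyring or recipient list is empty.
	return NewKeyWrapper().WrapKeys(ec, optsData)
}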
+func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) { + pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for idx, pgpPrivateKey := range pgpPrivateKeys { + r := bytes.NewBuffer(pgpPrivateKey) + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, fmt.Errorf("unable to parse private keys: %w", err) + } + + var prompt openpgp.PromptFunction + if len(pgpPrivateKeysPwd) > idx { + responded := false + prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) { + if responded { + return nil, fmt.Errorf("don't seem to have the right password") + } + responded = true + for _, key := range keys { + if key.PrivateKey != nil { + _ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx]) + } + } + return pgpPrivateKeysPwd[idx], nil + } + } + + r = bytes.NewBuffer(pgpPacket) + md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig) + if err != nil { + continue + } + // we get the plain key options back + optsData, err := io.ReadAll(md.UnverifiedBody) + if err != nil { + continue + } + return optsData, nil + } + return nil, errors.New("PGP: No suitable key found to unwrap key") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded PGPPacket to uint64 keyIds +func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) { + + var keyids []uint64 + for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") { + pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket) + if err != nil { + return nil, fmt.Errorf("could not decode base64 encoded PGP packet: %w", err) + } + newids, err := kw.getKeyIDs(pgpPacket) + if err != nil { + return nil, err + } + keyids = append(keyids, newids...) 
+ } + return keyids, nil +} + +// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs +func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) { + var keyids []uint64 + + kbuf := bytes.NewBuffer(pgpPacket) + packets := packet.NewReader(kbuf) +ParsePackets: + for { + p, err := packets.Next() + if err == io.EOF { + break ParsePackets + } + if err != nil { + return []uint64{}, fmt.Errorf("packets.Next() failed: %w", err) + } + switch p := p.(type) { + case *packet.EncryptedKey: + keyids = append(keyids, p.KeyId) + case *packet.SymmetricallyEncrypted: + break ParsePackets + } + } + return keyids, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) { + keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets) + if err != nil { + return nil, err + } + var array []string + for _, keyid := range keyIds { + array = append(array, "0x"+strconv.FormatUint(keyid, 16)) + } + return array, nil +} + +func (kw *gpgKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["gpg-privatekeys"] +} + +func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) { + + privKeys := kw.GetPrivateKeys(dcparameters) + if len(privKeys) == 0 { + return nil, nil, errors.New("GPG: Missing private key parameter") + } + + return privKeys, dcparameters["gpg-privatekeys-passwords"], nil +} + +// createEntityList creates the opengpg EntityList by reading the KeyRing +// first and then filtering out recipients' keys +func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) { + pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"] + if len(pgpPubringFile) == 0 { + return nil, nil + } + r := bytes.NewReader(pgpPubringFile[0]) + + entityList, err := openpgp.ReadKeyRing(r) + if err != nil { + return nil, err + } + + gpgRecipients := ec.Parameters["gpg-recipients"] + if len(gpgRecipients) == 0 { + return nil, nil + } + + rSet := make(map[string]int) + for _, r := range gpgRecipients { + rSet[string(r)] = 0 + } + + var filteredList openpgp.EntityList + for _, entity := range entityList { + for k := range entity.Identities { + addr, err := mail.ParseAddress(k) + if err != nil { + return nil, err + } + for _, r := range gpgRecipients { + recp := string(r) + if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 { + filteredList = append(filteredList, entity) + rSet[recp] = rSet[recp] + 1 + } + } + } + } + + // make sure we found keys for all the Recipients... + var buffer bytes.Buffer + notFound := false + buffer.WriteString("PGP: No key found for the following recipients: ") + + for k, v := range rSet { + if v == 0 { + if notFound { + buffer.WriteString(", ") + } + buffer.WriteString(k) + notFound = true + } + } + + if notFound { + return nil, errors.New(buffer.String()) + } + + return filteredList, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go new file mode 100644 index 00000000..236764d2 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -0,0 +1,148 @@ +/* + Copyright The ocicrypt Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" +) + +type pkcs11KeyWrapper struct { +} + +func (kw *pkcs11KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs11" +} + +// NewKeyWrapper returns a new key wrapping interface using pkcs11 +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs11KeyWrapper{} +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...)) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(pkcs11Recipients) == 0 { + return nil, nil + } + + jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) + if err != nil { + return nil, fmt.Errorf("PKCS11 EncryptMulitple failed: %w", err) + } + return jsonString, nil +} + +func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for PKCS11 decryption") + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PrivKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) + default: + continue + } + } + + plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) + if err == nil { + return plaintext, nil + } + + return nil, fmt.Errorf("PKCS11: No suitable private key found for decryption: %w", err) +} + +func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["pkcs11-yamls"] +} + +func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { + return []string{"[pkcs11]"}, nil +} + +func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { + var pkcs11Keys []interface{} + + if len(pubKeys) == 0 { + return pkcs11Keys, nil + } + + p11conf, err := 
p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PubKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + } + pkcs11Keys = append(pkcs11Keys, key) + } + return pkcs11Keys, nil +} + +func p11confFromParameters(dcparameters map[string][][]byte) (*pkcs11.Pkcs11Config, error) { + if _, ok := dcparameters["pkcs11-config"]; ok { + return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go new file mode 100644 index 00000000..603925df --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -0,0 +1,137 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs7 + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + "go.mozilla.org/pkcs7" +) + +type pkcs7KeyWrapper struct { +} + +// NewKeyWrapper returns a new key wrapping interface using jwe +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs7KeyWrapper{} +} + +func (kw *pkcs7KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs7" +} + +// WrapKeys wraps the session key for recpients and encrypts the optsData, which +// describe the symmetric key used for encrypting the layer +func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + x509Certs, err := collectX509s(ec.Parameters["x509s"]) + if err != nil { + return nil, err + } + // no recipients is not an error... 
+ if len(x509Certs) == 0 { + return nil, nil + } + + pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM + return pkcs7.Encrypt(optsData, x509Certs) +} + +func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) { + if len(x509s) == 0 { + return nil, nil + } + var x509Certs []*x509.Certificate + for _, x509 := range x509s { + x509Cert, err := utils.ParseCertificate(x509, "PKCS7") + if err != nil { + return nil, err + } + x509Certs = append(x509Certs, x509Cert) + } + return x509Certs, nil +} + +func (kw *pkcs7KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys"] +} + +func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte { + return dcparameters["privkeys-passwords"] +} + +// UnwrapKey unwraps the symmetric key with which the layer is encrypted +// This symmetric key is encrypted in the PKCS7 payload. +func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) { + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("no private keys found for PKCS7 decryption") + } + privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters) + if len(privKeysPasswords) != len(privKeys) { + return nil, errors.New("private key password array length must be same as that of private keys") + } + + x509Certs, err := collectX509s(dc.Parameters["x509s"]) + if err != nil { + return nil, err + } + if len(x509Certs) == 0 { + return nil, errors.New("no x509 certificates found needed for PKCS7 decryption") + } + + p7, err := pkcs7.Parse(pkcs7Packet) + if err != nil { + return nil, fmt.Errorf("could not parse PKCS7 packet: %w", err) + } + + for idx, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7") + if err != nil { + return nil, err + } + for _, x509Cert := range x509Certs { + optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key)) + if err != nil { + continue + } + return optsData, nil + } + } + return nil, errors.New("PKCS7: No suitable private key found for decryption") +} + +// GetKeyIdsFromWrappedKeys converts the base64 encoded Packet to uint64 keyIds; +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) { + return nil, nil +} + +// GetRecipients converts the wrappedKeys to an array of recipients +// We cannot do this with pkcs7 +func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) { + return []string{"[pkcs7]"}, nil +} diff --git a/vendor/github.com/containers/ocicrypt/reader.go b/vendor/github.com/containers/ocicrypt/reader.go new file mode 100644 index 00000000..a93eec8e --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/reader.go @@ -0,0 +1,40 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ocicrypt
+
+import (
+	"io"
+)
+
+type readerAtReader struct {
+	r   io.ReaderAt
+	off int64
+}
+
+// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader
+func ReaderFromReaderAt(r io.ReaderAt) io.Reader {
+	return &readerAtReader{
+		r:   r,
+		off: 0,
+	}
+}
+
+func (rar *readerAtReader) Read(p []byte) (n int, err error) {
+	n, err = rar.r.ReadAt(p, rar.off)
+	rar.off += int64(n)
+	return n, err
+}
diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go
new file mode 100644
index 00000000..8665f6f2
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/spec/spec.go
@@ -0,0 +1,16 @@
+package spec
+
+const (
+	// MediaTypeLayerEnc is the MIME type used for encrypted layers.
+	MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
+	// MediaTypeLayerGzipEnc is the MIME type used for encrypted gzip-compressed layers.
+	MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
+	// MediaTypeLayerZstdEnc is the MIME type used for encrypted zstd-compressed layers.
+	MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
+	// MediaTypeLayerNonDistributableEnc is the MIME type used for non-distributable encrypted layers.
+	MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
+	// MediaTypeLayerNonDistributableGzipEnc is the MIME type used for non-distributable encrypted gzip-compressed layers.
+	MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+	// MediaTypeLayerNonDistributableZsdtEnc is the MIME type used for non-distributable encrypted zstd-compressed layers.
+	MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
+)
diff --git a/vendor/github.com/containers/ocicrypt/utils/delayedreader.go b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
new file mode 100644
index 00000000..3b939bde
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/delayedreader.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The ocicrypt Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package utils
+
+import (
+	"io"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// DelayedReader wraps an io.Reader and allows a client to use the Reader
+// interface. The DelayedReader holds back part of the stream from the client
+// so that it can report an error that occurred on the wrapped Reader early,
+// while it may still be holding some data back.
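// Illustrative sketch, not part of the vendored source: typical use of the
// DelayedReader defined just below. The buffer size is arbitrary; the wrapper
// holds that many bytes back so that an error on the underlying reader can be
// reported before the final bytes are handed out.
func exampleDelayedReader(src io.Reader) ([]byte, error) {
	dr := NewDelayedReader(src, 1024)
	return io.ReadAll(dr)
}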
+type DelayedReader struct { + reader io.Reader // Reader to Read() bytes from and delay them + err error // error that occurred on the reader + buffer []byte // delay buffer + bufbytes int // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller + bufoff int // offset in the delay buffer to give to Read() +} + +// NewDelayedReader wraps a io.Reader and allocates a delay buffer of bufsize bytes +func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader { + return &DelayedReader{ + reader: reader, + buffer: make([]byte, bufsize), + } +} + +// Read implements the io.Reader interface +func (dr *DelayedReader) Read(p []byte) (int, error) { + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + + // if we are completely drained, return io.EOF + if dr.err == io.EOF && dr.bufbytes == 0 { + return 0, io.EOF + } + + // only at the beginning we fill our delay buffer in an extra step + if dr.bufbytes < len(dr.buffer) && dr.err == nil { + dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + // dr.err != nil means we have EOF and can drain the delay buffer + // otherwise we need to still read from the reader + + var tmpbuf []byte + tmpbufbytes := 0 + if dr.err == nil { + tmpbuf = make([]byte, len(p)) + tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf) + if dr.err != nil && dr.err != io.EOF { + return 0, dr.err + } + } + + // copy out of the delay buffer into 'p' + tocopy1 := min(len(p), dr.bufbytes) + c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:]) + dr.bufoff += c1 + dr.bufbytes -= c1 + + c2 := 0 + // can p still hold more data? + if c1 < len(p) { + // copy out of the tmpbuf into 'p' + c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes]) + } + + // if tmpbuf holds data we need to hold onto, copy them + // into the delay buffer + if tmpbufbytes-c2 > 0 { + // left-shift the delay buffer and append the tmpbuf's remaining data + dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes] + dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...) + dr.bufoff = 0 + dr.bufbytes = len(dr.buffer) + } + + var err error + if dr.bufbytes == 0 { + err = io.EOF + } + return c1 + c2, err +} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go new file mode 100644 index 00000000..c6265168 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -0,0 +1,58 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "bytes" + "fmt" + "io" + "os/exec" +) + +// FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns +// EOF if an EOF was encountered or any other error. 
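// Illustrative sketch, not part of the vendored source, for the two ioutils.go
// helpers defined just below. FillBuffer wraps io.ReadFull but normalizes
// io.ErrUnexpectedEOF to io.EOF, so a short final read reports the byte count
// together with plain io.EOF; Runner.Exec feeds the input bytes to the
// command's stdin and returns its stdout. The buffer size and command are
// hypothetical.
func exampleIoutils(r io.Reader) ([]byte, error) {
	buf := make([]byte, 4096)
	n, err := FillBuffer(r, buf)
	if err != nil && err != io.EOF {
		return nil, err
	}
	// n may be smaller than len(buf) only together with io.EOF.
	return Runner{}.Exec("cat", nil, buf[:n])
}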
+func FillBuffer(reader io.Reader, buffer []byte) (int, error) {
+	n, err := io.ReadFull(reader, buffer)
+	if err == io.ErrUnexpectedEOF {
+		return n, io.EOF
+	}
+	return n, err
+}
+
+// CommandExecuter runs an external command; the first argument is the
+// command, like cat or echo, and the second is the list of args to pass to it
+type CommandExecuter interface {
+	Exec(string, []string, []byte) ([]byte, error)
+}
+
+type Runner struct{}
+
+// Exec executes a command line command and returns the output of the command, along with an error if one occurred.
+func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	stdInputBuffer := bytes.NewBuffer(input)
+	cmd := exec.Command(cmdName, args...)
+	cmd.Stdin = stdInputBuffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	if err != nil {
+		return nil, fmt.Errorf("Error while running command: %s. stderr: %s: %w", cmdName, stderr.String(), err)
+	}
+	return out.Bytes(), nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
new file mode 100644
index 00000000..dc477d3c
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go
@@ -0,0 +1,243 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: keyprovider.proto
+
+package keyprovider
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 00000000..a71f0a59 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {};
+	rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {};
+}
\ No newline at end of file
diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go
new file mode 100644
index 00000000..69bb9d12
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/testing.go
@@ -0,0 +1,165 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"time"
+)
+
+// CreateRSAKey creates an RSA key
+func CreateRSAKey(bits int) (*rsa.PrivateKey, error) {
+	key, err := rsa.GenerateKey(rand.Reader, bits)
+	if err != nil {
+		return nil, fmt.Errorf("rsa.GenerateKey failed: %w", err)
+	}
+	return key, nil
+}
+
+// CreateRSATestKey creates an RSA key of the given size and returns
+// the public and private key in PEM or DER format
+func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) {
+	key, err := CreateRSAKey(bits)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err)
+	}
+	privData := x509.MarshalPKCS1PrivateKey(key)
+
+	// no more encoding needed for DER
+	if !pemencode {
+		return pubData, privData, nil
+	}
+
+	publicKey := pem.EncodeToMemory(&pem.Block{
+		Type:  "PUBLIC KEY",
+		Bytes: pubData,
+	})
+
+	var block *pem.Block
+
+	typ := "RSA PRIVATE KEY"
+	if len(password) > 0 {
+		block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+		if err != nil {
+			return nil, nil, fmt.Errorf("x509.EncryptPEMBlock failed: %w", err)
+		}
+	} else {
+		block = &pem.Block{
+			Type:  typ,
+			Bytes: privData,
+		}
+	}
+
+	privateKey := pem.EncodeToMemory(block)
+
+	return publicKey, privateKey, nil
+}
+
+// CreateECDSATestKey creates an elliptic curve key for the given curve and returns
+// the public and private key in DER format
+func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
+	key, err := ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err)
+	}
+
+	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("x509.MarshalPKIXPublicKey failed: %w", err)
+	}
+
+	privData, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		return nil, nil, fmt.Errorf("x509.MarshalECPrivateKey failed: %w", err)
+	}
+
+	return pubData, privData, nil
+}
+
+// CreateTestCA creates a root CA for testing
+func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil
{
+		return nil, nil, fmt.Errorf("rsa.GenerateKey failed: %w", err)
+	}
+
+	ca := &x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName: "test-ca",
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(1, 0, 0),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+	}
+	caCert, err := certifyKey(&key.PublicKey, ca, key, ca)
+
+	return key, caCert, err
+}
+
+// CertifyKey certifies a public key using the given CA's private key and cert;
+// the certificate template for the public key is optional
+func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	pubKey, err := ParsePublicKey(pubbytes, "CertifyKey")
+	if err != nil {
+		return nil, err
+	}
+	return certifyKey(pubKey, template, caKey, caCert)
+}
+
+func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
+	if template == nil {
+		template = &x509.Certificate{
+			SerialNumber: big.NewInt(1),
+			Subject: pkix.Name{
+				CommonName: "testkey",
+			},
+			NotBefore:             time.Now(),
+			NotAfter:              time.Now().Add(time.Hour),
+			IsCA:                  false,
+			KeyUsage:              x509.KeyUsageDigitalSignature,
+			BasicConstraintsValid: true,
+		}
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
+	if err != nil {
+		return nil, fmt.Errorf("x509.CreateCertificate failed: %w", err)
+	}
+
+	cert, err := x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, fmt.Errorf("x509.ParseCertificate failed: %w", err)
+	}
+
+	return cert, nil
+}
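+
+// Illustrative sketch (not part of the upstream API docs): a self-signed test
+// CA certifying a freshly generated RSA public key, using the helpers above.
+//
+//	caKey, caCert, _ := CreateTestCA()
+//	pubDER, _, _ := CreateRSATestKey(2048, nil, false)
+//	cert, err := CertifyKey(pubDER, nil, caKey, caCert)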
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
new file mode 100644
index 00000000..160f747b
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -0,0 +1,249 @@
+/*
+   Copyright The ocicrypt Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package utils
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"github.com/go-jose/go-jose/v3"
+	"golang.org/x/crypto/openpgp"
+)
+
+// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
+func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := jose.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
+	}
+	if jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parseJWKPublicKey parses the input byte array as a JWK
+func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
+	jwk := jose.JSONWebKey{}
+	err := jwk.UnmarshalJSON(privKey)
+	if err != nil {
+		return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
+	}
+	if !jwk.IsPublic() {
+		return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
+	}
+	return &jwk, nil
+}
+
+// parsePkcs11PrivateKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// parsePkcs11PublicKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// IsPasswordError checks whether an error is related to a missing or wrong
+// password
+func IsPasswordError(err error) bool {
+	if err == nil {
+		return false
+	}
+	msg := strings.ToLower(err.Error())
+
+	return strings.Contains(msg, "password") &&
+		(strings.Contains(msg, "missing") || strings.Contains(msg, "wrong"))
+}
+
+// ParsePrivateKey tries to parse a private key in DER format first and
+// then in PEM format, returning an error if the parsing failed
+func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKCS8PrivateKey(privKey)
+	if err != nil {
+		key, err = x509.ParsePKCS1PrivateKey(privKey)
+		if err != nil {
+			key, err = x509.ParseECPrivateKey(privKey)
+		}
+	}
+	if err != nil {
+		block, _ := pem.Decode(privKey)
+		if block != nil {
+			var der []byte
+			if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if privKeyPassword == nil {
+					return nil, fmt.Errorf("%s: Missing password for encrypted private key", prefix)
+				}
+				der, err = x509.DecryptPEMBlock(block, privKeyPassword) //nolint:staticcheck // ignore SA1019, which is kept for backward compatibility
+				if err != nil {
+					return nil, fmt.Errorf("%s: Wrong password: could not decrypt private key", prefix)
+				}
+			} else {
+				der = block.Bytes
+			}
+
+			key, err = x509.ParsePKCS8PrivateKey(der)
+			if err != nil {
+				key, err = x509.ParsePKCS1PrivateKey(der)
+				if err != nil {
+					return nil, fmt.Errorf("%s: Could not parse private key: %w", prefix, err)
+				}
+			}
+		} else {
+			key, err = parseJWKPrivateKey(privKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PrivateKeyYaml(privKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
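+
+// Illustrative sketch (not upstream documentation): ParsePrivateKey accepts
+// DER, PEM (optionally encrypted), JWK, or pkcs11 key-file YAML input; the
+// pemBytes and password values here are hypothetical.
+//
+//	key, err := ParsePrivateKey(pemBytes, []byte("password"), "decrypt")
+//	// err != nil means none of the supported formats matched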
+// IsPrivateKey returns true if the given byte array represents a private key.
+// It returns an error if, for example, the password is wrong
+func IsPrivateKey(data []byte, password []byte) (bool, error) {
+	_, err := ParsePrivateKey(data, password, "")
+	return err == nil, err
+}
+
+// IsPkcs11PrivateKey returns true if the given byte array represents a pkcs11 private key
+func IsPkcs11PrivateKey(data []byte) bool {
+	return pkcs11.IsPkcs11PrivateKey(data)
+}
+
+// ParsePublicKey tries to parse a public key in DER format first and
+// then in PEM format, returning an error if the parsing failed
+func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
+	key, err := x509.ParsePKIXPublicKey(pubKey)
+	if err != nil {
+		block, _ := pem.Decode(pubKey)
+		if block != nil {
+			key, err = x509.ParsePKIXPublicKey(block.Bytes)
+			if err != nil {
+				return nil, fmt.Errorf("%s: Could not parse public key: %w", prefix, err)
+			}
+		} else {
+			key, err = parseJWKPublicKey(pubKey, prefix)
+			if err != nil {
+				key, err = parsePkcs11PublicKeyYaml(pubKey, prefix)
+			}
+		}
+	}
+	return key, err
+}
+
+// IsPublicKey returns true if the given byte array represents a public key
+func IsPublicKey(data []byte) bool {
+	_, err := ParsePublicKey(data, "")
+	return err == nil
+}
+
+// IsPkcs11PublicKey returns true if the given byte array represents a pkcs11 public key
+func IsPkcs11PublicKey(data []byte) bool {
+	return pkcs11.IsPkcs11PublicKey(data)
+}
+
+// ParseCertificate tries to parse a certificate in DER format first and
+// then in PEM format, returning an error if the parsing failed
+func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
+	x509Cert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		block, _ := pem.Decode(certBytes)
+		if block == nil {
+			return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix)
+		}
+		x509Cert, err = x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("%s: Could not parse x509 certificate: %w", prefix, err)
+		}
+	}
+	return x509Cert, err
+}
+
+// IsCertificate returns true if the given byte array represents an x.509 certificate
+func IsCertificate(data []byte) bool {
+	_, err := ParseCertificate(data, "")
+	return err == nil
+}
+
+// IsGPGPrivateKeyRing returns true if the given byte array represents a GPG private key ring file
+func IsGPGPrivateKeyRing(data []byte) bool {
+	r := bytes.NewBuffer(data)
+	_, err := openpgp.ReadKeyRing(r)
+	return err == nil
+}
+
+// SortDecryptionKeys parses a list of comma-separated base64 entries and sorts the data into
+// a map.
Each entry in the list may be either a GPG private key ring, private key, or x.509 +// certificate +func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) { + dcparameters := make(map[string][][]byte) + + for _, b64Item := range strings.Split(b64ItemList, ",") { + var password []byte + b64Data := strings.Split(b64Item, ":") + keyData, err := base64.StdEncoding.DecodeString(b64Data[0]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key") + } + if len(b64Data) == 2 { + password, err = base64.StdEncoding.DecodeString(b64Data[1]) + if err != nil { + return nil, errors.New("Could not base64 decode a passed decryption key password") + } + } + var key string + isPrivKey, err := IsPrivateKey(keyData, password) + if IsPasswordError(err) { + return nil, err + } + if isPrivKey { + key = "privkeys" + if _, ok := dcparameters["privkeys-passwords"]; !ok { + dcparameters["privkeys-passwords"] = [][]byte{password} + } else { + dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password) + } + } else if IsCertificate(keyData) { + key = "x509s" + } else if IsGPGPrivateKeyRing(keyData) { + key = "gpg-privatekeys" + } + if key != "" { + values := dcparameters[key] + if values == nil { + dcparameters[key] = [][]byte{keyData} + } else { + dcparameters[key] = append(dcparameters[key], keyData) + } + } else { + return nil, errors.New("Unknown decryption key type") + } + } + + return dcparameters, nil +} diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS new file mode 100644 index 00000000..129dd396 --- /dev/null +++ b/vendor/github.com/containers/storage/AUTHORS @@ -0,0 +1,1523 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Miller +Adam Singer +Aditi Rajagopal +Aditya +Adria Casas +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +BartÅ‚omiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Carl Henrik Lunde +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Gageot +David Gebler +David Lawrence +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Xia +David Young +Davide Ceretti +Dawn Chen +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dima Stopel +Dimitri John Ledkov +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitry Demeshchuk +Dmitry Gusev +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen LuÄanin +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evgeny Vereshchagin +Ewa Czechowska +Eystein MÃ¥løy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +Francesc Campoy +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jordan Williams +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Hawn +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil DomaÅ„ski +kamjar gerami +Kanstantsin Shautsou +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keli Hu +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickael Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Clark +Kevin J. Lynagh +Kevin Menard +Kevin P. Kucharczyk +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Pelykh +Krasimir Georgiev +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt McCormick +Matt Moore +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mengdi Gao +Mert YazıcıoÄŸlu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michaël Pailloncy +MichaÅ‚ Czeraszkiewicz +Michiel@unhosted +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav TrmaÄ +mingqing +Mingzhen Feng +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Nelson Chen +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Wallis +Roberto G. 
Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சஙà¯à®•à®°à¯ +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +Shekhar Gulati +Sheng Yang +Shengbo Song +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +shuai-z +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tibor Vass +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš HrÄka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +xamyzhao +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Ãlex González +Ãlvaro Lázaro +Ãtila Camurça Alves +å°¹å‰å³° +æ通 diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE new file mode 100644 index 00000000..8a37c1c7 --- /dev/null +++ b/vendor/github.com/containers/storage/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 00000000..7307d969 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
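Purely for orientation, a minimal consumer-side sketch of the two top-level helpers this package exposes (Tar, Untar, TarOptions, all added in the diff below); the paths are hypothetical:

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        // Create an uncompressed tar stream of a source tree
        // (the path is hypothetical).
        rc, err := archive.Tar("/tmp/src", archive.Uncompressed)
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close()

        // Unpack the stream into a destination directory; Untar detects
        // gzip/bzip2/xz/zstd automatically and falls back to plain tar.
        if err := archive.Untar(rc, "/tmp/dst", &archive.TarOptions{}); err != nil {
            log.Fatal(err)
        }
    }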
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go new file mode 100644 index 00000000..29f800b2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -0,0 +1,1518 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/promise" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + gzip "github.com/klauspost/pgzip" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + IgnoreChownErrors bool + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // This is additional data to be used by the converter. It will + // not survive a round trip through JSON, so it's primarily + // intended for generating archives (i.e., converting writes). + WhiteoutData interface{} + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool + // ForceMask, if set, indicates the permission mask used for created files. + ForceMask *os.FileMode + } +) + +const ( + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" + freebsd = "freebsd" +) + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. 
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + return zstdReader(buf) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
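To make the pairing of the two stream helpers above concrete, a self-contained round-trip sketch; the payload is arbitrary, and DecompressStream does the format sniffing via the magic bytes listed in DetectCompression:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "log"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        var buf bytes.Buffer

        // Compress some bytes with gzip...
        w, err := archive.CompressStream(&buf, archive.Gzip)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := w.Write([]byte("hello archive")); err != nil {
            log.Fatal(err)
        }
        w.Close()

        // ...and let DecompressStream sniff the magic bytes and undo it.
        r, err := archive.DecompressStream(&buf)
        if err != nil {
            log.Fatal(err)
        }
        defer r.Close()
        out, _ := io.ReadAll(r)
        fmt.Println(string(out)) // "hello archive"
    }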
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
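As a hedged sketch of the modifier contract described above, one possible TarModifierFunc; the entry name and contents are illustrative, and note that ReplaceFileTarWrapper itself fills in Name and Size after the modifier returns:

    package main

    import (
        "archive/tar"
        "io"
        "os"

        "github.com/containers/storage/pkg/archive"
    )

    // setVersion rewrites (or creates) a "VERSION" entry. Per the contract
    // above, header and content are nil when the entry is missing from the
    // input stream.
    func setVersion(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
        if header == nil {
            header = &tar.Header{Typeflag: tar.TypeReg, Mode: 0o644}
        }
        return header, []byte("2.0\n"), nil
    }

    func main() {
        // Filter a tar stream from stdin to stdout, rewriting one entry.
        out := archive.ReplaceFileTarWrapper(os.Stdin, map[string]archive.TarModifierFunc{
            "VERSION": setVersion,
        })
        defer out.Close()
        io.Copy(os.Stdout, out) //nolint:errcheck
    }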
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// ReadSecurityXattrToTarHeader reads security.capability, security,image +// xattrs from filesystem to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + for _, xattr := range []string{"security.capability", "security.ima"} { + capability, err := system.Lgetxattr(path, xattr) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) + } + if capability != nil { + hdr.Xattrs[xattr] = string(capability) + } + } + return nil +} + +// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header +func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { + xattrs, err := system.Llistxattr(path) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(path, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key) + continue + } + return err + } + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[key] = string(value) + } + } + return nil +} + +type TarWhiteoutHandler interface { + Setxattr(path, name string, value []byte) error + Mknod(path string, mode uint32, dev int) error + Chown(path string, uid, gid int) error +} + +type TarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) + ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter TarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. 
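The canonical-name rule described above reduces to two points: slash-separated names, and a trailing slash on directories. A standalone sketch of the same normalization for the unix case (canonicalName is a hypothetical helper, not part of the vendored file):

    package main

    import (
        "fmt"
        "strings"
    )

    // canonicalName mirrors the normalization described above for unix:
    // names stay slash-separated and directories gain a trailing "/"
    // (the Windows variant additionally converts backslashes).
    func canonicalName(name string, isDir bool) string {
        if isDir && !strings.HasSuffix(name, "/") {
            name += "/"
        }
        return name
    }

    func main() {
        fmt.Println(canonicalName("etc/passwd", false)) // etc/passwd
        fmt.Println(canonicalName("etc", true))         // etc/
    }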
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := ReadUserXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := ReadFileFlagsToTarHeader(path, hdr); err != nil { + return err + } + if ta.CopyPass { + copyPassHeader(hdr) + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + maybeTruncateHeaderModTime(hdr) + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. 
Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + mask := hdrInfo.Mode() + if forceMask != nil { + mask = *forceMask + } + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, mask); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask) + if err != nil { + return err + } + if _, err := io.CopyBuffer(file, reader, buffer); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + logrus.Debugf("Tar: Can't create device %v while running in user namespace", path) + return nil + } + fallthrough + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := handleLLink(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { + value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777) + if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != windows { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID) + if err != nil { + if ignoreChownErrors { + fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err) + } else { + return err + } + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. We also ignore EPERM errors if we are running in a + // user namespace. + errs = append(errs, err.Error()) + continue + } + return err + } + + } + + // We defer setting flags on directories until the end of + // Unpack or UnpackLayer in case setting them makes the + // directory immutable. 
+ if hdr.Typeflag != tar.TypeDir { + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil //nolint: nilerr + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." 
{ + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + matches, err := pm.IsMatch(relFilePath) + if err != nil { + return fmt.Errorf("matching %s: %w", relFilePath, err) + } + skip = matches + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !d.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }); err != nil { + logrus.Errorf("%s", err) + return + } + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + buffer := make([]byte, 1<<20) + + doChown := !options.NoLchown + if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false + uid, gid, mode, err := GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". 
+ hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + chownOpts := options.ChownOpts + if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if chownOpts != nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// +// identity (uncompressed), gzip, bzip2, xz. +// +// FIXME: specify behavior when target path exists vs. doesn't exist. 
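The core safety property in Unpack above is the breakout check: an entry is only extracted if joining its cleaned name onto dest stays inside dest. Isolated as a standalone helper for illustration (insideDest is a hypothetical name, slightly stricter than the vendored check):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // insideDest reports whether joining name onto dest stays inside dest,
    // mirroring the breakoutError check in Unpack above.
    func insideDest(dest, name string) bool {
        path := filepath.Join(dest, filepath.Clean(name))
        rel, err := filepath.Rel(dest, path)
        if err != nil {
            return false
        }
        return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
    }

    func main() {
        fmt.Println(insideDest("/tmp/dst", "etc/passwd"))       // true
        fmt.Println(insideDest("/tmp/dst", "../../etc/passwd")) // false
    }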
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + CopyPass: true, + InUserNS: unshare.IsRootless(), + } + archive, err := TarWithOptions(src, options) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: unshare.IsRootless(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: unshare.IsRootless(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
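A minimal sketch of driving the Archiver plumbing above without any ID mapping; the paths are hypothetical, and compression of the on-disk tarball is auto-detected as described earlier:

    package main

    import (
        "log"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        a := archive.NewDefaultArchiver()

        // Tar up a tree and unpack it at the destination in one pipe.
        if err := a.TarUntar("/tmp/src", "/tmp/dst"); err != nil {
            log.Fatal(err)
        }

        // Unpack an existing tarball; compression is auto-detected.
        if err := a.UntarPath("/tmp/layer.tar.gz", "/tmp/dst2"); err != nil {
            log.Fatal(err)
        }
    }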
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootIDs := archiver.UntarIDMappings.RootPair()
+	if archiver.ChownOpts != nil {
+		rootIDs = *archiver.ChownOpts
+	}
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+		copyPassHeader(hdr)
+
+		if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
+			return err
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	options := &TarOptions{
+		UIDMaps:              archiver.UntarIDMappings.UIDs(),
+		GIDMaps:              archiver.UntarIDMappings.GIDs(),
+		ChownOpts:            archiver.ChownOpts,
+		InUserNS:             unshare.IsRootless(),
+		NoOverwriteDirNonDir: true,
+	}
+	err = archiver.Untar(r, filepath.Dir(dst), options)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) {
+	var uid, gid int
+	if chownOpts != nil {
+		uid, gid = chownOpts.UID, chownOpts.GID
+	} else {
+		if readIDMappings != nil && !readIDMappings.Empty() {
+			uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+			if err != nil {
+				return err
+			}
+		} else if runtime.GOOS == darwin {
+			uid, gid = hdr.Uid, hdr.Gid
+			if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
+				attrs := strings.Split(string(xstat), ":")
+				if len(attrs) == 3 {
+					val, err := strconv.ParseUint(attrs[0], 10, 32)
+					if err == nil {
+						uid = int(val)
+					}
+					val, err = strconv.ParseUint(attrs[1], 10, 32)
+					if err == nil {
+						gid = int(val)
+					}
+				}
+			}
+		} else {
+			uid, gid = hdr.Uid, hdr.Gid
+		}
+	}
+	ids := idtools.IDPair{UID: uid, GID: gid}
+	if writeIDMappings != nil && !writeIDMappings.Empty() {
+		ids, err =
writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewReader(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
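A hedged sketch of the read-once contract described above: once the final byte has been read, TempArchive closes and unlinks its backing file from inside Read. The payload is illustrative, and "" selects the default temp directory:

    package main

    import (
        "fmt"
        "io"
        "log"
        "strings"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        // Spool an arbitrary stream into a temp file.
        ta, err := archive.NewTempArchive(strings.NewReader("payload"), "")
        if err != nil {
            log.Fatal(err)
        }

        // Reading exactly Size bytes triggers the self-cleanup in Read:
        // the backing file is closed and removed once read == Size.
        buf := make([]byte, ta.Size)
        if _, err := io.ReadFull(ta, buf); err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(buf))
    }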
+func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) + } + hashWorker.Wait() + if err == nil && hashError != nil { + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func 
UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. +func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 00000000..eab9da51 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,23 @@ +//go:build go1.10 +// +build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 00000000..f591bf38 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,14 @@ +//go:build !go1.10 +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go new file mode 100644 index 00000000..4d362f07 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go @@ -0,0 +1,19 @@ +//go:build freebsd || darwin +// +build freebsd darwin + +package archive + +import ( + "archive/tar" + "os" + + "golang.org/x/sys/unix" +) + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW) +} diff --git 
a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 00000000..02995d76 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,209 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil //nolint: nilnil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. 
+ return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. + if isWhiteOut(stat) { + return nil, nil //nolint: nilnil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. + if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct{} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 0o7777, nil + } + return 0, 0, uint32(f.Mode()), nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 00000000..2468ab3c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package archive + +func 
GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 00000000..88192f22 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,112 @@ +//go:build !windows +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 0o7777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor)) +} + +// Hardlink without symlinks +func handleLLink(targetPath, path string) error { + // Note: on Linux, the link syscall will not follow symlinks. 
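The major() and minor() helpers above split a Linux st_rdev into the device-number pair that setHeaderForSpecialDevice stores in the tar header and handleTarTypeBlockCharFifo reassembles via system.Mkdev. A minimal Linux-only sketch of the same round trip, using the equivalent x/sys/unix helpers:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// /dev/null is conventionally char device 1:3 on Linux.
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		panic(err)
	}
	rdev := uint64(st.Rdev)
	fmt.Printf("major=%d minor=%d\n", unix.Major(rdev), unix.Minor(rdev))

	// Mkdev reassembles the pair, round-tripping the device number.
	fmt.Println(unix.Mkdev(unix.Major(rdev), unix.Minor(rdev)) == rdev)
}
```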
+ // This behavior is implementation-dependent since + // POSIX.1-2008 so to make it clear that we need non-symlink + // following here we use the linkat syscall which has a flags + // field to select symlink following or not. + return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 00000000..85a5b3a5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,84 @@ +//go:build windows +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0o111 + permPart &= 0o755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} + +// Hardlink without following symlinks +func handleLLink(targetPath string, path string) error { + return os.Link(targetPath, path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go new file mode 100644 index 00000000..36b7118a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go @@ -0,0 +1,41 @@ +package archive + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 00000000..01c6f30c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,497 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// changesByPath implements sort.Interface. 
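The wrapperZstdDecoder above exists largely because (*zstd.Decoder).Close in klauspost/compress returns nothing, while callers of zstdReader expect an io.ReadCloser. A minimal round trip with the same package, as a standalone sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var compressed bytes.Buffer

	enc, err := zstd.NewWriter(&compressed) // *zstd.Encoder is an io.WriteCloser
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("layer contents")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flushes the final frame
		panic(err)
	}
	size := compressed.Len()

	dec, err := zstd.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	defer dec.Close() // returns nothing, which is why the wrapper above exists

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q round-tripped through %d compressed bytes\n", out, size)
}
```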
+type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +func aufsWhiteoutPresent(root, path string) (bool, error) { + f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) + _, err := os.Stat(filepath.Join(root, f)) + if err == nil { + return true, nil + } + if os.IsNotExist(err) || isENOTDIR(err) { + return false, nil + } + return false, err +} + +func isENOTDIR(err error) bool { + if err == nil { + return false + } + if err == syscall.ENOTDIR { + return true + } + if perror, ok := err.(*os.PathError); ok { + if errno, ok := perror.Err.(syscall.Errno); ok { + return errno == syscall.ENOTDIR + } + } + return false +} + +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) + whiteoutChange func(string, string) (bool, error) +) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
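The sameFsTime helper above compensates for tar's one-second timestamp granularity: a time that has lost its sub-second part in a tar round trip still compares equal to the original. A quick self-contained check:

```go
package main

import (
	"fmt"
	"time"
)

// sameFsTime, reproduced from above: equal times, or the same second
// when either side has zero nanoseconds (as after a tar round trip).
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	t := time.Date(2023, 11, 13, 14, 54, 51, 123456789, time.UTC)
	stored := t.Truncate(time.Second) // the precision a tar header keeps
	fmt.Println(sameFsTime(t, stored))             // true
	fmt.Println(sameFsTime(t, t.Add(time.Second))) // false
}
```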
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
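Worth noting before ChangesDirs below: addChanges above slots the parent-directory entry ahead of the entries recorded beneath it without a second allocation, using the append-then-copy idiom. A standalone sketch of that idiom:

```go
package main

import "fmt"

// insertAt places v at index i, shifting the tail right: the same
// append-then-copy pattern addChanges uses to insert a directory entry
// before the changes found inside it.
func insertAt(s []string, i int, v string) []string {
	s = append(s, "")    // grow by one; the zero value is overwritten below
	copy(s[i+1:], s[i:]) // shift the tail right by one
	s[i] = v
	return s
}

func main() {
	changes := []string{"/etc/a", "/etc/b"}
	fmt.Println(insertAt(changes, 0, "/etc")) // [/etc /etc/a /etc/b]
}
```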
+func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var oldRoot, newRoot *FileInfo + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 00000000..f8414717 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,400 @@ +package archive + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. 
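ExportChanges above encodes a ChangeDelete as an empty tar entry whose basename carries the AUFS whiteout prefix (WhiteoutPrefix, ".wh." in containers/storage). A minimal sketch of building such an entry with the standard library, assuming that prefix:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path/filepath"
	"time"
)

const whiteoutPrefix = ".wh." // WhiteoutPrefix in containers/storage

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	// A ChangeDelete of /etc/motd becomes an empty entry named etc/.wh.motd.
	deleted := "/etc/motd"
	name := filepath.Join(filepath.Dir(deleted), whiteoutPrefix+filepath.Base(deleted))
	now := time.Now()
	hdr := &tar.Header{
		Typeflag:   tar.TypeReg,
		Name:       name[1:], // strip the leading "/", as ExportChanges does
		Size:       0,
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("whiteout entry %q in a %d-byte archive\n", hdr.Name, buf.Len())
}
```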
Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo + idmap1 *idtools.IDMappings //nolint:unused + idmap2 *idtools.IDMappings //nolint:unused +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(idmap1), + root2: newRootFileInfo(idmap2), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. 
For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. 
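The lockstep loop in (*walker).walk above is, once the inode pruning is stripped away, an ordinary two-index merge of sorted lists. A standalone sketch of that shape; in walk, the equal case is also where matching inode and device numbers let a whole subtree be skipped:

```go
package main

import "fmt"

// mergeSorted computes the union of two sorted name lists with two
// indexes, mirroring the comparison structure of (*walker).walk.
func mergeSorted(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case a[i] > b[j]:
			out = append(out, b[j])
			j++
		default:
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}

func main() {
	fmt.Println(mergeSorted(
		[]string{"bin", "etc", "usr"},
		[]string{"etc", "home", "usr"},
	)) // [bin etc home usr]
}
```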
+func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := 0; i < len(dirent.Name); i++ { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + return overlayDeletedFile(layers, root, path, fi) + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", err + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. 
+ return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. + return "", nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 00000000..ca272e68 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,113 @@ +//go:build !linux +// +build !linux + +package archive + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + sourceStat, err := system.Lstat(sourceDir) + if err != nil { + return nil, err + } + + err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + + // Don't cross mount points. This ignores file mounts to avoid + // generating a diff which deletes all files following the + // mount. 
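The device-ID comparison just below is the standard way to avoid descending into another filesystem during a walk. A freestanding sketch of the same check with only the standard library; the walk root is an assumption for the sketch, Linux stat semantics are assumed, and it should be pointed at a readable tree:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"syscall"
)

func main() {
	const root = "/var" // walk root, an assumption for this sketch

	var rootStat syscall.Stat_t
	if err := syscall.Lstat(root, &rootStat); err != nil {
		panic(err)
	}

	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || !d.IsDir() {
			return err
		}
		var st syscall.Stat_t
		if err := syscall.Lstat(path, &st); err != nil {
			return err
		}
		if uint64(st.Dev) != uint64(rootStat.Dev) {
			fmt.Println("skipping other filesystem at", path)
			return filepath.SkipDir // don't cross the mount point
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```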
+ if s.Dev() != sourceStat.Dev() && s.IsDir() { + return filepath.SkipDir + } + + info.stat = s + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go new file mode 100644 index 00000000..6b2e5938 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -0,0 +1,52 @@ +//go:build !windows +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + oldUID, oldGID := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldInfo != nil { + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil { + oldUID = uint32(oldcuid) + oldGID = uint32(oldcgid) + } + } + } + ownerChanged := uid != oldUID || gid != oldGID + if oldStat.Mode() != newStat.Mode() || + ownerChanged || + oldStat.Rdev() != newStat.Rdev() || + oldStat.Flags() != newStat.Flags() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go new file mode 100644 index 00000000..1bab94fa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -0,0 +1,29 @@ +package archive + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go new file mode 100644 index 00000000..55f753bf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -0,0 +1,458 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +// Errors used or 
returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
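PreserveTrailingDotOrSeparator above exists because filepath.Clean discards exactly the suffix that distinguishes "copy the directory" from "copy its contents". A quick demonstration of the information loss the helper repairs:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// "/data/src/." asks for the *contents* of src; Clean erases the hint.
	fmt.Println(filepath.Clean("/data/src/.")) // /data/src
	fmt.Println(filepath.Clean("/data/src/"))  // /data/src

	// Hence the helper above: clean first, then re-append "/." or "/"
	// when the original, uncleaned path carried one.
}
```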
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. 
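The symlink loop in CopyInfoDestinationPath resolves one level at a time, so a dangling final link is still usable as a copy target. The relative-target join it performs looks like this in isolation; resolveOnce is a sketch for illustration, not part of the package's API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveOnce follows a single symlink level, joining a relative target
// against the link's parent directory, as the loop above does.
func resolveOnce(path string) (string, error) {
	target, err := os.Readlink(path)
	if err != nil {
		return "", err
	}
	if !filepath.IsAbs(target) {
		target = filepath.Join(filepath.Dir(path), target)
	}
	return target, nil
}

func main() {
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// A dangling, relative symlink is fine: the copy will create the target.
	link := filepath.Join(dir, "link")
	if err := os.Symlink("sub/real", link); err != nil {
		panic(err)
	}
	fmt.Println(resolveOnce(link)) // <dir>/sub/real <nil>
}
```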
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. 
+ return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
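RebaseArchiveEntries above streams the rename through an io.Pipe instead of buffering the whole archive. A compact, self-contained version of the same pattern, rebasing a single entry from "old/" to "new/":

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Build a one-entry archive rooted at "old/".
	var src bytes.Buffer
	tw := tar.NewWriter(&src)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg, Name: "old/file.txt", Mode: 0o644, Size: int64(len(body)),
	}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	tw.Close()

	// Stream a rename of the first path element through a pipe,
	// the way RebaseArchiveEntries does.
	pr, pw := io.Pipe()
	go func() {
		in := tar.NewReader(&src)
		out := tar.NewWriter(pw)
		for {
			hdr, err := in.Next()
			if err == io.EOF {
				out.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			hdr.Name = strings.Replace(hdr.Name, "old", "new", 1)
			if err := out.WriteHeader(hdr); err != nil {
				pw.CloseWithError(err)
				return
			}
			if _, err := io.Copy(out, in); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
	}()

	rebased := tar.NewReader(pr)
	for {
		hdr, err := rebased.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name) // new/file.txt
	}
}
```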
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 00000000..d6c5fd98 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,12 @@ +//go:build !windows +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 00000000..2b775b45 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 00000000..71355185 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,276 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + buffer := make([]byte, 1<<20) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+		if runtime.GOOS == windows {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: these operations are platform-specific, so the slash must be too.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = os.MkdirAll(parentPath, 0o755)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: these operations are platform-specific, so the slash must be too.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						if err := resetImmutable(path, nil); err != nil {
+							return err
+						}
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := resetImmutable(originalPath, nil); err != nil {
+					return 0, err
+				}
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			//
+			// We always reset the immutable flag (if present) to allow metadata
+			// changes and to allow directory modification. The flag will be
+			// re-applied based on the contents of hdr either at the end for
+			// directories or in createTarFile otherwise.
+ if fi, err := os.Lstat(path); err == nil { + if err := resetImmutable(path, &fi); err != nil { + return 0, err + } + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer func() { + _, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above. 
+ }() + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go new file mode 100644 index 00000000..92b8d05e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go @@ -0,0 +1,167 @@ +//go:build freebsd +// +build freebsd + +package archive + +import ( + "archive/tar" + "fmt" + "math/bits" + "os" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +const ( + paxSCHILYFflags = "SCHILY.fflags" +) + +var ( + flagNameToValue = map[string]uint32{ + "sappnd": system.SF_APPEND, + "sappend": system.SF_APPEND, + "arch": system.SF_ARCHIVED, + "archived": system.SF_ARCHIVED, + "schg": system.SF_IMMUTABLE, + "schange": system.SF_IMMUTABLE, + "simmutable": system.SF_IMMUTABLE, + "sunlnk": system.SF_NOUNLINK, + "sunlink": system.SF_NOUNLINK, + "snapshot": system.SF_SNAPSHOT, + "uappnd": system.UF_APPEND, + "uappend": system.UF_APPEND, + "uarch": system.UF_ARCHIVE, + "uarchive": system.UF_ARCHIVE, + "hidden": system.UF_HIDDEN, + "uhidden": system.UF_HIDDEN, + "uchg": system.UF_IMMUTABLE, + "uchange": system.UF_IMMUTABLE, + "uimmutable": system.UF_IMMUTABLE, + "uunlnk": system.UF_NOUNLINK, + "uunlink": system.UF_NOUNLINK, + "offline": system.UF_OFFLINE, + "uoffline": system.UF_OFFLINE, + "opaque": system.UF_OPAQUE, + "rdonly": system.UF_READONLY, + "urdonly": system.UF_READONLY, + "readonly": system.UF_READONLY, + "ureadonly": system.UF_READONLY, + "reparse": system.UF_REPARSE, + "ureparse": system.UF_REPARSE, + "sparse": system.UF_SPARSE, + "usparse": system.UF_SPARSE, + "system": system.UF_SYSTEM, + "usystem": system.UF_SYSTEM, + } + // Only include the short names for the reverse map + flagValueToName = map[uint32]string{ + system.SF_APPEND: "sappnd", + system.SF_ARCHIVED: "arch", + system.SF_IMMUTABLE: "schg", + system.SF_NOUNLINK: "sunlnk", + system.SF_SNAPSHOT: "snapshot", + system.UF_APPEND: "uappnd", + system.UF_ARCHIVE: "uarch", + system.UF_HIDDEN: "hidden", + system.UF_IMMUTABLE: "uchg", + system.UF_NOUNLINK: "uunlnk", + system.UF_OFFLINE: "offline", + system.UF_OPAQUE: "opaque", + system.UF_READONLY: "rdonly", + system.UF_REPARSE: "reparse", + system.UF_SPARSE: "sparse", + system.UF_SYSTEM: "system", + } +) + +func parseFileFlags(fflags string) (uint32, uint32, error) { + var set, clear uint32 = 0, 0 + for _, fflag := range strings.Split(fflags, ",") { + isClear := false + if strings.HasPrefix(fflag, "no") { + isClear = true + fflag = strings.TrimPrefix(fflag, "no") + } + if value, ok := flagNameToValue[fflag]; ok { + if isClear { + clear |= value + } else { + set |= value + } + } else { + return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag) + } + } + return set, clear, nil +} + +func formatFileFlags(fflags uint32) (string, error) { + res := []string{} + for fflags != 0 { + // Extract lowest set bit + fflag := uint32(1) << bits.TrailingZeros32(fflags) + if name, ok := flagValueToName[fflag]; ok { + res = append(res, name) + } else { + return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag) + } + fflags &= ^fflag + } + return strings.Join(res, ","), nil +} + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + st, err := system.Lstat(path) + if err != nil { + return err + } + fflags, err := formatFileFlags(st.Flags()) + if err != nil { + return err + } + if 
fflags != "" { + if hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSCHILYFflags] = fflags + } + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok { + var set, clear uint32 + set, clear, err := parseFileFlags(fflags) + if err != nil { + return err + } + + // Apply the delta to the existing file flags + st, err := system.Lstat(path) + if err != nil { + return err + } + return system.Lchflags(path, (st.Flags() & ^clear)|set) + } + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + var flags uint32 + if fi != nil { + flags = (*fi).Sys().(*syscall.Stat_t).Flags + } else { + st, err := system.Lstat(path) + if err != nil { + return err + } + flags = st.Flags() + } + if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 { + flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE) + return system.Lchflags(path, flags) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go new file mode 100644 index 00000000..27a0bae2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go @@ -0,0 +1,21 @@ +//go:build !freebsd +// +build !freebsd + +package archive + +import ( + "archive/tar" + "os" +) + +func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error { + return nil +} + +func resetImmutable(path string, fi *os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 00000000..3448569b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 00000000..8db64e80 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,17 @@ +//go:build !linux +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 00000000..d20478a1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." 
+
+// WhiteoutMetaPrefix prefix means the whiteout has a special meaning and is
+// not for removing an actual file. Normally these files are excluded from
+// exported archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hard links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque: readdir
+// calls on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
new file mode 100644
index 00000000..903befd7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+//	Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+//   - ./foo.txt with content "hello world"
+//   - ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 00000000..ca7ce30f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,513 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
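+//
+// A rough sketch of the approach used below: each file's payload is split
+// into content-defined chunks with a rolling checksum (see rollsum.go), and
+// long runs of zeros are detected so they can be recorded as sparse "zeros"
+// chunks instead of stored data.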
+ +import ( + "bufio" + "bytes" + "encoding/base64" + "io" + + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +const ( + RollsumBits = 16 + holesThreshold = int64(1 << 10) +) + +type holesFinder struct { + reader *bufio.Reader + zeros int64 + threshold int64 + + state int +} + +const ( + holesFinderStateRead = iota + holesFinderStateAccumulate + holesFinderStateFound + holesFinderStateEOF +) + +// readByte reads a single byte from the underlying reader. +// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil). +// If there are at least f.THRESHOLD consecutive zeros, then the +// return value is (N_CONSECUTIVE_ZEROS, '\x00'). +func (f *holesFinder) readByte() (int64, byte, error) { + for { + switch f.state { + // reading the file stream + case holesFinderStateRead: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + b, err := f.reader.ReadByte() + if err != nil { + return 0, b, err + } + + if b != 0 { + return 0, b, err + } + + f.zeros = 1 + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } else { + f.state = holesFinderStateAccumulate + } + // accumulating zeros, but still didn't reach the threshold + case holesFinderStateAccumulate: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + continue + } + return 0, b, err + } + + if b == 0 { + f.zeros++ + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } + } else { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + } + // found a hole. Number of zeros >= threshold + case holesFinderStateFound: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + } + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + if b != 0 { + if err := f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + f.zeros++ + // reached EOF. Flush pending zeros if any. + case holesFinderStateEOF: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + return 0, 0, io.EOF + } + } +} + +type rollingChecksumReader struct { + reader *holesFinder + closed bool + rollsum *RollSum + pendingHole int64 + + // WrittenOut is the total number of bytes read from + // the stream. + WrittenOut int64 + + // IsLastChunkZeros tells whether the last generated + // chunk is a hole (made of consecutive zeros). If it + // is false, then the last chunk is a data chunk + // generated by the rolling checksum. 
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := int64(len(b)) + if rc.pendingHole < toCopy { + toCopy = rc.pendingHole + } + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := 0; i < len(b); i++ { + holeLen, n, err := rc.reader.readByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + +type tarSplitData struct { + compressed *bytes.Buffer + digester digest.Digester + uncompressedCounter *ioutils.WriteCounter + zstd *zstd.Encoder + packer storage.Packer +} + +func newTarSplitData(level int) (*tarSplitData, error) { + compressed := bytes.NewBuffer(nil) + digester := digest.Canonical.Digester() + + zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + if err != nil { + return nil, err + } + + uncompressedCounter := ioutils.NewWriteCounter(zstdWriter) + metaPacker := storage.NewJSONPacker(uncompressedCounter) + + return &tarSplitData{ + compressed: compressed, + digester: digester, + uncompressedCounter: uncompressedCounter, + zstd: zstdWriter, + packer: metaPacker, + }, nil +} + +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { + // total written so far. 
Used to retrieve partial offsets in the file.
+	dest := ioutils.NewWriteCounter(destFile)
+
+	tarSplitData, err := newTarSplitData(level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if tarSplitData.zstd != nil {
+			tarSplitData.zstd.Close()
+		}
+	}()
+
+	its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
+	if err != nil {
+		return err
+	}
+
+	tr := tar.NewReader(its)
+	tr.RawAccounting = true
+
+	buf := make([]byte, 4096)
+
+	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if zstdWriter != nil {
+			zstdWriter.Close()
+		}
+	}()
+
+	restartCompression := func() (int64, error) {
+		var offset int64
+		if zstdWriter != nil {
+			if err := zstdWriter.Close(); err != nil {
+				return 0, err
+			}
+			offset = dest.Count
+			zstdWriter.Reset(dest)
+		}
+		return offset, nil
+	}
+
+	var metadata []internal.FileMetadata
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		rawBytes := tr.RawBytes()
+		if _, err := zstdWriter.Write(rawBytes); err != nil {
+			return err
+		}
+
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
+
+		// Now handle the payload, if any
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
+		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+		for {
+			mustSplit, read, errRead := rcReader.Read(buf)
+			if errRead != nil && errRead != io.EOF {
+				return errRead
+			}
+			// restart the compression only if there is a payload.
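+			// Closing and reopening the zstd stream via restartCompression is
+			// what makes each file's payload start at a fresh, independently
+			// addressable frame in the output (see the format notes below).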
+ if read > 0 { + if startOffset == 0 { + startOffset, err = restartCompression() + if err != nil { + return err + } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err + } + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() + if err != nil { + return err + } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) + } + if errRead == io.EOF { + if startOffset > 0 { + checksum = payloadDigester.Digest().String() + } + break + } + } + + typ, err := internal.GetType(hdr.Typeflag) + if err != nil { + return err + } + xattrs := make(map[string]string) + for k, v := range hdr.Xattrs { + xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) + } + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) + } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Flush(); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + if err := tarSplitData.zstd.Flush(); err != nil { + return err + } + if err := tarSplitData.zstd.Close(); err != nil { + return err + } + tarSplitData.zstd = nil + + ts := internal.TarSplitData{ + Data: tarSplitData.compressed.Bytes(), + Digest: tarSplitData.digester.Digest(), + UncompressedSize: tarSplitData.uncompressedCounter.Count, + } + + return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + err := <-w.tarSplitErr + if err != nil { + w.tarSplitOut.Close() + return err + } + return w.tarSplitOut.Close() +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. 
Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. +// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, level) + _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + return zstdChunkedWriterWithLevel(r, metadata, *level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000..59df6901 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,85 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const ( + windowSize = 64 // Roll assumes windowSize is a power of 2 + charOffset = 31 +) + +const ( + blobBits = 13 + blobSize = 1 << blobBits // 8k +) + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. 
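+// The window is a fixed 64-byte ring buffer: the incoming byte replaces the
+// oldest one, and add() folds that replacement into the running s1/s2 sums.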
+func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. +func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go new file mode 100644 index 00000000..caa581ef --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -0,0 +1,283 @@ +package internal + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" +) + +type TOC struct { + Version int `json:"version"` + Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` +} + +type FileMetadata struct { + Type string `json:"type"` + Name string `json:"name"` + Linkname string `json:"linkName,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` + Xattrs map[string]string `json:"xattrs,omitempty"` + Digest string `json:"digest,omitempty"` + Offset int64 `json:"offset,omitempty"` + EndOffset int64 `json:"endOffset,omitempty"` + + ChunkSize int64 `json:"chunkSize,omitempty"` + ChunkOffset int64 `json:"chunkOffset,omitempty"` + ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. 
+ Chunks []*FileMetadata `json:"-"` +} + +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + +const ( + TypeReg = "reg" + TypeChunk = "chunk" + TypeLink = "hardlink" + TypeChar = "char" + TypeBlock = "block" + TypeDir = "dir" + TypeFifo = "fifo" + TypeSymlink = "symlink" +) + +var TarTypes = map[byte]string{ + tar.TypeReg: TypeReg, + tar.TypeRegA: TypeReg, + tar.TypeLink: TypeLink, + tar.TypeChar: TypeChar, + tar.TypeBlock: TypeBlock, + tar.TypeDir: TypeDir, + tar.TypeFifo: TypeFifo, + tar.TypeSymlink: TypeSymlink, +} + +func GetType(t byte) (string, error) { + r, found := TarTypes[t] + if !found { + return "", fmt.Errorf("unknown tarball type: %v", t) + } + return r, nil +} + +const ( + ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" + ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" + TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" + TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position" + + // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. + ManifestTypeCRFS = 1 + + // FooterSizeSupported is the footer size supported by this implementation. + // Newer versions of the image format might increase this value, so reject + // any version that is not supported. + FooterSizeSupported = 64 +) + +var ( + // when the zstd decoder encounters a skippable frame + 1 byte for the size, it + // will ignore it. + // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + size := make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +type TarSplitData struct { + Data []byte + Digest digest.Digest + UncompressedSize int64 +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + const zstdSkippableFrameHeader = 8 + manifestOffset := offset + zstdSkippableFrameHeader + + toc := TOC{ + Version: 1, + Entries: metadata, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + outMetadata[TarSplitChecksumKey] = 
tarSplitData.Digest.String() + tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader + outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) + if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { + return err + } + + footer := ZstdChunkedFooterData{ + ManifestType: uint64(ManifestTypeCRFS), + Offset: manifestOffset, + LengthCompressed: uint64(len(compressedManifest)), + LengthUncompressed: uint64(len(manifest)), + ChecksumAnnotation: "", // unused + OffsetTarSplit: uint64(tarSplitOffset), + LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), + LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), + ChecksumAnnotationTarSplit: "", // unused + } + + manifestDataLE := footerDataToBlob(footer) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +type ZstdChunkedFooterData struct { + ManifestType uint64 + + Offset uint64 + LengthCompressed uint64 + LengthUncompressed uint64 + ChecksumAnnotation string // Only used when reading a layer, not when creating it + + OffsetTarSplit uint64 + LengthCompressedTarSplit uint64 + LengthUncompressedTarSplit uint64 + ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it +} + +func footerDataToBlob(footer ZstdChunkedFooterData) []byte { + // Store the offset to the manifest and its size in LE order + manifestDataLE := make([]byte, FooterSizeSupported) + binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset) + binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType) + binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit) + copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic) + + return manifestDataLE +} + +// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations. 
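+// The manifest position annotation has the form
+// "<offset>:<length compressed>:<length uncompressed>:<manifest type>";
+// e.g. (illustrative) "10240:512:2048:1" describes a ManifestTypeCRFS
+// manifest of 512 compressed bytes stored at offset 10240.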
+func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) { + var footerData ZstdChunkedFooterData + + footerData.ChecksumAnnotation = annotations[ManifestChecksumKey] + if footerData.ChecksumAnnotation == "" { + return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey) + } + + offsetMetadata := annotations[ManifestInfoKey] + + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil { + return footerData, err + } + + if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil { + return footerData, err + } + footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey] + } + return footerData, nil +} + +// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer. +func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) { + var footerData ZstdChunkedFooterData + + if len(footer) < FooterSizeSupported { + return footerData, errors.New("blob too small") + } + footerData.Offset = binary.LittleEndian.Uint64(footer[0:8]) + footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16]) + footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) + footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32]) + footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40]) + footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48]) + footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56]) + + // the magic number is stored in the last 8 bytes + if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) { + return footerData, errors.New("invalid magic number") + } + return footerData, nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..9d0714b1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,371 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") + pm.exclusions = true + } + // Do some syntax checking on the pattern. 
+		// filepath's Match() has some really weird rules that are inconsistent
+		// so instead of trying to dup their logic, just call Match() for its
+		// error state and if there is an error in the pattern return it.
+		// If this becomes an issue we can remove this since it's really only
+		// needed in the error (syntax) case - which isn't really critical.
+		if _, err := filepath.Match(p, "."); err != nil {
+			return nil, err
+		}
+		newp.cleanedPattern = p
+		newp.dirs = strings.Split(p, string(os.PathSeparator))
+		pm.patterns = append(pm.patterns, newp)
+	}
+	return pm, nil
+}
+
+// Matches matches path against all the patterns. Matches is not safe to be
+// called concurrently.
+//
+// Deprecated: Please use the `MatchesResult` method instead.
+func (pm *PatternMatcher) Matches(file string) (bool, error) {
+	matched := false
+	file = filepath.FromSlash(file)
+
+	for _, pattern := range pm.patterns {
+		negative := false
+
+		if pattern.exclusion {
+			negative = true
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return false, err
+		}
+
+		if match {
+			matched = !negative
+		}
+	}
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return matched, nil
+}
+
+type MatchResult struct {
+	isMatched         bool
+	matches, excludes uint
+}
+
+// IsMatched returns true if the overall result is a match
+func (m *MatchResult) IsMatched() bool {
+	return m.isMatched
+}
+
+// Matches returns the number of matches of a MatchResult
+func (m *MatchResult) Matches() uint {
+	return m.matches
+}
+
+// Excludes returns the number of excludes of a MatchResult
+func (m *MatchResult) Excludes() uint {
+	return m.excludes
+}
+
+// MatchesResult verifies the provided filepath against all patterns.
+// It returns the `*MatchResult` result for the patterns on success, otherwise
+// an error. This method is not safe to be called concurrently.
+func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
+	file = filepath.FromSlash(file)
+	res = &MatchResult{false, 0, 0}
+
+	for _, pattern := range pm.patterns {
+		negative := false
+
+		if pattern.exclusion {
+			negative = true
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return nil, err
+		}
+
+		if match {
+			res.isMatched = !negative
+			if negative {
+				res.excludes++
+			} else {
+				res.matches++
+			}
+		}
+	}
+
+	if res.matches > 0 {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return res, nil
+}
+
+// IsMatch verifies the provided filepath against all patterns and returns true
+// if it matches. A match is valid if the last match is a positive one.
+// It returns an error on failure and is not safe to be called concurrently.
+func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) {
+	res, err := pm.MatchesResult(file)
+	if err != nil {
+		return false, err
+	}
+	return res.isMatched, nil
+}
+
+// Exclusions returns true if any of the patterns define exclusions
+func (pm *PatternMatcher) Exclusions() bool {
+	return pm.exclusions
+}
+
+// Patterns returns the array of active patterns
+func (pm *PatternMatcher) Patterns() []*Pattern {
+	return pm.patterns
+}
+
+// Pattern defines a single regexp used to filter file paths.
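+//
+// On Unix, compile() translates the pattern syntax to a regexp roughly as
+// follows: "*" becomes "[^/]*", "?" becomes "[^/]", and "**" becomes
+// "(.*/)?" so that it may span directory separators.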
+type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + const bs = `\` + if sl == bs { + escSL += bs + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += bs + string(ch) + } else if ch == '\\' { + // escape next char. + if sl == bs { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += bs + string(scan.Next()) + } else { + return filepath.ErrBadPattern + } + } else { + regStr += string(ch) + } + } + + regStr += "(" + escSL + ".*)?$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.IsMatch(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
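+// For example (illustrative): with /var/run being a symlink to /run,
+// ReadSymlinkedDirectory("/var/run") returns "/run"; calling it on a symlink
+// that resolves to a regular file returns an error.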
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// ReadSymlinkedPath returns the target directory of a symlink. +// The target of the symbolic link can be a file and a directory. +func ReadSymlinkedPath(path string) (realPath string, err error) { + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + } + if _, err := os.Stat(realPath); err != nil { + return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0o755) + } + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0o755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 00000000..ccd648fa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 00000000..0f2cb7ab --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. 
+// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..92e0263d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +//go:build linux || freebsd +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("%v", err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000..85c5e76c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. 
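+// For example (illustrative): with XDG_CACHE_HOME unset and HOME=/home/user,
+// GetCacheHome returns "/home/user/.cache".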
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go new file mode 100644 index 00000000..0883ee02 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -0,0 +1,21 @@ +//go:build !linux && !darwin && !freebsd +// +build !linux,!darwin,!freebsd + +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go new file mode 100644 index 00000000..9976f19a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -0,0 +1,96 @@ +//go:build !windows +// +build !windows + +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "errors" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/unshare" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + homedir, _ := unshare.HomeDir() + return homedir +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return filepath.EvalSymlinks(xdgRuntimeDir) + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil //nolint: nilerr + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..af65f2c0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -0,0 +1,32 @@ +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 00000000..1e2d5bcc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,461 @@ +package idtools + +import ( + "bufio" + "errors" + "fmt" + "os" + "os/user" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" + ContainersOverrideXattr = "user.containers.override_stat" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. 
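+//
+// For example (illustrative), MkdirAllAs("/run/foo/bar", 0o755, 1000, 1000)
+// creates any missing path components and chowns /run/foo/bar, together with
+// any newly created parents, to 1000:1000.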
+// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + var err error + if len(uidMap) == 1 && uidMap[0].Size == 1 { + uid = uidMap[0].HostID + } else { + uid, err = RawToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + } + if len(gidMap) == 1 && gidMap[0].Size == 1 { + gid = gidMap[0].HostID + } else { + gid, err = RawToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + } + return uid, gid, nil +} + +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) +} + +// RawToHost takes an id mapping and a remapped ID, and translates the ID to +// the mapped host ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. 
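+//
+// For example, with idMap = []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
+// RawToHost(0, idMap) returns 100000 and RawToHost(65535, idMap) returns 165535.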
+func RawToHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IDPair is a UID and GID pair
+type IDPair struct {
+	UID int
+	GID int
+}
+
+// IDMappings contains mappings of UIDs and GIDs.
+type IDMappings struct {
+	uids []IDMap
+	gids []IDMap
+}
+
+// NewIDMappings takes a requested user and group name and,
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair.
+func NewIDMappings(username, groupname string) (*IDMappings, error) {
+	subuidRanges, err := readSubuid(username)
+	if err != nil {
+		return nil, err
+	}
+	subgidRanges, err := readSubgid(groupname)
+	if err != nil {
+		return nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName)
+	}
+
+	return &IDMappings{
+		uids: createIDMap(subuidRanges),
+		gids: createIDMap(subgidRanges),
+	}, nil
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
+	return &IDMappings{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IDMappings) RootPair() IDPair {
+	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+	return IDPair{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+	var err error
+	var target IDPair
+
+	target.UID, err = RawToHost(pair.UID, i.uids)
+	if err != nil {
+		return target, err
+	}
+
+	target.GID, err = RawToHost(pair.GID, i.gids)
+	return target, err
+}
+
+var (
+	overflowUIDOnce sync.Once
+	overflowGIDOnce sync.Once
+	overflowUID     int
+	overflowGID     int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+	overflowUIDOnce.Do(func() {
+		// 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+		overflowUID = 65534
+		if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+			if tmp, err := strconv.Atoi(string(content)); err == nil {
+				overflowUID = tmp
+			}
+		}
+	})
+	return overflowUID
+}
+
+// getOverflowGID returns the GID mapped to the overflow user
+func getOverflowGID() int {
+	overflowGIDOnce.Do(func() {
+		// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+		overflowGID = 65534
+		if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+			if tmp, err := strconv.Atoi(string(content)); err == nil {
+				overflowGID = tmp
+			}
+		}
+	})
+	return overflowGID
+}
+
+// ToHostOverflow returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids.
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
+	var err error
+	target := i.RootPair()
+
+	if pair.UID != target.UID {
+		target.UID, err = RawToHost(pair.UID, i.uids)
+		if err != nil {
+			target.UID = getOverflowUID()
+			logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
+		}
+	}
+
+	if pair.GID != target.GID {
+		target.GID, err = RawToHost(pair.GID, i.gids)
+		if err != nil {
+			target.GID = getOverflowGID()
+			logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
+		}
+	}
+	return target, nil
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+	uid, err := RawToContainer(pair.UID, i.uids)
+	if err != nil {
+		return -1, -1, err
+	}
+	gid, err := RawToContainer(pair.GID, i.gids)
+	return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+	return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs returns the UID mapping.
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+	return i.uids
+}
+
+// GIDs returns the GID mapping.
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+	return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username.
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var ( + rangeList ranges + uidstr string + ) + if u, err := user.Lookup(username); err == nil { + uidstr = u.Uid + } + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} + +func checkChownErr(err error, name string, uid, gid int) error { + var e *os.PathError + if errors.As(err, &e) && e.Err == syscall.EINVAL { + return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err) + } + return err +} + +func SafeChown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } + if stat, statErr := system.Stat(name); statErr == nil { + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + } + return checkChownErr(os.Chown(name, uid, gid), name, uid, gid) +} + +func SafeLchown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } + if stat, statErr := system.Lstat(name); statErr == nil { + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + } + return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) +} + +type sortByHostID []IDMap + +func (e sortByHostID) Len() int { return len(e) } +func (e sortByHostID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID } + +type sortByContainerID []IDMap + +func (e sortByContainerID) Len() int { 
return len(e) } +func (e sortByContainerID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID } + +// IsContiguous checks if the specified mapping is contiguous and doesn't +// have any hole. +func IsContiguous(mappings []IDMap) bool { + if len(mappings) < 2 { + return true + } + + var mh sortByHostID = mappings[:] + sort.Sort(mh) + for i := 1; i < len(mh); i++ { + if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size { + return false + } + } + + var mc sortByContainerID = mappings[:] + sort.Sort(mc) + for i := 1; i < len(mc); i++ { + if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go new file mode 100644 index 00000000..03e78737 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -0,0 +1,87 @@ +//go:build linux && cgo && libsubid +// +build linux,cgo,libsubid + +package idtools + +import ( + "errors" + "os/user" + "unsafe" +) + +/* +#cgo LDFLAGS: -l subid +#include +#include +#include +const char *Prog = "storage"; +FILE *shadow_logfd = NULL; + +struct subid_range get_range(struct subid_range *ranges, int i) +{ + shadow_logfd = stderr; + return ranges[i]; +} + +#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_get_uid_ranges get_subuid_ranges +# define subid_get_gid_ranges get_subgid_ranges +#endif + +*/ +import "C" + +func readSubid(username string, isUser bool) (ranges, error) { + var ret ranges + uidstr := "" + + if username == "ALL" { + return nil, errors.New("username ALL not supported") + } + + if u, err := user.Lookup(username); err == nil { + uidstr = u.Uid + } + + cUsername := C.CString(username) + defer C.free(unsafe.Pointer(cUsername)) + + cuidstr := C.CString(uidstr) + defer C.free(unsafe.Pointer(cuidstr)) + + var nRanges C.int + var cRanges *C.struct_subid_range + if isUser { + nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges) + } + } else { + nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges) + } + } + if nRanges < 0 { + return nil, errors.New("cannot read subids") + } + defer C.free(unsafe.Pointer(cRanges)) + + for i := 0; i < int(nRanges); i++ { + r := C.get_range(cRanges, C.int(i)) + newRange := subIDRange{ + Start: int(r.start), + Length: int(r.count), + } + ret = append(ret, newRange) + } + return ret, nil +} + +func readSubuid(username string) (ranges, error) { + return readSubid(username, true) +} + +func readSubgid(username string) (ranges, error) { + return readSubid(username, false) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..4701dc5a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -0,0 +1,214 @@ +//go:build !windows +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) 
error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + st, err := os.Stat(path) + if err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil { + if !st.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return SafeChown(path, ownerUID, ownerGID) + } + // nothing to do; directory exists and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + if !filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := os.MkdirAll(path, mode); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0o100 == 0o100) { + return true + } + if isGroup && (perms&0o010 == 0o010) { + return true + } + if perms&0o001 == 0o001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func 
getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go new file mode 100644 index 00000000..78141fb8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go @@ -0,0 +1,12 @@ +//go:build !linux || !libsubid || !cgo +// +build !linux !libsubid !cgo + +package idtools + +func readSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func readSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..dc69c607 --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,24 @@ +//go:build windows +// +build windows + +package idtools + +import ( + "os" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := os.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go new file mode 100644 index 00000000..042d0ea9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -0,0 +1,59 @@ +package idtools + +import ( + "fmt" + "math" + "math/bits" + "strconv" + "strings" +) + +func parseTriple(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil +} + +// ParseIDMap parses idmap triples from string. +func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { + stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) + for _, idMapSpec := range mapSpec { + if idMapSpec == "" { + continue + } + idSpec := strings.Split(idMapSpec, ":") + if len(idSpec)%3 != 0 { + return nil, stdErr + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + return nil, stdErr + } + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { + return nil, stdErr + } + mapping := IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..ac27718d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "sort" + "strconv" + "strings" + "sync" + + "github.com/containers/storage/pkg/regexp" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s 
%d-%d %s", + } + + idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("adding user %q: %w", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := readSubuid(name) + if err != nil { + return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("can't find available subuid range: %w", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) + } + } + + ranges, err = readSubgid(name) + if err != nil { + return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("can't find available subgid range: %w", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) + } + } + return nil +} + 
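+// findNextUIDRange scans every range in /etc/subuid (via readSubuid("ALL"))
+// and returns the first start ID at or above defaultRangeStart (100000) that
+// leaves room for a defaultRangeLen-sized range without overlapping any
+// existing range.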
+func findNextUIDRange() (int, error) { + ranges, err := readSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := readSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..15bd98ed --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,13 @@ +//go:build !linux +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("no support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 00000000..b3772bdb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,33 @@ +//go:build !windows +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + // only return no error if the final resolved binary basename + // matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go new file mode 100644 index 00000000..3d737b3e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go new file mode 100644 index 00000000..72a04f34 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. 
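+// Write blocks once roughly blockThreshold (1MB) of data is buffered, and
+// resumes when a reader drains the pipe or the pipe is closed.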
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 00000000..2a8c85ad --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,281 @@ +package ioutils + +import ( + "io" + "os" + "path/filepath" + "time" +) + +// AtomicFileWriterOptions specifies options for creating the atomic file writer. +type AtomicFileWriterOptions struct { + // NoSync specifies whether the sync call must be skipped for the file. + // If NoSync is not specified, the file is synced to the + // storage after it has been written and before it is moved to + // the specified path. 
+ NoSync bool + // On successful return from Close() this is set to the mtime of the + // newly written file. + ModTime time.Time + // Specifies whether Commit() must be explicitly called to write state + // to the destination. This allows an application to preserve the original + // file when an error occurs during processing (and not just during write) + // The default is false, which will auto-commit on Close + ExplicitCommit bool +} + +type CommittableWriter interface { + io.WriteCloser + + // Commit closes the temporary file associated with this writer, and + // provided no errors (during commit or previously during write operations), + // will publish the completed file under the intended destination. + Commit() error +} + +var defaultWriterOptions = AtomicFileWriterOptions{} + +// SetDefaultOptions overrides the default options used when creating an +// atomic file writer. +func SetDefaultOptions(opts AtomicFileWriterOptions) { + defaultWriterOptions = opts +} + +// NewAtomicFileWriterWithOpts returns a CommittableWriter so that writing to it +// writes to a temporary file, which can later be committed to a destination path, +// either by Closing in the case of auto-commit, or manually calling commit if the +// ExplicitCommit option is enabled. Writing and closing concurrently is not +// allowed. +func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (CommittableWriter, error) { + return newAtomicFileWriter(filename, perm, opts) +} + +// newAtomicFileWriter returns a CommittableWriter so that writing to it writes to +// a temporary file, which can later be committed to a destination path, either by +// Closing in the case of auto-commit, or manually calling commit if the +// ExplicitCommit option is enabled. Writing and closing concurrently is not allowed. +func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) { + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + if opts == nil { + opts = &defaultWriterOptions + } + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + noSync: opts.NoSync, + explicitCommit: opts.ExplicitCommit, + }, nil +} + +// NewAtomicFileWriterWithOpts returns a CommittableWriter, with auto-commit enabled. +// Writing to it writes to a temporary file and closing it atomically changes the +// temporary file to destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (CommittableWriter, error) { + return NewAtomicFileWriterWithOpts(filename, perm, nil) +} + +// AtomicWriteFile atomically writes data to a file named by filename. 
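+// AtomicWriteFileWithOpts additionally accepts writer options; if opts is nil
+// the package defaults are used, and when a non-nil opts is given its ModTime
+// field is set to the modification time of the newly written file on success.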
+func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error { + f, err := newAtomicFileWriter(filename, perm, opts) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + + if opts != nil { + opts.ModTime = f.modTime + } + + return err +} + +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + return AtomicWriteFileWithOpts(filename, data, perm, nil) +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode + noSync bool + modTime time.Time + closed bool + explicitCommit bool +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) closeTempFile() error { + if w.closed { + return nil + } + + w.closed = true + return w.f.Close() +} + +func (w *atomicFileWriter) Close() error { + return w.complete(!w.explicitCommit) +} + +func (w *atomicFileWriter) Commit() error { + return w.complete(true) +} + +func (w *atomicFileWriter) complete(commit bool) (retErr error) { + if w == nil || w.closed { + return nil + } + + defer func() { + w.closeTempFile() + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + + if commit { + return w.commitState() + } + + return nil +} + +func (w *atomicFileWriter) commitState() error { + // Perform a data only sync (fdatasync()) if supported + if err := w.postDataWrittenSync(); err != nil { + return err + } + + // Capture fstat before closing the fd + info, err := w.f.Stat() + if err != nil { + return err + } + w.modTime = info.ModTime() + + if err := w.f.Chmod(w.perm); err != nil { + return err + } + + // Perform full sync on platforms that need it + if err := w.preRenameSync(); err != nil { + return err + } + + // Some platforms require closing before rename (Windows) + if err := w.closeTempFile(); err != nil { + return err + } + + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := os.MkdirTemp(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. 
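+// The file is opened through FileWriter, so whether the data is synced on
+// Close follows the package-level default writer options.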
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + if !defaultWriterOptions.NoSync { + return w.File.Close() + } + err := dataOrFullSync(w.File) + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go new file mode 100644 index 00000000..10ed48cf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go @@ -0,0 +1,23 @@ +package ioutils + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func dataOrFullSync(f *os.File) error { + return unix.Fdatasync(int(f.Fd())) +} + +func (w *atomicFileWriter) postDataWrittenSync() error { + if w.noSync { + return nil + } + return unix.Fdatasync(int(w.f.Fd())) +} + +func (w *atomicFileWriter) preRenameSync() error { + // On Linux data can be reliably flushed to media without metadata, so defer + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go new file mode 100644 index 00000000..aec161e0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go @@ -0,0 +1,26 @@ +//go:build !linux +// +build !linux + +package ioutils + +import ( + "os" +) + +func dataOrFullSync(f *os.File) error { + return f.Sync() +} + +func (w *atomicFileWriter) postDataWrittenSync() error { + // many platforms (Mac, Windows) require a full sync to reliably flush to media + return nil +} + +func (w *atomicFileWriter) preRenameSync() error { + if w.noSync { + return nil + } + + // fsync() on Non-linux Unix, FlushFileBuffers (Windows), F_FULLFSYNC (Mac) + return w.f.Sync() +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go new file mode 100644 index 00000000..146e1a5f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -0,0 +1,170 @@ +package ioutils + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "io" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return 
r.closer() +} + +type readWriteToCloserWrapper struct { + io.Reader + io.WriterTo + closer func() error +} + +func (r *readWriteToCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + if wt, ok := r.(io.WriterTo); ok { + return &readWriteToCloserWrapper{ + Reader: r, + WriterTo: wt, + closer: closer, + } + } + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. 
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
new file mode 100644
index 00000000..9d5af610
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -0,0 +1,11 @@
+//go:build !windows
+// +build !windows
+
+package ioutils
+
+import "os"
+
+// TempDir on Unix systems is equivalent to os.MkdirTemp.
+func TempDir(dir, prefix string) (string, error) {
+	return os.MkdirTemp(dir, prefix)
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
new file mode 100644
index 00000000..2c2242d6
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -0,0 +1,19 @@
+//go:build windows
+// +build windows
+
+package ioutils
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/longpath"
+)
+
+// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := os.MkdirTemp(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
new file mode 100644
index 00000000..52a4901a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed: it returns true once a
+// flush has happened, and false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
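+// If w implements the flusher interface (that is, it provides a Flush method,
+// as http.ResponseWriter implementations commonly do), it is used directly;
+// otherwise a NopFlusher is substituted and Flush calls become no-ops.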
+func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go new file mode 100644 index 00000000..ccc7f9c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go new file mode 100644 index 00000000..93fb1fea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go @@ -0,0 +1,82 @@ +package lockfile + +import ( + "bytes" + cryptorand "crypto/rand" + "encoding/binary" + "os" + "sync/atomic" + "time" +) + +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// +// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. +type LastWrite struct { + // Never modify fields of a LastWrite object; it has value semantics. + state []byte // Contents of the lock file. +} + +var lastWriterIDCounter uint64 // Private state for newLastWriterID + +const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) +// newLastWrite returns a new "last write" ID. +// The value must be different on every call, and also differ from values +// generated by other processes. +func newLastWrite() LastWrite { + // The ID is (PID, time, per-process counter, random) + // PID + time represents both a unique process across reboots, + // and a specific time within the process; the per-process counter + // is an extra safeguard for in-process concurrency. 
+ // The random part disambiguates across process namespaces + // (where PID values might collide), serves as a general-purpose + // extra safety, _and_ is used to pad the output to lastWriterIDSize, + // because other versions of this code exist and they don't work + // efficiently if the size of the value changes. + pid := os.Getpid() + tm := time.Now().UnixNano() + counter := atomic.AddUint64(&lastWriterIDCounter, 1) + + res := make([]byte, lastWriterIDSize) + binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) + binary.LittleEndian.PutUint64(res[8:16], counter) + binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) + if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { + panic(err) // This shouldn't happen + } + + return LastWrite{ + state: res, + } +} + +// serialize returns bytes to write to the lock file to represent the specified write. +func (lw LastWrite) serialize() []byte { + if lw.state == nil { + panic("LastWrite.serialize on an uninitialized object") + } + return lw.state +} + +// Equals returns true if lw matches other +func (lw LastWrite) equals(other LastWrite) bool { + if lw.state == nil { + panic("LastWrite.equals on an uninitialized object") + } + if other.state == nil { + panic("LastWrite.equals with an uninitialized counterparty") + } + return bytes.Equal(lw.state, other.state) +} + +// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize +func newLastWriteFromData(serialized []byte) LastWrite { + if serialized == nil { + panic("newLastWriteFromData with nil data") + } + return LastWrite{ + state: serialized, + } +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go new file mode 100644 index 00000000..5dd67410 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -0,0 +1,409 @@ +package lockfile + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "time" +) + +// A Locker represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// Deprecated: Refer directly to *LockFile, the provided implementation, instead. +type Locker interface { + // Acquire a writer lock. + // The default unix implementation panics if: + // - opening the lockfile failed + // - tried to lock a read-only lock-file + Lock() + + // Unlock the lock. + // The default unix implementation panics if: + // - unlocking an unlocked lock + // - if the lock counter is corrupted + Unlock() + + // Acquire a reader lock. + RLock() + + // Touch records, for others sharing the lock, that the caller was the + // last writer. It should only be called with the lock held. + // + // Deprecated: Use *LockFile.RecordWrite. + Touch() error + + // Modified() checks if the most recent writer was a party other than the + // last recorded writer. It should only be called with the lock held. + // Deprecated: Use *LockFile.ModifiedSince. + Modified() (bool, error) + + // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. + TouchedSince(when time.Time) bool + + // IsReadWrite() checks if the lock file is read-write + IsReadWrite() bool + + // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking. 
+ // It might do nothing at all, or it may panic if the caller is not the owner of this lock. + AssertLocked() + + // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking. + // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing. + AssertLockedForWriting() +} + +type lockType byte + +const ( + readLock lockType = iota + writeLock +) + +// LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { + // The following fields are only set when constructing *LockFile, and must never be modified afterwards. + // They are safe to access without any other locking. + file string + ro bool + + // rwMutex serializes concurrent reader-writer acquisitions in the same process space + rwMutex *sync.RWMutex + // stateMutex is used to synchronize concurrent accesses to the state below + stateMutex *sync.Mutex + counter int64 + lw LastWrite // A global value valid as of the last .Touch() or .Modified() + lockType lockType + locked bool + // The following fields are only modified on transitions between counter == 0 / counter != 0. + // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. + // In other cases, they need to be protected using stateMutex. + fd fileHandle +} + +var ( + lockFiles map[string]*LockFile + lockFilesLock sync.Mutex +) + +// GetLockFile opens a read-write lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. +func GetLockFile(path string) (*LockFile, error) { + return getLockfile(path, false) +} + +// GetLockfile opens a read-write lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. +// +// Deprecated: Use GetLockFile +func GetLockfile(path string) (Locker, error) { + return GetLockFile(path) +} + +// GetROLockFile opens a read-only lock file, creating it if necessary. The +// *LockFile object may already be locked if the path has already been requested +// by the current process. +func GetROLockFile(path string) (*LockFile, error) { + return getLockfile(path, true) +} + +// GetROLockfile opens a read-only lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. +// +// Deprecated: Use GetROLockFile +func GetROLockfile(path string) (Locker, error) { + return GetROLockFile(path) +} + +// Lock locks the lockfile as a writer. Panic if the lock is a read-only one. +func (l *LockFile) Lock() { + if l.ro { + panic("can't take write lock on read-only lock file") + } else { + l.lock(writeLock) + } +} + +// LockRead locks the lockfile as a reader. +func (l *LockFile) RLock() { + l.lock(readLock) +} + +// Unlock unlocks the lockfile. +func (l *LockFile) Unlock() { + l.stateMutex.Lock() + if !l.locked { + // Panic when unlocking an unlocked lock. That's a violation + // of the lock semantics and will reveal such. + panic("calling Unlock on unlocked lock") + } + l.counter-- + if l.counter < 0 { + // Panic when the counter is negative. There is no way we can + // recover from a corrupted lock and we need to protect the + // storage from corruption. 
+		panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
+	}
+	if l.counter == 0 {
+		// We should only release the lock when the counter is 0 to
+		// avoid releasing read-locks too early; a given process may
+		// acquire a read lock multiple times.
+		l.locked = false
+		// Close the file descriptor on the last unlock, releasing the
+		// file lock.
+		unlockAndCloseHandle(l.fd)
+	}
+	if l.lockType == readLock {
+		l.rwMutex.RUnlock()
+	} else {
+		l.rwMutex.Unlock()
+	}
+	l.stateMutex.Unlock()
+}
+
+func (l *LockFile) AssertLocked() {
+	// DO NOT provide a variant that returns the value of l.locked.
+	//
+	// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
+	// we can’t tell the difference.
+	//
+	// Hence, this “AssertLocked” method, which exists only for sanity checks.
+
+	// Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
+	// with no possible writers.
+	// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
+	// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
+	if !l.locked {
+		panic("internal error: lock is not held by the expected owner")
+	}
+}
+
+func (l *LockFile) AssertLockedForWriting() {
+	// DO NOT provide a variant that returns the current lock state.
+	//
+	// The same caveats as for AssertLocked apply equally.
+
+	l.AssertLocked()
+	// Like AssertLocked, don’t even bother with l.stateMutex.
+	if l.lockType == readLock {
+		panic("internal error: lock is not held for writing")
+	}
+}
+
+// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
+// and returns the one to record instead.
+//
+// If ModifiedSince reports no modification, the previous LastWrite value
+// is still valid and can continue to be used.
+//
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should fail and keep using the previously-recorded LastWrite value,
+// so that it continues failing until the situation is resolved. Similarly,
+// it should only update the recorded LastWrite value after processing the update:
+//
+//	lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
+//	if err != nil { /* fail */ }
+//	state.lastWrite = lw2
+//	if modified {
+//		if err := reload(); err != nil { /* fail */ }
+//		state.lastWrite = lw2
+//	}
+//
+// The caller must hold the lock (for reading or writing).
+func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
+	l.AssertLocked()
+	currentLW, err := l.GetLastWrite()
+	if err != nil {
+		return LastWrite{}, false, err
+	}
+	modified := !previous.equals(currentLW)
+	return currentLW, modified, nil
+}
+
+// Modified indicates if the lockfile has been updated since the last time it
+// was loaded.
+// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
+// Callers cannot, in general, rely on this, because that might have happened for some other
+// owner of the same *LockFile who created it previously.
+//
+// Deprecated: Use *LockFile.ModifiedSince.
+func (l *LockFile) Modified() (bool, error) {
+	l.stateMutex.Lock()
+	if !l.locked {
+		panic("attempted to check last-writer in lockfile without locking it first")
+	}
+	defer l.stateMutex.Unlock()
+	oldLW := l.lw
+	// Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
+	currentLW, modified, err := l.ModifiedSince(oldLW)
+	if err != nil {
+		return true, err
+	}
+	l.lw = currentLW
+	return modified, nil
+}
+
+// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
+//
+// Deprecated: Use *LockFile.RecordWrite.
+func (l *LockFile) Touch() error {
+	lw, err := l.RecordWrite()
+	if err != nil {
+		return err
+	}
+	l.stateMutex.Lock()
+	if !l.locked || (l.lockType == readLock) {
+		panic("attempted to update last-writer in lockfile without the write lock")
+	}
+	defer l.stateMutex.Unlock()
+	l.lw = lw
+	return nil
+}
+
+// IsReadWrite indicates if the lock file is a read-write lock.
+func (l *LockFile) IsReadWrite() bool {
+	return !l.ro
+}
+
+// getLockfile returns a *LockFile object, possibly (depending on the platform)
+// working inter-process, and associated with the specified path.
+//
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func getLockfile(path string, ro bool) (*LockFile, error) {
+	lockFilesLock.Lock()
+	defer lockFilesLock.Unlock()
+	if lockFiles == nil {
+		lockFiles = make(map[string]*LockFile)
+	}
+	cleanPath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
+	}
+	if lockFile, ok := lockFiles[cleanPath]; ok {
+		if ro && lockFile.IsReadWrite() {
+			return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
+		}
+		if !ro && !lockFile.IsReadWrite() {
+			return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
+		}
+		return lockFile, nil
+	}
+	lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile
+	if err != nil {
+		return nil, err
+	}
+	lockFiles[cleanPath] = lockFile
+	return lockFile, nil
+}
+
+// createLockFileForPath returns a new *LockFile object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockFileForPath(path string, ro bool) (*LockFile, error) {
+	// Check if we can open the lock.
+ fd, err := openLock(path, ro) + if err != nil { + return nil, err + } + unlockAndCloseHandle(fd) + + lType := writeLock + if ro { + lType = readLock + } + + return &LockFile{ + file: path, + ro: ro, + + rwMutex: &sync.RWMutex{}, + stateMutex: &sync.Mutex{}, + lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change. + lockType: lType, + locked: false, + }, nil +} + +// openLock opens the file at path and returns the corresponding file +// descriptor. The path is opened either read-only or read-write, +// depending on the value of ro argument. +// +// openLock will create the file and its parent directories, +// if necessary. +func openLock(path string, ro bool) (fd fileHandle, err error) { + flags := os.O_CREATE + if ro { + flags |= os.O_RDONLY + } else { + flags |= os.O_RDWR + } + fd, err = openHandle(path, flags) + if err == nil { + return fd, nil + } + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return fd, fmt.Errorf("creating lock file directory: %w", err) + } + + return openLock(path, ro) + } + + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + +// lock locks the lockfile via syscall based on the specified type and +// command. +func (l *LockFile) lock(lType lockType) { + if lType == readLock { + l.rwMutex.RLock() + } else { + l.rwMutex.Lock() + } + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + if l.counter == 0 { + // If we're the first reference on the lock, we need to open the file again. + fd, err := openLock(l.file, l.ro) + if err != nil { + panic(err) + } + l.fd = fd + + // Optimization: only use the (expensive) syscall when + // the counter is 0. In this case, we're either the first + // reader lock or a writer lock. + lockHandle(l.fd, lType) + } + l.lockType = lType + l.locked = true + l.counter++ +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go new file mode 100644 index 00000000..38e737e2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -0,0 +1,95 @@ +//go:build linux || solaris || darwin || freebsd +// +build linux solaris darwin freebsd + +package lockfile + +import ( + "time" + + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +type fileHandle uintptr + +// GetLastWrite returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing). +func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + contents := make([]byte, lastWriterIDSize) + n, err := unix.Pread(int(l.fd), contents, 0) + if err != nil { + return LastWrite{}, err + } + // It is important to handle the partial read case, because + // the initial size of the lock file is zero, which is a valid + // state (no writes yet) + contents = contents[:n] + return newLastWriteFromData(contents), nil +} + +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. 
+// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. +func (l *LockFile) RecordWrite() (LastWrite, error) { + l.AssertLockedForWriting() + lw := newLastWrite() + lockContents := lw.serialize() + n, err := unix.Pwrite(int(l.fd), lockContents, 0) + if err != nil { + return LastWrite{}, err + } + if n != len(lockContents) { + return LastWrite{}, unix.ENOSPC + } + return lw, nil +} + +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *LockFile) TouchedSince(when time.Time) bool { + st, err := system.Fstat(int(l.fd)) + if err != nil { + return true + } + mtim := st.Mtim() + touched := time.Unix(mtim.Unix()) + return when.Before(touched) +} + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= unix.O_CLOEXEC + fd, err := unix.Open(path, mode, 0o644) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType lockType) { + fType := unix.F_RDLCK + if lType != readLock { + fType = unix.F_WRLCK + } + lk := unix.Flock_t{ + Type: int16(fType), + Whence: int16(unix.SEEK_SET), + Start: 0, + Len: 0, + } + for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) + } +} + +func unlockAndCloseHandle(fd fileHandle) { + unix.Close(int(fd)) +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go new file mode 100644 index 00000000..304c92b1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -0,0 +1,99 @@ +//go:build windows +// +build windows + +package lockfile + +import ( + "os" + "time" + + "golang.org/x/sys/windows" +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +type fileHandle windows.Handle + +// GetLastWrite returns a LastWrite value corresponding to current state of the lock. +// This is typically called before (_not after_) loading the state when initializing a consumer +// of the data protected by the lock. +// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. +// +// The caller must hold the lock (for reading or writing) before this function is called. +func (l *LockFile) GetLastWrite() (LastWrite, error) { + l.AssertLocked() + contents := make([]byte, lastWriterIDSize) + ol := new(windows.Overlapped) + var n uint32 + err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol) + if err != nil && err != windows.ERROR_HANDLE_EOF { + return LastWrite{}, err + } + // It is important to handle the partial read case, because + // the initial size of the lock file is zero, which is a valid + // state (no writes yet) + contents = contents[:n] + return newLastWriteFromData(contents), nil +} + +// RecordWrite updates the lock with a new LastWrite value, and returns the new value. +// +// If this function fails, the LastWriter value of the lock is indeterminate; +// the caller should keep using the previously-recorded LastWrite value, +// and possibly detecting its own modification as an external one: +// +// lw, err := state.lock.RecordWrite() +// if err != nil { /* fail */ } +// state.lastWrite = lw +// +// The caller must hold the lock for writing. 
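Putting the pieces together, a sketch of the writer/reader protocol built from the calls above (single process for brevity; the lock path is hypothetical and error handling is reduced to panics):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	lf, err := lockfile.GetLockFile("/tmp/demo.lock") // hypothetical path
	if err != nil {
		panic(err)
	}

	// Reader side: take a baseline before loading any protected state.
	lf.RLock()
	baseline, err := lf.GetLastWrite()
	if err != nil {
		panic(err)
	}
	lf.Unlock()

	// Writer side: record that the protected data changed.
	lf.Lock()
	if _, err := lf.RecordWrite(); err != nil {
		panic(err)
	}
	lf.Unlock()

	// Reader side again: detect the write and adopt the new LastWrite value.
	lf.RLock()
	baseline, modified, err := lf.ModifiedSince(baseline)
	if err != nil {
		panic(err)
	}
	lf.Unlock()
	fmt.Println("modified:", modified) // true
}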
+func (l *LockFile) RecordWrite() (LastWrite, error) { + l.AssertLockedForWriting() + lw := newLastWrite() + lockContents := lw.serialize() + ol := new(windows.Overlapped) + var n uint32 + err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol) + if err != nil { + return LastWrite{}, err + } + if int(n) != len(lockContents) { + return LastWrite{}, windows.ERROR_DISK_FULL + } + return lw, nil +} + +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *LockFile) TouchedSince(when time.Time) bool { + stat, err := os.Stat(l.file) + if err != nil { + return true + } + return when.Before(stat.ModTime()) +} + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= windows.O_CLOEXEC + fd, err := windows.Open(path, mode, windows.S_IWRITE) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType lockType) { + flags := 0 + if lType != readLock { + flags = windows.LOCKFILE_EXCLUSIVE_LOCK + } + ol := new(windows.Overlapped) + if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { + panic(err) + } +} + +func unlockAndCloseHandle(fd fileHandle) { + ol := new(windows.Overlapped) + windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) + windows.Close(windows.Handle(fd)) +} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go new file mode 100644 index 00000000..9b15bfff --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. 
+ path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 00000000..5de3a671 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// ParseOptions parses fstab type mount options into mount() flags +// and device specific data +func ParseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := ParseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000..3ba99cf9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 00000000..0425d0dd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. 
This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000..ee0f593a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,32 @@ +//go:build !linux && !freebsd +// +build !linux,!freebsd + +package mount + +// These flags are unsupported. 
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 00000000..23c5c44a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,110 @@ +package mount + +import ( + "sort" + "strconv" + "strings" +) + +// mountError holds an error from a mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +// Error returns a string representation of mountError +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + +// Unwrap returns the underlying cause of the error +func (e *mountError) Unwrap() error { + return e.err +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, data := ParseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return mount(device, target, mType, uintptr(flag), data) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := ParseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + return unmount(target, mntDetach) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepest mount first. +func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + return err + // Ignore errors for submounts and continue trying to unmount others + // The final unmount should fail if there are any submounts remaining + } + } + return nil +} + +// ForceUnmount lazily unmounts a filesystem on supported platforms, +// otherwise does a normal unmount. 
+//
+// Deprecated: please use Unmount instead, it is identical.
+func ForceUnmount(target string) error {
+	return unmount(target, mntDetach)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000..c70b0bf9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,68 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	options := []string{"fspath", target}
+
+	if data != "" {
+		xs := strings.Split(data, ",")
+		for _, x := range xs {
+			if x == "bind" {
+				isNullFS = true
+				continue
+			}
+			opt := strings.SplitN(x, "=", 2)
+			options = append(options, opt[0])
+			if len(opt) == 2 {
+				options = append(options, opt[1])
+			} else {
+				options = append(options, "")
+			}
+		}
+	}
+
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("failed to call nmount: %s", reason)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
new file mode 100644
index 00000000..594cd088
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
+	pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+	// broflags is the combination of bind and read only
+	broflags = unix.MS_BIND | unix.MS_RDONLY
+
+	none = "none"
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+	switch {
+	// We treat device "" and "none" as a remount request to provide compatibility with
+	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+	case flags&unix.MS_REMOUNT != 0, device == "", device == none:
+		return true
+	default:
+		return false
+	}
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+	oflags := flags &^ ptypes
+	if !isremount(device, flags) || data != "" {
+		// Initial call applying all non-propagation flags for mount
+		// or remount with changed data
+		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+			return &mountError{
+				op:     "mount",
+				source: device,
+				target: target,
+				flags:  oflags,
+				data:   data,
+				err:    err,
+			}
+		}
+	}
+
+	if flags&ptypes != 0 {
+		// Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + } + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000..74fe6660 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,9 @@ +//go:build !linux && !(freebsd && cgo) +// +build !linux +// +build !freebsd !cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 00000000..bb2da474 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,13 @@ +package mount + +import ( + "github.com/moby/sys/mountinfo" +) + +type Info = mountinfo.Info + +var Mounted = mountinfo.Mounted + +func GetMounts() ([]*Info, error) { + return mountinfo.GetMounts(nil) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000..cbc0299f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,5 @@ +package mount + +import "github.com/moby/sys/mountinfo" + +var PidMountInfo = mountinfo.PidMountInfo diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000..80922ad5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,64 @@ +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, SHARED) +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, RSHARED) +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, PRIVATE) +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, RPRIVATE) +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, SLAVE) +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. 
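A usage sketch for these helpers (Linux only, requires mount privileges; the path is hypothetical), using MakeRSlave as defined just below:

package main

import (
	"github.com/containers/storage/pkg/mount"
)

func main() {
	// ensureMountedAs bind-mounts the directory over itself if it is not
	// already a mount point, then marks the whole subtree rslave so host
	// mount events propagate in while mounts made here do not leak back out.
	if err := mount.MakeRSlave("/var/lib/mystore"); err != nil { // hypothetical path
		panic(err)
	}
}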
+func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, RSLAVE) +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, UNBINDABLE) +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, RUNBINDABLE) +} + +func ensureMountedAs(mnt string, flags int) error { + mounted, err := Mounted(mnt) + if err != nil { + return err + } + + if !mounted { + if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil { + return err + } + } + + return mount("", mnt, "none", uintptr(flags), "") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go new file mode 100644 index 00000000..a2a1d407 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -0,0 +1,35 @@ +//go:build !windows +// +build !windows + +package mount + +import ( + "time" + + "golang.org/x/sys/unix" +) + +func unmount(target string, flags int) error { + var err error + for i := 0; i < 50; i++ { + err = unix.Unmount(target, flags) + switch err { + case unix.EBUSY: + time.Sleep(50 * time.Millisecond) + continue + case unix.EINVAL, nil: + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + break + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go new file mode 100644 index 00000000..d3a0cf51 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package mount + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 00000000..a15e3688 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. 
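A usage sketch for the pools defined in this file (Copy and the Get/Put pairs appear below; assumes only the vendored import path):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/containers/storage/pkg/pools"
)

func main() {
	// Reuse a pooled 32K bufio.Reader instead of allocating one per copy.
	n, err := pools.Copy(os.Stdout, strings.NewReader("pooled copy\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, "bytes copied")

	// Or manage a pooled reader explicitly.
	br := pools.BufioReader32KPool.Get(strings.NewReader("one line\n"))
	line, _ := br.ReadString('\n')
	pools.BufioReader32KPool.Put(br)
	fmt.Print(line)
}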
+type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go new file mode 100644 index 00000000..dd52b908 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. 
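A usage sketch for the Go helper whose body follows (assumes only the vendored import path):

package main

import (
	"errors"
	"fmt"

	"github.com/containers/storage/pkg/promise"
)

func main() {
	// Run the work in a goroutine and collect its error later; the channel
	// is buffered, so the goroutine never blocks on delivery.
	errCh := promise.Go(func() error {
		return errors.New("work failed")
	})

	// ... do other things concurrently ...

	if err := <-errCh; err != nil {
		fmt.Println(err)
	}
}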
+func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md new file mode 100644 index 00000000..6658f69b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000..32d6a9f4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,38 @@ +//go:build freebsd +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go new file mode 100644 index 00000000..87b43ed9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -0,0 +1,35 @@ +//go:build linux +// +build linux + +package reexec + +import ( + "context" + "os/exec" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). 
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go new file mode 100644 index 00000000..a56ada21 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -0,0 +1,33 @@ +//go:build solaris || darwin +// +build solaris darwin + +package reexec + +import ( + "context" + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go new file mode 100644 index 00000000..77c93b4a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -0,0 +1,21 @@ +//go:build !linux && !windows && !freebsd && !solaris && !darwin +// +build !linux,!windows,!freebsd,!solaris,!darwin + +package reexec + +import ( + "context" + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() + return nil +} + +// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go new file mode 100644 index 00000000..c46125eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -0,0 +1,35 @@ +//go:build windows +// +build windows + +package reexec + +import ( + "context" + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". 
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go new file mode 100644 index 00000000..0c032e6c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -0,0 +1,66 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true + if exists { + initializer() + + return true + } + return false +} + +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + +func naiveSelf() string { //nolint: unused + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go new file mode 100644 index 00000000..1a3333db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp.go @@ -0,0 +1,234 @@ +package regexp + +import ( + "io" + "regexp" + "sync" +) + +// Regexp is a wrapper struct used for wrapping MustCompile regex expressions +// used as global variables. Using this structure helps speed the startup time +// of apps that want to use global regex variables. This library initializes them on +// first use as opposed to the start of the executable. 
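The doc comment above explains the goal: package-level patterns that compile on first use instead of at program startup (unless the regexp_precompile build tag flips the precompile constant, see below). A minimal usage sketch; the pattern itself is illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/regexp"
)

// Compiled lazily on first use; with -tags regexp_precompile it is
// compiled here, at init time, exactly like regexp.MustCompile.
var sha256Re = regexp.Delayed(`^[a-f0-9]{64}$`)

func main() {
	fmt.Println(sha256Re.MatchString("deadbeef")) // false: only 8 hex chars
}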
+type Regexp struct { + *regexpStruct +} + +type regexpStruct struct { + _ noCopy + once sync.Once + regexp *regexp.Regexp + val string +} + +func Delayed(val string) Regexp { + re := ®expStruct{ + val: val, + } + if precompile { + re.regexp = regexp.MustCompile(re.val) + } + return Regexp{re} +} + +func (re *regexpStruct) compile() { + if precompile { + return + } + re.once.Do(func() { + re.regexp = regexp.MustCompile(re.val) + }) +} + +func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte { + re.compile() + return re.regexp.Expand(dst, template, src, match) +} + +func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte { + re.compile() + return re.regexp.ExpandString(dst, template, src, match) +} + +func (re *regexpStruct) Find(b []byte) []byte { + re.compile() + return re.regexp.Find(b) +} + +func (re *regexpStruct) FindAll(b []byte, n int) [][]byte { + re.compile() + return re.regexp.FindAll(b, n) +} + +func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int { + re.compile() + return re.regexp.FindAllIndex(b, n) +} + +func (re *regexpStruct) FindAllString(s string, n int) []string { + re.compile() + return re.regexp.FindAllString(s, n) +} + +func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int { + re.compile() + return re.regexp.FindAllStringIndex(s, n) +} + +func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string { + re.compile() + return re.regexp.FindAllStringSubmatch(s, n) +} + +func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int { + re.compile() + return re.regexp.FindAllStringSubmatchIndex(s, n) +} + +func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte { + re.compile() + return re.regexp.FindAllSubmatch(b, n) +} + +func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int { + re.compile() + return re.regexp.FindAllSubmatchIndex(b, n) +} + +func (re *regexpStruct) FindIndex(b []byte) (loc []int) { + re.compile() + return re.regexp.FindIndex(b) +} + +func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) { + re.compile() + return re.regexp.FindReaderIndex(r) +} + +func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int { + re.compile() + return re.regexp.FindReaderSubmatchIndex(r) +} + +func (re *regexpStruct) FindString(s string) string { + re.compile() + return re.regexp.FindString(s) +} + +func (re *regexpStruct) FindStringIndex(s string) (loc []int) { + re.compile() + return re.regexp.FindStringIndex(s) +} + +func (re *regexpStruct) FindStringSubmatch(s string) []string { + re.compile() + return re.regexp.FindStringSubmatch(s) +} + +func (re *regexpStruct) FindStringSubmatchIndex(s string) []int { + re.compile() + return re.regexp.FindStringSubmatchIndex(s) +} + +func (re *regexpStruct) FindSubmatch(b []byte) [][]byte { + re.compile() + return re.regexp.FindSubmatch(b) +} + +func (re *regexpStruct) FindSubmatchIndex(b []byte) []int { + re.compile() + return re.regexp.FindSubmatchIndex(b) +} + +func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) { + re.compile() + return re.regexp.LiteralPrefix() +} + +func (re *regexpStruct) Longest() { + re.compile() + re.regexp.Longest() +} + +func (re *regexpStruct) Match(b []byte) bool { + re.compile() + return re.regexp.Match(b) +} + +func (re *regexpStruct) MatchReader(r io.RuneReader) bool { + re.compile() + return re.regexp.MatchReader(r) +} + +func (re *regexpStruct) MatchString(s string) bool { + re.compile() + return 
re.regexp.MatchString(s) +} + +func (re *regexpStruct) NumSubexp() int { + re.compile() + return re.regexp.NumSubexp() +} + +func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAll(src, repl) +} + +func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + re.compile() + return re.regexp.ReplaceAllFunc(src, repl) +} + +func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte { + re.compile() + return re.regexp.ReplaceAllLiteral(src, repl) +} + +func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllLiteralString(src, repl) +} + +func (re *regexpStruct) ReplaceAllString(src, repl string) string { + re.compile() + return re.regexp.ReplaceAllString(src, repl) +} + +func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string { + re.compile() + return re.regexp.ReplaceAllStringFunc(src, repl) +} + +func (re *regexpStruct) Split(s string, n int) []string { + re.compile() + return re.regexp.Split(s, n) +} + +func (re *regexpStruct) String() string { + re.compile() + return re.regexp.String() +} + +func (re *regexpStruct) SubexpIndex(name string) int { + re.compile() + return re.regexp.SubexpIndex(name) +} + +func (re *regexpStruct) SubexpNames() []string { + re.compile() + return re.regexp.SubexpNames() +} + +// noCopy may be added to structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +// +// Note that it must not be embedded, due to the Lock and Unlock methods. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go new file mode 100644 index 00000000..834dd943 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go @@ -0,0 +1,6 @@ +//go:build !regexp_precompile +// +build !regexp_precompile + +package regexp + +const precompile = false diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go new file mode 100644 index 00000000..a5fe0dbc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go @@ -0,0 +1,6 @@ +//go:build regexp_precompile +// +build regexp_precompile + +package regexp + +const precompile = true diff --git a/vendor/github.com/containers/storage/pkg/system/chmod.go b/vendor/github.com/containers/storage/pkg/system/chmod.go new file mode 100644 index 00000000..a01d8abf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chmod.go @@ -0,0 +1,17 @@ +package system + +import ( + "errors" + "os" + "syscall" +) + +func Chmod(name string, mode os.FileMode) error { + err := os.Chmod(name, mode) + + for err != nil && errors.Is(err, syscall.EINTR) { + err = os.Chmod(name, mode) + } + + return err +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 00000000..056d1995 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func 
Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 00000000..1ce4c0d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,15 @@ +//go:build !windows +// +build !windows + +package system + +import ( + "time" +) + +// setCTime will set the create time on a file. On Unix, the create +// time is updated as a side effect of setting the modified time, so +// no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 00000000..7a3d7937 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,29 @@ +//go:build windows +// +build windows + +package system + +import ( + "time" + + "golang.org/x/sys/windows" +) + +// setCTime will set the create time on a file. On Windows, this requires +// calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 00000000..b87d419b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,8 @@ +package system + +import ( + "errors" +) + +// ErrNotSupportedPlatform means the platform is not supported. +var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go new file mode 100644 index 00000000..60f0514b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. 
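The two helpers defined just below normalize the error returned by exec.Cmd.Run into a numeric status. A short sketch of typical use (Unix shell assumed for the example command):

package main

import (
	"fmt"
	"os/exec"

	"github.com/containers/storage/pkg/system"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	// ProcessExitCode returns 0 on a nil error, the real exit status
	// when the process exited, and 127 when no status is recoverable.
	fmt.Println(system.ProcessExitCode(err)) // 3
}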
+func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 00000000..05642f60 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// maxTime is used by chtimes. +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 00000000..5f6fea1d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,16 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go new file mode 100644 index 00000000..4eaeb5d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go @@ -0,0 +1,56 @@ +//go:build freebsd +// +build freebsd + +package system + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Flag values from +const ( + /* + * Definitions of flags stored in file flags word. + * + * Super-user and owner changeable flags. + */ + UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */ + UF_NODUMP uint32 = 0x00000001 /* do not dump file */ + UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */ + UF_APPEND uint32 = 0x00000004 /* writes to file may only append */ + UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt. union */ + UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */ + + UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */ + UF_SPARSE uint32 = 0x00000100 /* sparse file */ + UF_OFFLINE uint32 = 0x00000200 /* file is offline */ + UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */ + UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */ + UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */ + /* This is the same as the MacOS X definition of UF_HIDDEN. 
*/ + UF_HIDDEN uint32 = 0x00008000 /* file is hidden */ + + /* + * Super-user changeable flags. + */ + SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */ + SF_ARCHIVED uint32 = 0x00010000 /* file is archived */ + SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */ + SF_APPEND uint32 = 0x00040000 /* writes to file may only append */ + SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */ + SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */ +) + +func Lchflags(path string, flags uint32) error { + p, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + return e1 + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/lchown.go b/vendor/github.com/containers/storage/pkg/system/lchown.go new file mode 100644 index 00000000..eb2d8b46 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lchown.go @@ -0,0 +1,20 @@ +package system + +import ( + "os" + "syscall" +) + +func Lchown(name string, uid, gid int) error { + err := syscall.Lchown(name, uid, gid) + + for err == syscall.EINTR { + err = syscall.Lchown(name, uid, gid) + } + + if err != nil { + return &os.PathError{Op: "lchown", Path: name, Err: err} + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go new file mode 100644 index 00000000..42658c8b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -0,0 +1,9 @@ +//go:build !windows +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go new file mode 100644 index 00000000..e54d01e6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go new file mode 100644 index 00000000..9b13e614 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package system + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go new file mode 100644 index 00000000..e51df0da --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. 
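These UF_*/SF_* bits pair with the Lchflags wrapper above; resetFileFlags later in this patch clears them so immutable trees can be deleted. A minimal FreeBSD-only sketch (the path is hypothetical):

//go:build freebsd

package main

import "github.com/containers/storage/pkg/system"

func main() {
	// Clear every file flag (e.g. UF_IMMUTABLE) so the file can be
	// modified or removed again.
	if err := system.Lchflags("/tmp/protected", 0); err != nil {
		panic(err)
	}
}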
+func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go new file mode 100644 index 00000000..3b6e947e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go new file mode 100644 index 00000000..37da93aa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go @@ -0,0 +1,86 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + +package system + +import ( + "errors" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" +) + +// #include +// #include +// #include +// #include +import "C" + +func getMemInfo() (int64, int64, error) { + data, err := unix.SysctlRaw("vm.vmtotal") + if err != nil { + return -1, -1, fmt.Errorf("can't get kernel info: %w", err) + } + if len(data) != C.sizeof_struct_vmtotal { + return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data)) + } + + total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0])) + + pagesize := int64(C.sysconf(C._SC_PAGESIZE)) + npages := int64(C.sysconf(C._SC_PHYS_PAGES)) + return pagesize * npages, pagesize * int64(total.t_free), nil +} + +func getSwapInfo() (int64, int64, error) { + var ( + total int64 = 0 + used int64 = 0 + ) + swapCount, err := unix.SysctlUint32("vm.nswapdev") + if err != nil { + return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err) + } + for i := 0; i < int(swapCount); i++ { + data, err := unix.SysctlRaw("vm.swap_info", i) + if err != nil { + return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err) + } + if len(data) != C.sizeof_struct_xswdev { + return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data)) + } + xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0])) + total += int64(xsw.xsw_nblks) + used += int64(xsw.xsw_used) + } + pagesize := int64(C.sysconf(C._SC_PAGESIZE)) + return pagesize * total, pagesize * (total - used), nil +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// +// MemInfo type. 
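MemInfo (defined above) is what every per-platform ReadMemInfo variant returns, so call sites are uniform; a sketch:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		// ErrNotSupportedPlatform on builds without an implementation.
		panic(err)
	}
	fmt.Printf("RAM %d/%d bytes free, swap %d/%d bytes free\n",
		mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
}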
+func ReadMemInfo() (*MemInfo, error) { + MemTotal, MemFree, err := getMemInfo() + if err != nil { + return nil, fmt.Errorf("getting memory totals %w", err) + } + SwapTotal, SwapFree, err := getSwapInfo() + if err != nil { + return nil, fmt.Errorf("getting swap totals %w", err) + } + + if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { + return nil, errors.New("getting system memory info") + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go new file mode 100644 index 00000000..385f1d5e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go new file mode 100644 index 00000000..a90b23e0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -0,0 +1,130 @@ +//go:build solaris && cgo +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo CFLAGS: -std=c99 +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("getting system memory info %w", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..0f9feb1d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -0,0 +1,12 @@ +//go:build !linux && !windows && !solaris && !(freebsd && cgo) +// +build !linux +// +build !windows +// +build !solaris +// +build !freebsd !cgo + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..c833f30f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -0,0 +1,46 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 00000000..d3d0ed8a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,23 @@ +//go:build !windows && !freebsd +// +build !windows,!freebsd + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev uint32) error { + return unix.Mknod(path, mode, int(dev)) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go new file mode 100644 index 00000000..53c3f283 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go @@ -0,0 +1,23 @@ +//go:build freebsd +// +build freebsd + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev uint64) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint64 { + return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 00000000..c35b1b34 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,14 @@ +//go:build windows +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
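The Unix Mkdev variants above pack the historical Linux dev_t layout: minor in the low 8 bits, major in the next 12, and the remaining minor bits on top. A worked example, assuming the Linux variant:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// /dev/sda1 is conventionally major 8, minor 1.
	d := system.Mkdev(8, 1)
	// 0x801 = (minor & 0xff) | (major << 8); the high minor bits are 0 here.
	fmt.Printf("%#x\n", d)
}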
+func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go new file mode 100644 index 00000000..ca076f2b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -0,0 +1,20 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 00000000..ff01143e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package system + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 00000000..9f250973 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. 
Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("relative path not specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go new file mode 100644 index 00000000..7ee59d92 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -0,0 +1,25 @@ +//go:build linux || freebsd || solaris || darwin +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + _ = unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go new file mode 100644 index 00000000..5917fa25 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -0,0 +1,99 @@ +package system + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "github.com/containers/storage/pkg/mount" + "github.com/sirupsen/logrus" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 100 + + // Attempt a simple remove all first, this avoids the more expensive + // RecursiveUnmount call if not needed. + if err := os.RemoveAll(dir); err == nil { + return nil + } + + // Attempt to unmount anything beneath this dir first + if err := mount.RecursiveUnmount(dir); err != nil { + logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err) + } + + for { + err := os.RemoveAll(dir) + if err == nil { + return nil + } + + // If the RemoveAll fails with a permission error, we + // may have immutable files so try to remove the + // immutable flag and redo the RemoveAll. 
+ if errors.Is(err, syscall.EPERM) { + if err = resetFileFlags(dir); err != nil { + return fmt.Errorf("resetting file flags: %w", err) + } + err = os.RemoveAll(dir) + if err == nil { + return nil + } + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if !IsEBUSY(pe.Err) { + return err + } + + if e := mount.Unmount(pe.Path); e != nil { + return fmt.Errorf("while removing %s: %w", dir, e) + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go new file mode 100644 index 00000000..117eb1d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go @@ -0,0 +1,10 @@ +//go:build !freebsd +// +build !freebsd + +package system + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. +func resetFileFlags(dir string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go new file mode 100644 index 00000000..39a5de7a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go @@ -0,0 +1,17 @@ +package system + +import ( + "io/fs" + "path/filepath" +) + +// Reset file flags in a directory tree. This allows EnsureRemoveAll +// to delete trees which have the immutable flag set. 
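EnsureRemoveAll above is the hardened counterpart to os.RemoveAll; callers treat it as a drop-in replacement when the tree may contain mounts or flag-protected files. A sketch (the directory path is hypothetical):

package main

import "github.com/containers/storage/pkg/system"

func main() {
	// Retries through transient EBUSY/EPERM, unmounting and clearing
	// file flags as needed; never reports "does not exist" as an error.
	if err := system.EnsureRemoveAll("/var/lib/mystore/layer-tmp"); err != nil {
		panic(err)
	}
}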
+func resetFileFlags(dir string) error { + return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err := Lchflags(path, 0); err != nil { + return err + } + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go new file mode 100644 index 00000000..2f44d18b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go @@ -0,0 +1,12 @@ +//go:build !freebsd +// +build !freebsd + +package system + +type platformStatT struct{} + +// Flags return file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms) + return 0 +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go new file mode 100644 index 00000000..57850a88 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -0,0 +1,15 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{ + size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 00000000..4b95073a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,28 @@ +package system + +import "syscall" + +type platformStatT struct { + flags uint32 +} + +// Flags return file flags if supported or zero otherwise +func (s StatT) Flags() uint32 { + return s.flags +} + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + st := &StatT{ + size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec, + dev: s.Dev, + } + st.flags = s.Flags + st.dev = s.Dev + return st, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go new file mode 100644 index 00000000..e3d13463 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -0,0 +1,22 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{ + size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim, + dev: uint64(s.Dev), + }, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 00000000..a413e171 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{ + size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 00000000..a413e171 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,15 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{ + size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go new file mode 100644 index 00000000..e552e91d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -0,0 +1,88 @@ +//go:build !windows +// +build !windows + +package system + +import ( + "os" + "strconv" + "syscall" + + "golang.org/x/sys/unix" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec + dev uint64 + platformStatT +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return s.dev +} + +func (s StatT) IsDir() bool { + return (s.mode & unix.S_IFDIR) != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} + +// Fstat takes an open file descriptor and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file descriptor is invalid +func Fstat(fd int) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Fstat(fd, s); err != nil { + return nil, &os.PathError{Op: "Fstat", Path: strconv.Itoa(fd), Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go new file mode 100644 index 00000000..828be208 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -0,0 +1,74 @@ +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time + platformStatT +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// UID returns file's user id of owner. +// +// on windows this is always 0 because there is no concept of UID +func (s StatT) UID() uint32 { + return 0 +} + +// GID returns file's group id of owner. +// +// on windows this is always 0 because there is no concept of GID +func (s StatT) GID() uint32 { + return 0 +} + +// Dev returns a unique identifier for owning filesystem +func (s StatT) Dev() uint64 { + return 0 +} + +func (s StatT) IsDir() bool { + return s.Mode().IsDir() +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime(), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go new file mode 100644 index 00000000..c4816c13 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -0,0 +1,27 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package system + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} + +// IsEBUSY checks if the specified error is EBUSY. 
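StatT deliberately hides the platform stat structures behind accessors, so the same call site compiles against both the Unix and Windows definitions above; a sketch:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	st, err := system.Lstat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	// UID/GID are real on Unix and always 0 on Windows.
	fmt.Println(st.Size(), st.IsDir(), st.UID(), st.GID())
}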
+func IsEBUSY(err error) bool { + return errors.Is(err, unix.EBUSY) +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 00000000..f4d8692c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,127 @@ +package system + +import ( + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
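GetOSVersion above unpacks the GetVersion dword (major in the low byte, minor in the next byte, build in the high word), and CommandLineToArgv, defined next, wraps the Windows argv splitter. A Windows-only sketch:

//go:build windows

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	osv := system.GetOSVersion()
	fmt.Printf("%d.%d build %d\n", osv.MajorVersion, osv.MinorVersion, osv.Build)

	args, err := system.CommandLineToArgv(`docker run "hello world"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // [docker run hello world]
}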
+func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} + +// IsEBUSY checks if the specified error is EBUSY. +func IsEBUSY(err error) bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 00000000..ad0337db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,14 @@ +//go:build !windows +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 00000000..9497596a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,10 @@ +//go:build windows +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..edc588a6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -0,0 +1,25 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go new file mode 100644 index 00000000..edc588a6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. 
+// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000..843ecdc5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -0,0 +1,11 @@ +//go:build !linux && !freebsd +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go new file mode 100644 index 00000000..75275b96 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go @@ -0,0 +1,84 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENOATTR: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. 
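+// A hedged usage sketch (the attribute names shown are illustrative):
+//
+//	attrs, err := system.Llistxattr("/tmp/f")
+//	// e.g. attrs == []string{"com.apple.quarantine", "user.note"}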
+func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go new file mode 100644 index 00000000..6b47c4e7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -0,0 +1,87 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. 
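+// The kernel returns the list as a NUL-separated byte sequence
+// ("user.a\x00user.b\x00"), which the loop below splits into names.
+// A hedged usage sketch combining it with Lgetxattr:
+//
+//	names, err := system.Llistxattr("/tmp/f")
+//	if err == nil {
+//		for _, name := range names {
+//			value, _ := system.Lgetxattr("/tmp/f", name)
+//			fmt.Printf("%s=%q\n", name, value)
+//		}
+//	}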
+func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..8bd7acf1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,32 @@ +//go:build !linux && !darwin +// +build !linux,!darwin + +package system + +import "syscall" + +const ( + // Value is larger than the maximum size allowed + E2BIG syscall.Errno = syscall.Errno(0) + + // Operation not supported + EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) +) + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} + +// Llistxattr is not supported on platforms other than linux. 
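+// (Note that the build constraints above also exclude darwin, which has its
+// own implementation, so these stubs cover only the remaining platforms.)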
+func Llistxattr(path string) ([]string, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go new file mode 100644 index 00000000..08dbc661 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go @@ -0,0 +1,23 @@ +//go:build linux && cgo +// +build linux,cgo + +package unshare + +import ( + "unsafe" +) + +/* +#cgo remoteclient CFLAGS: -Wall -Werror +#include +*/ +import "C" + +func getenv(name string) string { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + value := C.GoString(C.getenv(cName)) + + return value +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go new file mode 100644 index 00000000..25054810 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go @@ -0,0 +1,12 @@ +//go:build linux && !cgo +// +build linux,!cgo + +package unshare + +import ( + "os" +) + +func getenv(name string) string { + return os.Getenv(name) +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c new file mode 100644 index 00000000..f5a7c3a2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c @@ -0,0 +1,378 @@ +#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Open Source projects like conda-forge, want to package podman and are based + off of centos:6, Conda-force has minimal libc requirements and is lacking + the memfd.h file, so we use mmam.h +*/ +#ifndef MFD_ALLOW_SEALING +#define MFD_ALLOW_SEALING 2U +#endif +#ifndef MFD_CLOEXEC +#define MFD_CLOEXEC 1U +#endif + +#ifndef F_LINUX_SPECIFIC_BASE +#define F_LINUX_SPECIFIC_BASE 1024 +#endif +#ifndef F_ADD_SEALS +#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9) +#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10) +#endif +#ifndef F_SEAL_SEAL +#define F_SEAL_SEAL 0x0001LU +#endif +#ifndef F_SEAL_SHRINK +#define F_SEAL_SHRINK 0x0002LU +#endif +#ifndef F_SEAL_GROW +#define F_SEAL_GROW 0x0004LU +#endif +#ifndef F_SEAL_WRITE +#define F_SEAL_WRITE 0x0008LU +#endif + +#define BUFSTEP 1024 + +static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces"; +static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; + +static int _containers_unshare_parse_envint(const char *envname) { + char *p, *q; + long l; + + p = getenv(envname); + if (p == NULL) { + return -1; + } + q = NULL; + l = strtol(p, &q, 10); + if ((q == NULL) || (*q != '\0')) { + fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); + _exit(1); + } + unsetenv(envname); + return l; +} + +static void _check_proc_sys_file(const char *path) +{ + FILE *fp; + char buf[32]; + size_t n_read; + long r; + + fp = fopen(path, "r"); + if (fp == NULL) { + if (errno != ENOENT) + fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces); + } else { + memset(buf, 0, sizeof(buf)); + n_read = fread(buf, 1, sizeof(buf) - 1, fp); + if (n_read > 0) { + r = atoi(buf); + if (r == 0) { + fprintf(stderr, "User namespaces are not enabled in %s.\n", path); + } + } else { + fprintf(stderr, "Error reading %s: no 
contents, should contain a number greater than 0.\n", path); + } + fclose(fp); + } +} + +static char **parse_proc_stringlist(const char *list) { + int fd, n, i, n_strings; + char *buf, *new_buf, **ret; + size_t size, new_size, used; + + fd = open(list, O_RDONLY); + if (fd == -1) { + return NULL; + } + buf = NULL; + size = 0; + used = 0; + for (;;) { + new_size = used + BUFSTEP; + new_buf = realloc(buf, new_size); + if (new_buf == NULL) { + free(buf); + fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP)); + return NULL; + } + buf = new_buf; + size = new_size; + memset(buf + used, '\0', size - used); + n = read(fd, buf + used, size - used - 1); + if (n < 0) { + fprintf(stderr, "read(): %m\n"); + return NULL; + } + if (n == 0) { + break; + } + used += n; + } + close(fd); + n_strings = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + n_strings++; + } + } + ret = calloc(n_strings + 1, sizeof(char *)); + if (ret == NULL) { + fprintf(stderr, "calloc(): out of memory\n"); + return NULL; + } + i = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + ret[i++] = &buf[n]; + } + } + ret[i] = NULL; + return ret; +} + +/* + * Taken from the runc cloned_binary.c file + * Copyright (C) 2019 Aleksa Sarai + * Copyright (C) 2019 SUSE LLC + * + * This work is dual licensed under the following licenses. You may use, + * redistribute, and/or modify the work under the conditions of either (or + * both) licenses. + * + * === Apache-2.0 === + */ +static int try_bindfd(void) +{ + int fd, ret = -1; + char src[PATH_MAX] = {0}; + char template[64] = {0}; + + strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1); + + /* + * We need somewhere to mount it, mounting anything over /proc/self is a + * BAD idea on the host -- even if we do it temporarily. + */ + fd = mkstemp(template); + if (fd < 0) + return ret; + close(fd); + + ret = -EPERM; + + if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0) + goto out; + + if (mount(src, template, NULL, MS_BIND, NULL) < 0) + goto out; + if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) + goto out_umount; + + /* Get read-only handle that we're sure can't be made read-write. */ + ret = open(template, O_PATH | O_CLOEXEC); + +out_umount: + /* + * Make sure the MNT_DETACH works, otherwise we could get remounted + * read-write and that would be quite bad (the fd would be made read-write + * too, invalidating the protection). + */ + if (umount2(template, MNT_DETACH) < 0) { + if (ret >= 0) + close(ret); + ret = -ENOTRECOVERABLE; + } + +out: + /* + * We don't care about unlink errors, the worst that happens is that + * there's an empty file left around in STATEDIR. 
+ */ + unlink(template); + return ret; +} + +static int copy_self_proc_exe(char **argv) { + char *exename; + int fd, mmfd, n_read, n_written; + struct stat st; + char buf[2048]; + + fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); + if (fd == -1) { + fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (fstat(fd, &st) == -1) { + fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); + close(fd); + return -1; + } + exename = basename(argv[0]); + mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); + if (mmfd == -1) { + fprintf(stderr, "memfd_create(): %m\n"); + goto close_fd; + } + for (;;) { + n_read = read(fd, buf, sizeof(buf)); + if (n_read < 0) { + fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (n_read == 0) { + break; + } + n_written = write(mmfd, buf, n_read); + if (n_written < 0) { + fprintf(stderr, "write(anonfd): %m\n"); + goto close_fd; + } + if (n_written != n_read) { + fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); + goto close_fd; + } + } + close(fd); + if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { + fprintf(stderr, "Close_Fd sealing memfd copy: %m\n"); + goto close_mmfd; + } + + return mmfd; + +close_fd: + close(fd); +close_mmfd: + close(mmfd); + return -1; +} +static int containers_reexec(int flags) { + char **argv; + int fd = -1; + + argv = parse_proc_stringlist("/proc/self/cmdline"); + if (argv == NULL) { + return -1; + } + + if (flags & CLONE_NEWNS) + fd = try_bindfd(); + if (fd < 0) + fd = copy_self_proc_exe(argv); + if (fd < 0) + return fd; + + if (fexecve(fd, argv, environ) == -1) { + close(fd); + fprintf(stderr, "Error during reexec(...): %m\n"); + return -1; + } + close(fd); + return 0; +} + +void _containers_unshare(void) +{ + int flags, pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + flags = _containers_unshare_parse_envint("_Containers-unshare"); + if (flags == -1) { + return; + } + if ((flags & CLONE_NEWUSER) != 0) { + if (unshare(CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); + _check_proc_sys_file (_max_user_namespaces); + _check_proc_sys_file (_unprivileged_user_namespaces); + _exit(1); + } + } + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp() == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } + if ((flags & CLONE_NEWUSER) != 0) { + if (setresgid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresgid(0): 
%m\n"); + _exit(1); + } + if (setresuid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresuid(0): %m\n"); + _exit(1); + } + } + if ((flags & ~CLONE_NEWUSER) != 0) { + if (unshare(flags & ~CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(...): %m\n"); + _exit(1); + } + } + if (containers_reexec(flags) != 0) { + _exit(1); + } + return; +} + +#endif // !UNSHARE_NO_CODE_AT_ALL diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go new file mode 100644 index 00000000..00f397f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -0,0 +1,32 @@ +package unshare + +import ( + "fmt" + "os" + "os/user" + "sync" +) + +var ( + homeDirOnce sync.Once + homeDirErr error + homeDir string +) + +// HomeDir returns the home directory for the current user. +func HomeDir() (string, error) { + homeDirOnce.Do(func() { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) + if err != nil { + homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) + return + } + homeDir, homeDirErr = usr.HomeDir, nil + return + } + homeDir, homeDirErr = home, nil + }) + return homeDir, homeDirErr +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go new file mode 100644 index 00000000..fbfb90d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -0,0 +1,11 @@ +//go:build (linux && cgo && !gccgo) || (freebsd && cgo) +// +build linux,cgo,!gccgo freebsd,cgo + +package unshare + +// #cgo CFLAGS: -Wall +// extern void _containers_unshare(void); +// static void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go new file mode 100644 index 00000000..86ac12ec --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -0,0 +1,54 @@ +//go:build darwin +// +build darwin + +package unshare + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return true +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + return nil, nil, nil +} + +// ParseIDMappings parses mapping triples. 
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") + if err != nil { + return nil, nil, err + } + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") + if err != nil { + return nil, nil, err + } + return uid, gid, nil +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c new file mode 100644 index 00000000..0b2f1788 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c @@ -0,0 +1,76 @@ +#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__) + + +#include +#include +#include +#include +#include +#include + +static int _containers_unshare_parse_envint(const char *envname) { + char *p, *q; + long l; + + p = getenv(envname); + if (p == NULL) { + return -1; + } + q = NULL; + l = strtol(p, &q, 10); + if ((q == NULL) || (*q != '\0')) { + fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); + _exit(1); + } + unsetenv(envname); + return l; +} + +void _containers_unshare(void) +{ + int pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp(0, 0) == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } +} + +#endif diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go new file mode 100644 index 00000000..7a44ca30 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go @@ -0,0 +1,179 @@ +//go:build freebsd +// +build freebsd + +package unshare + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "strconv" + "syscall" + + "github.com/containers/storage/pkg/reexec" + "github.com/sirupsen/logrus" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), +// and one day might handle setting ID maps and other related setting*s +// by triggering initialization code in the child. +type Cmd struct { + *exec.Cmd + Setsid bool + Setpgrp bool + Ctty *os.File + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...) 
+ return &Cmd{ + Cmd: cmd, + } +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set environment variables to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pid pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return fmt.Errorf("creating continue read/write pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + if _, err := io.Copy(b, pidRead); err != nil { + return fmt.Errorf("reading child PID: %w", err) + } + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return fmt.Errorf("parsing PID %q: %w", pidString, err) + } + + // Run any additional setup that we want to do before the child starts running proper. + if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +type Runnable interface { + Run() error +} + +// ExecRunnable runs the specified unshare command, captures its exit status, +// and exits with the same status. 
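+// A minimal usage sketch (illustrative; tmpdir is a hypothetical path to
+// clean up before exiting):
+//
+//	cmd := unshare.Command(os.Args[1:]...)
+//	unshare.ExecRunnable(cmd, func() { os.RemoveAll(tmpdir) })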
+func ExecRunnable(cmd Runnable, cleanup func()) { + exit := func(status int) { + if cleanup != nil { + cleanup() + } + os.Exit(status) + } + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ProcessState.Exited() { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + logrus.Debugf("%v", exitError) + exit(waitStatus.ExitStatus()) + } + if waitStatus.Signaled() { + logrus.Debugf("%v", exitError) + exit(int(waitStatus.Signal()) + 128) + } + } + } + } + logrus.Errorf("%v", err) + logrus.Errorf("(Unable to determine exit status)") + exit(1) + } + exit(0) +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go new file mode 100644 index 00000000..21a43d38 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -0,0 +1,26 @@ +//go:build linux && cgo && gccgo +// +build linux,cgo,gccgo + +package unshare + +// #cgo CFLAGS: -Wall -Wextra +// extern void _containers_unshare(void); +// static void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" + +// This next bit is straight out of libcontainer. + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go new file mode 100644 index 00000000..e169633d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -0,0 +1,729 @@ +//go:build linux +// +build linux + +package unshare + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "os/signal" + "os/user" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and +// handles setting ID maps and other related settings by triggering +// initialization code in the child. +type Cmd struct { + *exec.Cmd + UnshareFlags int + UseNewuidmap bool + UidMappings []specs.LinuxIDMapping // nolint: revive,golint + UseNewgidmap bool + GidMappings []specs.LinuxIDMapping // nolint: revive,golint + GidMappingsEnableSetgroups bool + Setsid bool + Setpgrp bool + Ctty *os.File + OOMScoreAdj *int + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...) + return &Cmd{ + Cmd: cmd, + } +} + +func getRootlessUID() int { + uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +func getRootlessGID() int { + gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") + if gidEnv != "" { + u, _ := strconv.Atoi(gidEnv) + return u + } + + /* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. 
*/ + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getegid() +} + +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capability +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set an environment variable to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags)) + + // Please the libpod "rootless" package to find the expected env variables. + if IsRootless() { + c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID())) + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID())) + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pid pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return fmt.Errorf("creating continue read/write pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + b := new(bytes.Buffer) + if _, err := io.Copy(b, pidRead); err != nil { + return fmt.Errorf("reading child PID: %w", err) + } + pidString := b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return fmt.Errorf("parsing PID %q: %w", pidString, err) + } + pidString = fmt.Sprintf("%d", pid) + + // If we created a new user namespace, set any specified mappings. + if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { + // Always set "setgroups". 
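+ // The kernel requires an unprivileged writer to set
+ // /proc/PID/setgroups to "deny" before it will accept a gid_map
+ // write; see user_namespaces(7).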
+ setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) + return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) + } + defer setgroups.Close() + if c.GidMappingsEnableSetgroups { + if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) + return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err) + } + } else { + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) + return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err) + } + } + + if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { + uidmap, gidmap, err := GetHostIDMappings("") + if err != nil { + fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) + return fmt.Errorf("reading ID mappings in parent: %w", err) + } + if len(c.UidMappings) == 0 { + c.UidMappings = uidmap + for i := range c.UidMappings { + c.UidMappings[i].HostID = c.UidMappings[i].ContainerID + } + } + if len(c.GidMappings) == 0 { + c.GidMappings = gidmap + for i := range c.GidMappings { + c.GidMappings[i].HostID = c.GidMappings[i].ContainerID + } + } + } + + if len(c.GidMappings) > 0 { + // Build the GID map, since writing to the proc file has to be done all at once. + g := new(bytes.Buffer) + for _, m := range c.GidMappings { + fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + gidmapSet := false + // Set the GID map. + if c.UseNewgidmap { + path, err := exec.LookPath("newgidmap") + if err != nil { + return fmt.Errorf("finding newgidmap: %w", err) + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) 
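+ // newgidmap is invoked as "newgidmap PID containerID hostID size
+ // [containerID hostID size ...]", which is why the newline-separated
+ // map built above is flattened into space-separated arguments in the
+ // exec.Command call above.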
+ g.Reset() + cmd.Stdout = g + cmd.Stderr = g + if err := cmd.Run(); err == nil { + gidmapSet = true + } else { + logrus.Warnf("running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } + logrus.Warnf("Falling back to single mapping") + g.Reset() + g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + } + } + if !gidmapSet { + if c.UseNewgidmap { + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) + return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) + } + defer setgroups.Close() + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) + return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err) + } + } + gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err) + return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err) + } + defer gidmap.Close() + if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { + fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) + return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err) + } + } + } + + if len(c.UidMappings) > 0 { + // Build the UID map, since writing to the proc file has to be done all at once. + u := new(bytes.Buffer) + for _, m := range c.UidMappings { + fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + uidmapSet := false + // Set the UID map. + if c.UseNewuidmap { + path, err := exec.LookPath("newuidmap") + if err != nil { + return fmt.Errorf("finding newuidmap: %w", err) + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) 
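+ // (Same invocation convention as newgidmap above. If newuidmap fails,
+ // the fallback below writes the single mapping "0 <euid> 1", mapping
+ // only the current user to root inside the namespace.)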
+ u.Reset() + cmd.Stdout = u + cmd.Stderr = u + if err := cmd.Run(); err == nil { + uidmapSet = true + } else { + logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + + logrus.Warnf("Falling back to single mapping") + u.Reset() + u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + } + } + if !uidmapSet { + uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) + return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err) + } + defer uidmap.Close() + if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { + fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) + return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err) + } + } + } + } + + if c.OOMScoreAdj != nil { + oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) + return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err) + } + defer oomScoreAdj.Close() + if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { + fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) + return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err) + } + } + // Run any additional setup that we want to do before the child starts running proper. + if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +var ( + isRootlessOnce sync.Once + isRootless bool +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped. +func hasFullUsersMappings() (bool, error) { + content, err := os.ReadFile("/proc/self/uid_map") + if err != nil { + return false, err + } + // The kernel rejects attempts to create mappings where either starting + // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 . + // So, if the uid_map contains 4294967295, the entire IDs space is available in the + // user namespace, so it is likely the initial user namespace. 
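+ // In the initial user namespace /proc/self/uid_map typically reads
+ // "0 0 4294967295", so the containment check below suffices.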
+ return bytes.Contains(content, []byte("4294967295")), nil +} + +var ( + hasCapSysAdminOnce sync.Once + hasCapSysAdminRet bool + hasCapSysAdminErr error +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + isRootlessOnce.Do(func() { + isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != "" + if !isRootless { + hasCapSysAdmin, err := HasCapSysAdmin() + if err != nil { + logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process") + } + if err == nil && !hasCapSysAdmin { + isRootless = true + } + } + if !isRootless { + hasMappings, err := hasFullUsersMappings() + if err != nil { + logrus.Warnf("Failed to read current user namespace mappings") + } + if err == nil && !hasMappings { + isRootless = true + } + } + }) + return isRootless +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=done") +} + +type Runnable interface { + Run() error +} + +func bailOnError(err error, format string, a ...interface{}) { // nolint: revive,goprintffuncname + if err != nil { + if format != "" { + logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) + } else { + logrus.Errorf("%v", err) + } + os.Exit(1) + } +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { + // If we've already been through this once, no need to try again. + if os.Geteuid() == 0 && GetRootlessUID() > 0 { + return + } + + var uidNum, gidNum uint64 + // Figure out who we are. + me, err := user.Current() + if !os.IsNotExist(err) { + bailOnError(err, "error determining current user") + uidNum, err = strconv.ParseUint(me.Uid, 10, 32) + bailOnError(err, "error parsing current UID %s", me.Uid) + gidNum, err = strconv.ParseUint(me.Gid, 10, 32) + bailOnError(err, "error parsing current GID %s", me.Gid) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // ID mappings to use to reexec ourselves. + var uidmap, gidmap []specs.LinuxIDMapping + if uidNum != 0 || evenForRoot { + // Read the set of ID mappings that we're allowed to use. Each + // range in /etc/subuid and /etc/subgid file is a starting host + // ID and a range size. + uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username) + if err != nil { + logrus.Warnf("Reading allowed ID mappings: %v", err) + } + if len(uidmap) == 0 { + logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) + } + if len(gidmap) == 0 { + logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username) + } + // Map our UID and GID, then the subuid and subgid ranges, + // consecutively, starting at 0, to get the mappings to use for + // a copy of ourselves. + uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...) + gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...) 
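+ // Worked example (illustrative): for UID 1000 with subuid range
+ // 100000-165535, this yields the map "0 1000 1" plus "1 100000 65536"
+ // once the loop below renumbers container IDs consecutively from 0.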
+ var rangeStart uint32 + for i := range uidmap { + uidmap[i].ContainerID = rangeStart + rangeStart += uidmap[i].Size + } + rangeStart = 0 + for i := range gidmap { + gidmap[i].ContainerID = rangeStart + rangeStart += gidmap[i].Size + } + } else { + // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able + // to use unshare(), so don't bother creating a new user namespace at this point. + capabilities, err := capability.NewPid(0) + bailOnError(err, "Reading the current capabilities sets") + if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) { + return + } + // Read the set of ID mappings that we're currently using. + uidmap, gidmap, err = GetHostIDMappings("") + bailOnError(err, "Reading current ID mappings") + // Just reuse them. + for i := range uidmap { + uidmap[i].HostID = uidmap[i].ContainerID + } + for i := range gidmap { + gidmap[i].HostID = gidmap[i].ContainerID + } + } + + // Unlike most uses of reexec or unshare, we're using a name that + // _won't_ be recognized as a registered reexec handler, since we + // _want_ to fall through reexec.Init() to the normal main(). + cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...) + + // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again. + err = os.Setenv(UsernsEnvName, "1") + bailOnError(err, "error setting %s=1 in environment", UsernsEnvName) + + // Set the default isolation type to use the "rootless" method. + if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present { + if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { + if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { + logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err) + os.Exit(1) + } + } + } + + // Reuse our stdio. + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // Set up a new user namespace with the ID mapping. + cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS + cmd.UseNewuidmap = uidNum != 0 + cmd.UidMappings = uidmap + cmd.UseNewgidmap = uidNum != 0 + cmd.GidMappings = gidmap + cmd.GidMappingsEnableSetgroups = true + + // Finish up. + logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + + // Forward SIGHUP, SIGINT, and SIGTERM to our child process. + interrupted := make(chan os.Signal, 100) + defer func() { + signal.Stop(interrupted) + close(interrupted) + }() + cmd.Hook = func(int) error { + go func() { + for receivedSignal := range interrupted { + cmd.Cmd.Process.Signal(receivedSignal) + } + }() + return nil + } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + // Make sure our child process gets SIGKILLed if we exit, for whatever + // reason, before it does. + if cmd.Cmd.SysProcAttr == nil { + cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL + + ExecRunnable(cmd, nil) +} + +// ExecRunnable runs the specified unshare command, captures its exit status, +// and exits with the same status. 
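+// The exit status mapping below follows the shell convention: a child that
+// exited normally propagates its status, while a signaled child produces
+// 128 + the signal number (e.g. SIGKILL becomes 137).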
+func ExecRunnable(cmd Runnable, cleanup func()) { + exit := func(status int) { + if cleanup != nil { + cleanup() + } + os.Exit(status) + } + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ProcessState.Exited() { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + logrus.Debugf("%v", exitError) + exit(waitStatus.ExitStatus()) + } + if waitStatus.Signaled() { + logrus.Debugf("%v", exitError) + exit(int(waitStatus.Signal()) + 128) + } + } + } + } + logrus.Errorf("%v", err) + logrus.Errorf("(Unable to determine exit status)") + exit(1) + } + exit(0) +} + +// getHostIDMappings reads mappings from the named node under /proc. +func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { + var mappings []specs.LinuxIDMapping + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err) + } + mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + if pid == "" { + pid = "self" + } + uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, nil, err + } + gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. +func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + mappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err) + } + var uidmap, gidmap []specs.LinuxIDMapping + for _, m := range mappings.UIDs() { + uidmap = append(uidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + for _, m := range mappings.GIDs() { + gidmap = append(gidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap, nil +} + +// ParseIDMappings parses mapping triples. 
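+// A hedged usage sketch (assuming the "containerID:hostID:size" syntax
+// accepted by idtools.ParseIDMap; values illustrative):
+//
+//	uidmaps, gidmaps, err := unshare.ParseIDMappings(
+//		[]string{"0:1000:1", "1:100000:65536"},
+//		[]string{"0:1000:1", "1:100000:65536"},
+//	)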
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") + if err != nil { + return nil, nil, err + } + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") + if err != nil { + return nil, nil, err + } + return uid, gid, nil +} + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. +func HasCapSysAdmin() (bool, error) { + hasCapSysAdminOnce.Do(func() { + currentCaps, err := capability.NewPid2(0) + if err != nil { + hasCapSysAdminErr = err + return + } + if err = currentCaps.Load(); err != nil { + hasCapSysAdminErr = err + return + } + hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) + }) + return hasCapSysAdminRet, hasCapSysAdminErr +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go new file mode 100644 index 00000000..66dd5459 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go @@ -0,0 +1,51 @@ +//go:build !linux && !darwin +// +build !linux,!darwin + +package unshare + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return false +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + return nil, nil, nil +} + +// ParseIDMappings parses mapping triples. +func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. +func HasCapSysAdmin() (bool, error) { + return os.Geteuid() == 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go new file mode 100644 index 00000000..a6b38eda --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go @@ -0,0 +1,11 @@ +//go:build cgo && !(linux || freebsd) +// +build cgo,!linux,!freebsd + +package unshare + +// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'. +// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile. +// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code. 
+ +// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL +import "C" diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE new file mode 100644 index 00000000..59121159 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE @@ -0,0 +1,13 @@ + Copyright 2018 Anders Rundgren + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go new file mode 100644 index 00000000..92574a3f --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go @@ -0,0 +1,71 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. 
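+// Illustrative outputs of the rules below: values in [1e-6, 1e21) print in
+// positional notation (1e20 -> "100000000000000000000"), everything else in
+// exponent notation with a trimmed exponent (1e21 -> "1e+21"), and -0
+// collapses to "0".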
+// The core application for this is canonicalization: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "math" + "strconv" + "strings" +) + +const invalidPattern uint64 = 0x7ff0000000000000 + +func NumberToJSON(ieeeF64 float64) (res string, err error) { + ieeeU64 := math.Float64bits(ieeeF64) + + // Special case: NaN and Infinity are invalid in JSON + if (ieeeU64 & invalidPattern) == invalidPattern { + return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16)) + } + + // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications + if ieeeF64 == 0 { // Right, this line takes both -0 and 0 + return "0", nil + } + + // Deal with the sign separately + var sign string = "" + if ieeeF64 < 0 { + ieeeF64 =-ieeeF64 + sign = "-" + } + + // ES6 has a unique "g" format + var format byte = 'e' + if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 { + format = 'f' + } + + // The following should do the trick: + es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64) + + // Minor cleanup + exponent := strings.IndexByte(es6Formatted, 'e') + if exponent > 0 { + // Go outputs "1e+09" which must be rewritten as "1e+9" + if es6Formatted[exponent + 2] == '0' { + es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:] + } + } + return sign + es6Formatted, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go new file mode 100644 index 00000000..661f4105 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go @@ -0,0 +1,378 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// This package transforms JSON data in UTF-8 according to: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "container/list" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +type nameValueType struct { + name string + sortKey []uint16 + value string +} + +// JSON standard escapes (modulo \u) +var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'} +var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'} + +// JSON literals +var literals = []string{"true", "false", "null"} + +func Transform(jsonData []byte) (result []byte, e error) { + + // JSON data MUST be UTF-8 encoded + var jsonDataLength int = len(jsonData) + + // Current pointer in jsonData + var index int = 0 + + // "Forward" declarations are needed for closures referring each other + var parseElement func() string + var parseSimpleType func() string + var parseQuotedString func() string + var parseObject func() string + var parseArray func() string + + var globalError error = nil + + checkError := func(e error) { + // We only honor the first reported error + if globalError == nil { + globalError = e + } + } + + setError := func(msg string) { + checkError(errors.New(msg)) + } + + isWhiteSpace := func(c byte) bool { + return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09 + } + + nextChar := func() byte { + if index < jsonDataLength { + c := jsonData[index] + if c > 0x7f { + setError("Unexpected non-ASCII character") + } + index++ + return c + } + setError("Unexpected EOF reached") + return '"' + } + + scan := func() byte { + for { + c := nextChar() + if isWhiteSpace(c) { + continue; + } + return c + } + } + + scanFor := func(expected byte) { + c := scan() + if c != expected { + setError("Expected '" + string(expected) + "' but got '" + string(c) + "'") + } + } + + getUEscape := func() rune { + start := index + nextChar() + nextChar() + nextChar() + nextChar() + if globalError != nil { + return 0 + } + u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64) + checkError(err) + return rune(u16) + } + + testNextNonWhiteSpaceChar := func() byte { + save := index + c := scan() + index = save + return c + } + + decorateString := func(rawUTF8 string) string { + var quotedString strings.Builder + quotedString.WriteByte('"') + CoreLoop: + for _, c := range []byte(rawUTF8) { + // Is this within the JSON standard escapes? 
+ for i, esc := range binaryEscapes { + if esc == c { + quotedString.WriteByte('\\') + quotedString.WriteByte(asciiEscapes[i]) + continue CoreLoop + } + } + if c < 0x20 { + // Other ASCII control characters must be escaped with \uhhhh + quotedString.WriteString(fmt.Sprintf("\\u%04x", c)) + } else { + quotedString.WriteByte(c) + } + } + quotedString.WriteByte('"') + return quotedString.String() + } + + parseQuotedString = func() string { + var rawString strings.Builder + CoreLoop: + for globalError == nil { + var c byte + if index < jsonDataLength { + c = jsonData[index] + index++ + } else { + nextChar() + break + } + if (c == '"') { + break; + } + if c < ' ' { + setError("Unterminated string literal") + } else if c == '\\' { + // Escape sequence + c = nextChar() + if c == 'u' { + // The \u escape + firstUTF16 := getUEscape() + if utf16.IsSurrogate(firstUTF16) { + // If the first UTF-16 code unit has a certain value there must be + // another succeeding UTF-16 code unit as well + if nextChar() != '\\' || nextChar() != 'u' { + setError("Missing surrogate") + } else { + // Output the UTF-32 code point as UTF-8 + rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape())) + } + } else { + // Single UTF-16 code identical to UTF-32. Output as UTF-8 + rawString.WriteRune(firstUTF16) + } + } else if c == '/' { + // Benign but useless escape + rawString.WriteByte('/') + } else { + // The JSON standard escapes + for i, esc := range asciiEscapes { + if esc == c { + rawString.WriteByte(binaryEscapes[i]) + continue CoreLoop + } + } + setError("Unexpected escape: \\" + string(c)) + } + } else { + // Just an ordinary ASCII character alternatively a UTF-8 byte + // outside of ASCII. + // Note that properly formatted UTF-8 never clashes with ASCII + // making byte per byte search for ASCII break characters work + // as expected. + rawString.WriteByte(c) + } + } + return rawString.String() + } + + parseSimpleType = func() string { + var token strings.Builder + index-- + for globalError == nil { + c := testNextNonWhiteSpaceChar() + if c == ',' || c == ']' || c == '}' { + break; + } + c = nextChar() + if isWhiteSpace(c) { + break + } + token.WriteByte(c) + } + if token.Len() == 0 { + setError("Missing argument") + } + value := token.String() + // Is it a JSON literal? 
+ for _, literal := range literals { + if literal == value { + return literal + } + } + // Apparently not so we assume that it is a I-JSON number + ieeeF64, err := strconv.ParseFloat(value, 64) + checkError(err) + value, err = NumberToJSON(ieeeF64) + checkError(err) + return value + } + + parseElement = func() string { + switch scan() { + case '{': + return parseObject() + case '"': + return decorateString(parseQuotedString()) + case '[': + return parseArray() + default: + return parseSimpleType() + } + } + + parseArray = func() string { + var arrayData strings.Builder + arrayData.WriteByte('[') + var next bool = false + for globalError == nil && testNextNonWhiteSpaceChar() != ']' { + if next { + scanFor(',') + arrayData.WriteByte(',') + } else { + next = true + } + arrayData.WriteString(parseElement()) + } + scan() + arrayData.WriteByte(']') + return arrayData.String() + } + + lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool { + // Find the minimum length of the sortKeys + oldSortKey := e.Value.(nameValueType).sortKey + minLength := len(oldSortKey) + if minLength > len(sortKey) { + minLength = len(sortKey) + } + for q := 0; q < minLength; q++ { + diff := int(sortKey[q]) - int(oldSortKey[q]) + if diff < 0 { + // Smaller => Precedes + return true + } else if diff > 0 { + // Bigger => No match + return false + } + // Still equal => Continue + } + // The sortKeys compared equal up to minLength + if len(sortKey) < len(oldSortKey) { + // Shorter => Precedes + return true + } + if len(sortKey) == len(oldSortKey) { + setError("Duplicate key: " + e.Value.(nameValueType).name) + } + // Longer => No match + return false + } + + parseObject = func() string { + nameValueList := list.New() + var next bool = false + CoreLoop: + for globalError == nil && testNextNonWhiteSpaceChar() != '}' { + if next { + scanFor(',') + } + next = true + scanFor('"') + rawUTF8 := parseQuotedString() + if globalError != nil { + break; + } + // Sort keys on UTF-16 code units + // Since UTF-8 doesn't have endianess this is just a value transformation + // In the Go case the transformation is UTF-8 => UTF-32 => UTF-16 + sortKey := utf16.Encode([]rune(rawUTF8)) + scanFor(':') + nameValue := nameValueType{rawUTF8, sortKey, parseElement()} + for e := nameValueList.Front(); e != nil; e = e.Next() { + // Check if the key is smaller than a previous key + if lexicographicallyPrecedes(sortKey, e) { + // Precedes => Insert before and exit sorting + nameValueList.InsertBefore(nameValue, e) + continue CoreLoop + } + // Continue searching for a possibly succeeding sortKey + // (which is straightforward since the list is ordered) + } + // The sortKey is either the first or is succeeding all previous sortKeys + nameValueList.PushBack(nameValue) + } + // Scan away '}' + scan() + // Now everything is sorted so we can properly serialize the object + var objectData strings.Builder + objectData.WriteByte('{') + next = false + for e := nameValueList.Front(); e != nil; e = e.Next() { + if next { + objectData.WriteByte(',') + } + next = true + nameValue := e.Value.(nameValueType) + objectData.WriteString(decorateString(nameValue.name)) + objectData.WriteByte(':') + objectData.WriteString(nameValue.value) + } + objectData.WriteByte('}') + return objectData.String() + } + + ///////////////////////////////////////////////// + // This is where Transform actually begins... 
// + ///////////////////////////////////////////////// + var transformed string + + if testNextNonWhiteSpaceChar() == '[' { + scan() + transformed = parseArray() + } else { + scanFor('{') + transformed = parseObject() + } + for index < jsonDataLength { + if !isWhiteSpace(jsonData[index]) { + setError("Improperly terminated JSON object") + break; + } + index++ + } + return []byte(transformed), globalError +} \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go new file mode 100644 index 00000000..71327dca --- /dev/null +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. 
If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. 
This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 00000000..978df7ea --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 00000000..b3dfb7a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,199 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
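+//
+// A minimal usage sketch (illustrative, not part of the vendored source),
+// using exported helpers from this package:
+//
+//	named, err := ParseNormalizedNamed("ubuntu")
+//	if err != nil {
+//		return err
+//	}
+//	named.String()        // "docker.io/library/ubuntu"
+//	FamiliarString(named) // "ubuntu"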
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both a
+// tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// must already be validated.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 00000000..b7cd00b0 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. 
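+//
+// Round-trip sketch (illustrative only):
+//
+//	f := AsField(ref) // ref is any parsed Reference
+//	text, _ := f.MarshalText()
+//	var g Field
+//	_ = g.UnmarshalText(text) // g.Reference() holds the re-parsed ref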
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. 
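+//
+// For instance (illustrative): only the canonical form is accepted, so
+//
+//	ParseNamed("docker.io/library/ubuntu:latest") // succeeds
+//	ParseNamed("ubuntu")                          // fails with ErrNameNotCanonical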
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
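+//
+// For example (illustrative):
+//
+//	ref, _ := ParseNormalizedNamed("docker.io/library/ubuntu:latest")
+//	TrimNamed(ref).String() // "docker.io/library/ubuntu"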
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 00000000..78603493 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
+ DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. 
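+//
+// A hypothetical composition (not in upstream), mirroring how DomainRegexp
+// above chains dotted components:
+//
+//	dotted := expression(
+//		domainComponentRegexp,
+//		optional(repeated(literal(`.`), domainComponentRegexp)))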
+func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 00000000..4c35b879 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. 
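+//
+// Construction sketch (illustrative, not upstream code):
+//
+//	err := ErrorCodeUnknown.WithDetail("root cause")
+//	err.Error() // "unknown: unknown error"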
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human-readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the ErrorCode registered for the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
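+//
+// For instance (illustrative): after appending two items to an Errors value,
+// Len reports 2:
+//
+//	var errs Errors
+//	errs = append(errs, ErrorCodeDenied, ErrorCodeUnknown.WithDetail("boom"))
+//	errs.Len() // 2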
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode, or Error values into a
+// slice of Error, then serializes the result
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr := daErr.(type) {
+		case ErrorCode:
+			err = daErr.WithDetail(nil)
+		case Error:
+			err = daErr
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was set up and they forgot to set the
+		// Message field (meaning it's "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode values
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors without details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Errors with details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 00000000..d77e7047
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,40 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	var sc int
+
+	switch errs := err.(type) {
+	case Errors:
+		if len(errs) < 1 {
+			break
+		}
+
+		if err, ok := errs[0].(ErrorCoder); ok {
+			sc = err.ErrorCode().Descriptor().HTTPStatusCode
+		}
+	case ErrorCoder:
+		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+		err = Errors{err} // create an envelope.
+	default:
+		// We just have an unhandled error type, so just place in an envelope
+		// and move along.
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 00000000..d1e8826c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
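+	// (Packages outside this one add their own codes with Register in the
+	// same way; see the registry.api.v2 group later in this patch. A sketch,
+	// with illustrative group and descriptor values only:
+	//
+	//	var ErrorCodePaused = errcode.Register("mygroup", errcode.ErrorDescriptor{
+	//		Value:          "PAUSED",
+	//		Message:        "the resource is paused",
+	//		HTTPStatusCode: http.StatusConflict,
+	//	})
+	//
+	// Register panics if the Value or Code is already registered.)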
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 00000000..c3bf90f7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1613 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. 
Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. It not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + invalidPaginationResponseDescriptor = ResponseDescriptor{ + Name: "Invalid pagination number", + Description: "The received parameter n was invalid in some way, as described by the error code. 
The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodePaginationNumberInvalid, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. 
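+	// (The exported APIDescriptor above can be walked to generate
+	// documentation; a sketch only:
+	//
+	//	for _, rd := range APIDescriptor.RouteDescriptors {
+	//		fmt.Printf("%s %s\n", rd.Name, rd.Path)
+	//	}
+	//
+	// )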
+	Method string
+
+	// Description should provide an overview of the functionality provided by
+	// the covered method, suitable for use in documentation. Use of markdown
+	// here is encouraged.
+	Description string
+
+	// Requests is a slice of request descriptors enumerating how this
+	// endpoint may be used.
+	Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+	// Name provides a short identifier for the request, usable as a title or
+	// to provide quick context for the particular request.
+	Name string
+
+	// Description should cover the request's purpose, covering any details for
+	// this particular use case.
+	Description string
+
+	// Headers describes headers that must be used with the HTTP request.
+	Headers []ParameterDescriptor
+
+	// PathParameters enumerate the parameterized path components for the
+	// given request, as defined in the route's regular expression.
+	PathParameters []ParameterDescriptor
+
+	// QueryParameters provides a list of query parameters for the given
+	// request.
+	QueryParameters []ParameterDescriptor
+
+	// Body describes the format of the request body.
+	Body BodyDescriptor
+
+	// Successes enumerates the possible responses that are considered to be
+	// the result of a successful request.
+	Successes []ResponseDescriptor
+
+	// Failures covers the possible failures from this particular request.
+	Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+	// Name provides a short identifier for the response, usable as a title or
+	// to provide quick context for the particular response.
+	Name string
+
+	// Description should provide a brief overview of the role of the
+	// response.
+	Description string
+
+	// StatusCode specifies the status received by this particular response.
+	StatusCode int
+
+	// Headers covers any headers that may be returned from the response.
+	Headers []ParameterDescriptor
+
+	// Fields describes any fields that may be present in the response.
+	Fields []ParameterDescriptor
+
+	// ErrorCodes enumerates the error codes that may be returned along with
+	// the response.
+	ErrorCodes []errcode.ErrorCode
+
+	// Body describes the body of the response, if any.
+	Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+	ContentType string
+	Format      string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+	// Name is the name of the parameter, either of the path component or
+	// query parameter.
+	Name string
+
+	// Type specifies the type of the parameter, such as string, integer, etc.
+	Type string
+
+	// Description provides a human-readable description of the parameter.
+	Description string
+
+	// Required means the field is required when set.
+	Required bool
+
+	// Format specifies the string format accepted by this parameter.
+	Format string
+
+	// Regexp is a compiled regular expression that can be used to validate
+	// the contents of the parameter.
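+	// For instance, a server can check an incoming value with a sketch like
+	// `if p.Regexp != nil && !p.Regexp.MatchString(v) { /* reject */ }`.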
+ Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + invalidPaginationResponseDescriptor, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... 
+ ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + invalidPaginationResponseDescriptor, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 00000000..cde01195 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. 
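+//
+// As a rough usage sketch (the registry host below is illustrative):
+//
+//	ub, err := NewURLBuilderFromString("https://registry.example.com/", false)
+//	if err != nil {
+//		// handle a malformed root url
+//	}
+//	_ = ub       // used to build endpoint urls under the root
+//	_ = Router() // gorilla/mux router carrying the named routes below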
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 00000000..87e9f3c1
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,145 @@
+package v2
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// length does not match the content length.
+	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameUnknown is returned when the repository name is not known.
+	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag, is unknown to the repository.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
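+	// A handler typically attaches the underlying cause before returning,
+	// e.g. (a sketch): return ErrorCodeManifestInvalid.WithDetail(err).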
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
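+	// Clients seeing the wire value can map it back to the registered code,
+	// e.g. (a sketch): errcode.ParseErrorCode("BLOB_UPLOAD_INVALID").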
+ ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) + + ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "PAGINATION_NUMBER_INVALID", + Message: "invalid number of results requested", + Description: `Returned when the "n" parameter (number of results + to return) is not an integer, "n" is negative or "n" is bigger than + the maximum allowed.`, + HTTPStatusCode: http.StatusBadRequest, + }) +) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go new file mode 100644 index 00000000..9bc41a3a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +var ( + // according to rfc7230 + reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) + reQuotedValue = regexp.MustCompile(`^[^\\"]+`) + reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) +) + +// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains +// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The +// function parses only the first element of the list, which is set by the very first proxy. It returns a map +// of corresponding key-value pairs and an unparsed slice of the input string. +// +// Examples of Forwarded header values: +// +// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown +// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" +// +// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into +// {"for": "192.0.2.43:443", "host": "registry.example.org"}. +func parseForwardedHeader(forwarded string) (map[string]string, string, error) { + // Following are states of forwarded header parser. Any state could transition to a failure. 
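+	// As a usage sketch, the second example above parses as
+	//
+	//	m, rest, err := parseForwardedHeader(`for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`)
+	//	// m["for"] == "192.0.2.43:443", m["host"] == "registry.example.org"
+	//	// rest == ` for="10.10.05.40:80"` (set by the next proxy), err == nil
+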
+ const ( + // terminating state; can transition to Parameter + stateElement = iota + // terminating state; can transition to KeyValueDelimiter + stateParameter + // can transition to Value + stateKeyValueDelimiter + // can transition to one of { QuotedValue, PairEnd } + stateValue + // can transition to one of { EscapedCharacter, PairEnd } + stateQuotedValue + // can transition to one of { QuotedValue } + stateEscapedCharacter + // terminating state; can transition to one of { Parameter, Element } + statePairEnd + ) + + var ( + parameter string + value string + parse = forwarded[:] + res = map[string]string{} + state = stateElement + ) + +Loop: + for { + // skip spaces unless in quoted value + if state != stateQuotedValue && state != stateEscapedCharacter { + parse = strings.TrimLeftFunc(parse, unicode.IsSpace) + } + + if len(parse) == 0 { + if state != stateElement && state != statePairEnd && state != stateParameter { + return nil, parse, fmt.Errorf("unexpected end of input") + } + // terminating + break + } + + switch state { + // terminate at list element delimiter + case stateElement: + if parse[0] == ',' { + parse = parse[1:] + break Loop + } + state = stateParameter + + // parse parameter (the key of key-value pair) + case stateParameter: + match := reToken.FindString(parse) + if len(match) == 0 { + return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) + } + parameter = strings.ToLower(match) + parse = parse[len(match):] + state = stateKeyValueDelimiter + + // parse '=' + case stateKeyValueDelimiter: + if parse[0] != '=' { + return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) + } + parse = parse[1:] + state = stateValue + + // parse value or quoted value + case stateValue: + if parse[0] == '"' { + parse = parse[1:] + state = stateQuotedValue + } else { + value = reToken.FindString(parse) + if len(value) == 0 { + return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) + } + if _, exists := res[parameter]; exists { + return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) + } + res[parameter] = value + parse = parse[len(value):] + value = "" + state = statePairEnd + } + + // parse a part of quoted value until the first backslash + case stateQuotedValue: + match := reQuotedValue.FindString(parse) + value += match + parse = parse[len(match):] + switch { + case len(parse) == 0: + return nil, parse, fmt.Errorf("unterminated quoted string") + case parse[0] == '"': + res[parameter] = value + value = "" + parse = parse[1:] + state = statePairEnd + case parse[0] == '\\': + parse = parse[1:] + state = stateEscapedCharacter + } + + // parse escaped character in a quoted string, ignore the backslash + // transition back to QuotedValue state + case stateEscapedCharacter: + c := reEscapedCharacter.FindString(parse) + if len(c) == 0 { + return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) + } + value += c + parse = parse[1:] + state = stateQuotedValue + + // expect either a new key-value pair, new list or end of input + case statePairEnd: + switch parse[0] { + case ';': + parse = parse[1:] + state = stateParameter + case ',': + state = stateElement + default: + return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) + } + } + } + + return res, parse, nil +} diff --git 
a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 00000000..9612ac2e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,40 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 00000000..3c3ec989
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,254 @@
+package v2
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root     *url.URL // url root (ie http://localhost/)
+	router   *mux.Router
+	relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+	return &URLBuilder{
+		root:     root,
+		router:   Router(),
+		relative: relative,
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
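+// Scheme and host come from the request itself, but are overridden by the
+// standard proxy headers when present: the "Forwarded" header (RFC 7239) is
+// preferred, with "X-Forwarded-Proto" and "X-Forwarded-Host" as the fallback.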
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var (
+		scheme = "http"
+		host   = r.Host
+	)
+
+	if r.TLS != nil {
+		scheme = "https"
+	} else if len(r.URL.Scheme) > 0 {
+		scheme = r.URL.Scheme
+	}
+
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by rfc7239 if given
+	// see https://tools.ietf.org/html/rfc7239
+	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+		forwardedHeader, _, err := parseForwardedHeader(forwarded)
+		if err == nil {
+			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+				scheme = fproto
+			}
+			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+				host = fhost
+			}
+		}
+	} else {
+		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+			scheme = forwardedProto
+		}
+		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+			// comma-separated list of hosts, to which each proxy appends the
+			// requested host. We want to grab the first from this comma-separated
+			// list.
+			hosts := strings.SplitN(forwardedHost, ",", 2)
+			host = strings.TrimSpace(hosts[0])
+		}
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories.
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	default:
+		return "", fmt.Errorf("reference must have a tag or digest")
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
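+// The route resolves to a path of the form "/v2/<name>/blobs/<digest>"
+// under the builder's root.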
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if cr.relative {
+		return routeURL, nil
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 00000000..2c3ebe16
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
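+// For example: hasPort("example.com") == false,
+// hasPort("example.com:5000") == true, and hasPort("[::1]:5000") == true.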
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 00000000..fe238210
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
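+// Each byte is classified once at init time: isToken marks characters that
+// may appear in an HTTP token, isSpace marks linear whitespace.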
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 00000000..1ea555e2
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without
limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go new file mode 100644 index 00000000..678153cf --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// isValidCredsMessage checks if 'msg' contains invalid credentials error message. +// It returns whether the logs are free of invalid credentials errors and the error if it isn't. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. +func isValidCredsMessage(msg string) error { + if credentials.IsCredentialsMissingServerURLMessage(msg) { + return credentials.NewErrCredentialsMissingServerURL() + } + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + + return nil +} + +// Store uses an external program to save credentials. +func Store(program ProgramFunc, creds *credentials.Credentials) error { + cmd := program(credentials.ActionStore) + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// Get executes an external program to get the credentials from a native store. +func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program(credentials.ActionGet) + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. 
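+// The server URL to remove is written to the helper's standard input, as with
+// the other actions in this package.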
+func Erase(program ProgramFunc, serverURL string) error { + cmd := program(credentials.ActionErase) + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program(credentials.ActionList) + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 00000000..1936234b --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,54 @@ +package client + +import ( + "io" + "os" + "os/exec" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. +func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Environ(), k+"="+v) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials-helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials-helper. +func (s *Shell) Output() ([]byte, error) { + return s.cmd.Output() +} + +// Input sets the input to send to a remote credentials-helper. +func (s *Shell) Input(in io.Reader) { + s.cmd.Stdin = in +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go new file mode 100644 index 00000000..eac55188 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -0,0 +1,209 @@ +package credentials + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" +) + +// Action defines the name of an action (sub-command) supported by a +// credential-helper binary. It is an alias for "string", and mostly +// for convenience. 
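+// A helper binary is typically invoked as "docker-credential-<name> <action>",
+// reading its payload from standard input and writing results to standard
+// output.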
+type Action = string
+
+// List of actions (sub-commands) supported by credential-helper binaries.
+const (
+	ActionStore   Action = "store"
+	ActionGet     Action = "get"
+	ActionErase   Action = "erase"
+	ActionList    Action = "list"
+	ActionVersion Action = "version"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of the Credentials object such that no
+// credentials lack a server URL or a username.
+// It returns whether the credentials are valid and the error if they aren't.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
+// That label allows filtering out non-Docker credentials at lookup/search time in the
+// macOS keychain, Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials-helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	if len(os.Args) != 2 {
+		_, _ = fmt.Fprintln(os.Stdout, usage())
+		os.Exit(1)
+	}
+
+	switch os.Args[1] {
+	case "--version", "-v":
+		_ = PrintVersion(os.Stdout)
+		os.Exit(0)
+	case "--help", "-h":
+		_, _ = fmt.Fprintln(os.Stdout, usage())
+		os.Exit(0)
+	}
+
+	if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
+		_, _ = fmt.Fprintln(os.Stdout, err)
+		os.Exit(1)
+	}
+}
+
+func usage() string {
+	return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name)
+}
+
+// HandleCommand runs a helper to execute a credential action.
+func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
+	switch action {
+	case ActionStore:
+		return Store(helper, in)
+	case ActionGet:
+		return Get(helper, in, out)
+	case ActionErase:
+		return Erase(helper, in)
+	case ActionList:
+		return List(helper, out)
+	case ActionVersion:
+		return PrintVersion(out)
+	default:
+		return fmt.Errorf("%s: unknown action: %s", Name, action)
+	}
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server url.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
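+// For example, reading "https://example.com" may produce output such as
+// {"ServerURL":"https://example.com","Username":"user","Secret":"s3cret"}.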
+func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + buffer.Reset() + err = json.NewEncoder(buffer).Encode(Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + }) + if err != nil { + return err + } + + _, _ = fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +// List returns all the serverURLs of keys in +// the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +// PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + _, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) + return nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 00000000..8fa4d5d2 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,121 @@ +package credentials + +import "errors" + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. + errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface. +// +// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound +func (errCredentialsNotFound) NotFound() {} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. 
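+// Wrapped errors are matched as well, via errors.As.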
+func IsErrCredentialsNotFound(err error) bool { + var target errCredentialsNotFound + return errors.As(err, &target) +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. +// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingServerURL) InvalidParameter() {} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingUsername) InvalidParameter() {} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + var target errCredentialsMissingServerURL + return errors.As(err, &target) +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + var target errCredentialsMissingUsername + return errors.As(err, &target) +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. +func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 00000000..135acd25 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. 
+ Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. + List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 00000000..84377c26 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,16 @@ +package credentials + +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 00000000..b3141819 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2390 @@ +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Hnatiw +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Abirdcfly +Ada Mancini +Adam Avilla +Adam Dobrawy +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Adam Williams +AdamKorcz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akhil Mohan +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Akshay Moghe +Al Tobey +alambike +Alan Hoyle +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Albin Kerouanton +Alec Benson +Alejandro González Hevia +Aleksa Sarai +Aleksandr Chebotov +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Nordlund +Alex Olshansky +Alex Samorukov +Alex Stockinger +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Polakov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexei Margasov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis Ries +Alexis Thomas +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Ameya Gawde +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anca Iordache +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Denisse Gómez +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler 
+Andrei Gherzan +Andrei Ushakov +Andrei Vagin +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kim +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Kolomentsev +Andrey Petrov +Andrey Stolbovsky +André Martins +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Lindeman +Andy Rothfusz +Andy Smith +Andy Wilson +Andy Zhang +Anes Hasicic +Angel Velazquez +Anil Belur +Anil Madhavapeddy +Ankit Jain +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anuj Varma +Anusha Ragunathan +Anyu Wang +apocas +Arash Deshmeh +arcosx +ArikaChen +Arko Dasgupta +Arnaud Lefebvre +Arnaud Porterie +Arnaud Rebillout +Artem Khramov +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +Austin Vazquez +averagehuman +Avi Das +Avi Kivity +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bao Yonglei +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +BartÅ‚omiej Piotrowski +Bastiaan Bakker +Bastien Pascard +bdevloed +Bearice Ren +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Gould +Ben Hall +Ben Langfeld +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Böhmke +Benjamin Wang +Benjamin Yolken +Benny Ng +Benoit Chesneau +Bernerd Schaefer +Bernhard M. Wiedemann +Bert Goethals +Bertrand Roussel +Bevisy Zhang +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Billy Ridgway +Bily Zhang +Bin Liu +Bingshen Wang +Bjorn Neergaard +Blake Geno +Boaz Shuster +bobby abbott +Bojun Zhu +Boqin Qin +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Milford +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Sparr +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. 
Su +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chee Hau Lim +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengfei Shang +Chengguang Xu +Chenyang Yan +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris Kreussling (Flatbush Gardener) +Chris McKinnel +Chris McKinnel +Chris Price +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Becker +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christoph Ziebuhr +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Norman +Chun Chen +Ciro S. Costa +Clayton Coleman +Clint Armstrong +Clinton Kitson +clubby789 +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Panisset +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Conor Evans +Corbin Coleman +Corey Farrell +Cory Forsyth +Cory Snider +cressie176 +Cristian Ariza +Cristian Staretu +cristiano balducci +Cristina Yenyxe Gonzalez Garcia +Cruceru Calin-Cristian +CUI Wei +cuishuang +Cuong Manh Le +Cyprian Gracz +Cyril F +Da McGrady +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Plamadeala +Dan Stine +Dan Williams +Dani Hodovic +Dani Louca +Daniel Antlinger +Daniel Black +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel P. Berrangé +Daniel Robinson +Daniel S +Daniel Sweet +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniele Rondina +Danny Berger +Danny Milosavljevic +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Bellotti +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Manouchehri +David Mat +David Mcanulty +David McKay +David O'Rourke +David P Hilton +David Pelaez +David R. 
Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deep Debroy +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +Devon Estes +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Dhilip Kumars +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Mandalidis +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +dingwei +Diogo Monica +DiuDiugirl +Djibril Koné +Djordje Lukic +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Sharshakov +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] +Dolph Mathews +Dominic Tubach +Dominic Yin +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen LuÄanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Koromilas +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Eng Zer Jun +Enguerran +Eohyung Lee +epeterso +er0k +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Mountain +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erich Cordoba +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Sipsma +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Espen Suenson +Ethan Bell +Ethan Mosbaugh +Euan Harris +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeniy Makhrov +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein MÃ¥løy Stenberg +ezbercih +Ezra Silvera +Fabian Kramm +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felipe Ruhland +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Feroz Salam +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Schmaus +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Degrassi +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Frank Yang +Fred Lifton +Frederick F. Kautz IV +Frederico F. 
de Oliveira +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau +Fu JinLin +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Adrian Samfira +Gabriel Goller +Gabriel L. Somlo +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +Gaurav Singh +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoff Levand +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Georgy Yakovlev +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +Giovan Isa Musthofa +gissehel +Giuseppe Mazzotta +Giuseppe Scrivano +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Goldwyn Rodrigues +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz JaÅ›kiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +Gunadhya S. <6939749+gunadhya@users.noreply.github.com> +Guoqiang QI +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Haichao Yang +haikuoliu +haining.cao +Hakan Özler +Hamish Hutchings +Hannes Ljungberg +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harald Niesche +Harley Laue +Harold Cooper +Harrison Turton +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hiroyuki Sasagawa +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +Hongxu Jia +Honza Pokorny +Hsing-Hui Hsu +Hsing-Yu (David) Chen +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +HuanHuan Ye +Huanzhong Zhang +Huayi Zhang +Hugo Barrera +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hui Kang +Hunter Blanks +huqun +Huu Nguyen +Hyeongkyu Lee +Hyzhou Zhy +Iago López Galeiras +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Illia Antypenko +Illo Abdulrahim +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Innovimax +Isaac Dupree +Isabel Jimenez +Isaiah Grace +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaime Cepeda +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +Jakub Drahos +Jakub Guzik +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Sanders +James Turnbull +James Watkins-Harvey +Jamie Hannaford +Jamshid Afshar +Jan Breig +Jan Chren +Jan Garcia +Jan Götte +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +Jasmine Hegman +Jason A. 
Donenfeld +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +Javier Bassi +jaxgeller +Jay +Jay Kamat +Jay Lim +Jean Rouge +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeff Zvier +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Huntwork +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Liao +Jian Zhang +Jiang Jinyang +Jianyong Wu +Jie Luo +Jie Ma +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Carroll +Jim Ehrismann +Jim Galasyn +Jim Lin +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +Jinsoo Park +Jintao Zhang +Jiri Appl +Jiri Popelka +Jiuyue Ma +Jiří Župka +Joakim Roubert +Joao Fernandes +Joao Trindade +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Dohse +Jonas Heinrich +Jonas Pfenniger +Jonathan A. Schweder +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jordi Massaguer Pla +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julien Pivotto +Julio Guerra +Julio Montes +Jun Du +Jun-Ru Chang +junxu +Jussi Nummelin +Justas Brazauskas +Justen Martin +Justin Cormack +Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérémy Leherpeur +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kaijie Chen +Kamil DomaÅ„ski +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Kazuyoshi Kato +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kenta Tada +Kevin "qwazerty" Houdebert +Kevin Alvarez +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. 
Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. Kucharczyk +Kevin Parsons +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +Kirk Easterson +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konrad Ponichtera +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Kostadin Plachkov +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Krystian Wojcicki +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +Kyle Squizzato +Kyle Wuolle +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Brehm +Laura Frank +Laurent Bernaille +Laurent Erignoux +Laurie Voss +Leandro Motta Barros +Leandro Siqueira +Lee Calcote +Lee Chao <932819864@qq.com> +Lee, Meng-Han +Lei Gong +Lei Jitang +Leiiwang +Len Weincier +Lennie +Leo Gallucci +Leonardo Nodari +Leonardo Taccari +Leszek Kowalski +Levi Blackstone +Levi Gross +Levi Harrison +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liangwei +Liao Qingwei +Lifubang +Lihua Tang +Lily Guo +limeidan +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Delossantos +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Lucas Silvestre +Luciano Mores +Luis Henrique Mulinari +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Heeren +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark Feit +Mark Jeromin +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark Vainomaa +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Braun +Martin Dojcak +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Maru Newby +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Mathieu Paturel +Matt Apperson +Matt Bachmann +Matt Bajor +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Morrison <3maven@gmail.com> +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Fronton +Matthieu Hauglustaine +Mattias Jernberg +Mauricio Garavaglia 
+mauriyouth +Max Harmathy +Max Shytikov +Max Timchenko +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Maximiliano Maccanti +Maxwell +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Menghui Chen +Mert YazıcıoÄŸlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Beskin +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Kuehn +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael Weidmann +Michael West +Michael Zhao +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Kostrzewa +Michal Minář +Michal Rostecki +Michal Wieczorek +Michaël Pailloncy +MichaÅ‚ Czeraszkiewicz +MichaÅ‚ Gryko +MichaÅ‚ Kosek +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Miguel Perez +Mihai Borobocea +Mihuleacc Sergiu +Mikael Davranche +Mike Brown +Mike Bush +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milas Bowman +Milind Chawre +Miloslav TrmaÄ +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammad Nasirifar +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Muhammad Zohaib Aslam +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Natasha Jarus +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Carlson +Nathan Herald +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Adcock +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Santos +Nick Stenning +Nick Stinemates +Nick Wood +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Niel Drummond +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Edigaryev +Nikolay Milovanov +ningmingxiao +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +Noriki Nakamura +nponeccop +Nurahmadie +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Odin Ugedal +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Oliver Reason +Olivier Gambier +Olle Jonsson +Olli Janatuinen +Olly Pomeroy +Omri Shiv +Onur Filiz +Oriol Francès +Oscar Bonilla <6f6231@gmail.com> +oscar.chen <2972789494@qq.com> +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Bach +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Haas +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul "TBBle" Hampson +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Gomes +Paulo Ribeiro +Pavel Lobashov +Pavel MatÄ›ja +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +PaweÅ‚ Gronowski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Pete Woods +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Kang +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Å vihlík +Petros Angelatos +Phil +Phil Estes +Phil Sphicas +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +Piotr Karbowski +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Pradipta Kr. Banerjee +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Puneet Pruthi +Pure White +pysqz +Qiang Huang +Qin TianHuan +Qinglan Peng +Quan Tian +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Radostin Stoyanov +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +RaviTeja Pothana +Ray Tsang +ReadmeCritic +realityone +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Horwood +Rich Moyse +Rich Seymour +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> +Rob Gulewich +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Shade +Robert Stern +Robert Terhaar +Robert Wallis +Robert Wang +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +Robin Thoni +robpc +Rodolfo Carvalho +Rodrigo Campos +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rohit Kapur +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Mazur +Roman Strashkin +Roman Volosatovs +Roman Zabaluev +Ron Smits +Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song +Rony Weng +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Roy Reznik +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Cao +Rui Lopes +Ruilin Li +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Barry +Ryan Belgrave +Ryan Campbell +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Shea +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Ryo Nakao +Ryoga Saito +Rémy Greinhofer +s. 
rannou +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sam Whited +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +sanchayanghosh +Sandeep Bansal +Sankar சஙà¯à®•à®°à¯ +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Sascha Grunert +SataQiu +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Percival +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Sergio Lopez +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayan Pooya +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shengjing Zhu +Shev Yan +Shih-Yuan Lee +Shihao Xia +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +Shu-Wai Chow +shuai-z +Shukui Yang +Sian Lerk Lau +Siarhei Rasiukevich +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simão Reis +Simon Barendse +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +skanehira +Smark Meng +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Sotiris Salloumis +Soulou +Spencer Brown +Spencer Smith +Spike Curtis +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Staf Wagemakers +Stanislav Bondarenko +Stanislav Levin +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Steffen Butzer +Stephan Spindler +Stephen Benjamin +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +SteÌphane Este-Gracias +Stig Larsson +Su Wang +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sune Keller +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Sören Tempel +Tabakhase +Tadej Janež +Takuto Sato +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. 
Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +Terry Chu +terryding77 <550147740@qq.com> +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thiago Alves Silva +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Graf +Thomas Grainger +Thomas Hansen +Thomas Ledos +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tiago Seabra +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Till Claassen +Till Wegmüller +Tim +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wagner +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> +timfeirg +Timo Rothenpieler +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Pfandzelter +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Parker +Tom Sweeney +Tom Wilkie +Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tomek MaÅ„ko +Tommaso Visconti +Tomoya Tabuchi +Tomáš HrÄka +tonic +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Toshiaki Makita +Tõnis Tiigi +Trace Andreason +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tudor Brindus +Ty Alexander +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +Valentin Kulesh +vanderliang +Velko Ivanov +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Vikas Choudhary +Vikram bir Singh +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Boulineau +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vladislav Kolesnikov +Vlastimil Zeman +Vojtech Vitek (V-Teq) +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +wanghuaiqing +Ward Vandewege +WarheadsSE +Wassim Dhif +Wataru Ishida +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Fu +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +wenlxie +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Wiktor Kwapisiewicz +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +Wilson Júnior +Wing-Kam Wong +WiseTrem +Wolfgang Nagele +Wolfgang Powisch +Wonjun Kim +WuLonghui +xamyzhao +Xia Wu +Xian Chaobo +Xianglin Gao +Xianjie +Xianlu Bird +Xiao YongBiao +Xiao Zhang +XiaoBing Jiang +Xiaodong Liu +Xiaodong Zhang +Xiaohua Ding +Xiaoxi He +Xiaoxu Chen +Xiaoyu Zhang +xichengliudui <1693291525@qq.com> +xiekeyang +Ximo Guanter Gonzálbez +xin.li +Xinbo Weng +Xinfeng Liu +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +yalpul +YAMADA Tsuyoshi +Yamasaki Masahide +Yamazaki Masashi +Yan Feng +Yan Zhu +Yang Bai +Yang Li +Yang Pengfei +yangchenliang +Yann Autissier +Yanqiang Miao +Yao Zaiyong +Yash Murty +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongxin Li +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有å‹) +youcai +Youcef YEKHLEF +Youfu Zhang +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yue Zhang +Yufei Xiong +Yuhao Fang +Yuichiro Kaneko +YujiOshima +Yunxiang Huang +Yurii Rashkovskii +Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> +Yves Junqueira +Zac Dover +Zach Borboa +Zach Gershman +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenhai Gao +Zhenkun Bi +ZhiPeng Lu +zhipengzuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Ziheng Liu +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Ãlvaro Lázaro +Ãtila Camurça Alves +å°¹å‰å³° +å±ˆéª +å¾ä¿Šæ° +慕陶 +æ通 +黄艳红00139573 +ì •ìž¬ì˜ diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 00000000..6d8d58fb --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 00000000..58b19b6d --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. 
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 00000000..1ef911ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it is interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 00000000..489e917e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,65 @@
+package versions // import "github.com/docker/docker/api/types/versions"
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, or 0 otherwise.
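A minimal sketch of how the exported helpers built on `compare` behave (an illustrative caller only, not part of the vendored file; the function body itself follows):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Comparison is numeric per dot-separated segment, so "1.9" < "1.10"
	// (a plain string comparison would get this wrong).
	fmt.Println(versions.LessThan("1.9", "1.10")) // true
	// Missing segments are treated as zero, so "1.20" equals "1.20.0".
	fmt.Println(versions.Equal("1.20", "1.20.0")) // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.24", "1.19")) // true
}
```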
+func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 00000000..1ca0965e
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+	"runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. On Windows, if
+// loading the system pool fails, it returns a new empty pool and no error.
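In other words, a failed system-pool load on Windows degrades to an empty pool rather than an error. A minimal sketch of a caller seeding a `tls.Config` with this helper (illustrative only, not part of the vendored sources; the function body follows):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Non-Windows platforms may surface an error here; Windows callers
	// receive an empty pool and a nil error instead.
	pool, err := tlsconfig.SystemCertPool()
	if err != nil {
		log.Fatalf("loading system cert pool: %v", err)
	}
	cfg := &tls.Config{RootCAs: pool, MinVersion: tls.VersionTLS12}
	_ = cfg // hand cfg to an http.Transport, a dialer, etc.
}
```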
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 00000000..1ff81c33
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool is only supported in go 1.7 and later.
+func SystemCertPool() (*x509.CertPool, error) {
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 00000000..0ef3fdcb
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,254 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig

+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+	// creds will include exclusively the roots in that CA file. If no CA file is provided,
+	// the system pool will be used.
+	ExclusiveRootPools bool
+	MinVersion         uint16
+	// If Passphrase is set, it will be used to decrypt a TLS private key
+	// if the key is encrypted
+	Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+	tls.VersionSSL30: {},
+	tls.VersionTLS10: {},
+	tls.VersionTLS11: {},
+	tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for servers.
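The variadic `ops ...func(*tls.Config)` signature enables a functional-options style: start from the hardened defaults, then layer on adjustments. A minimal sketch (illustrative caller only; the function body follows):

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Take the hardened defaults, then add one extra option: here,
	// requiring verified client certificates for mutual TLS.
	cfg := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.ClientAuth = tls.RequireAndVerifyClientCert
	})
	fmt.Printf("min version: %x, client auth: %v\n", cfg.MinVersion, cfg.ClientAuth)
}
```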
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Avoid fallback to SSL protocols < TLS1.2 by default
+		MinVersion:               tls.VersionTLS12,
+		PreferServerCipherSuites: true,
+		CipherSuites:             DefaultServerAcceptedCiphers,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// ClientDefault returns a secure-enough TLS configuration for clients.
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Prefer TLS1.2 as the client minimum
+		MinVersion:   tls.VersionTLS12,
+		CipherSuites: clientCipherSuites,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+	// If we should verify the server, we need to load a trusted ca
+	var (
+		certPool *x509.CertPool
+		err      error
+	)
+	if exclusivePool {
+		certPool = x509.NewCertPool()
+	} else {
+		certPool, err = SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read system certificates: %v", err)
+		}
+	}
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+	}
+	if !certPool.AppendCertsFromPEM(pem) {
+		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+	}
+	return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+	_, ok := allTLSVersions[version]
+	return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+	if options.MinVersion > 0 {
+		if !isValidMinVersion(options.MinVersion) {
+			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+		}
+		if options.MinVersion < config.MinVersion {
+			return fmt.Errorf("Requested minimum TLS version is too low. Should be at least: %x", config.MinVersion)
+		}
+		config.MinVersion = options.MinVersion
+	}
+
+	return nil
+}
+
+// IsErrEncryptedKey returns true if 'err' is an incorrect-password error
+// from trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+	return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+	// this section makes some small changes to code from notary/tuf/utils/x509.go
+	pemBlock, _ := pem.Decode(keyBytes)
+	if pemBlock == nil {
+		return nil, fmt.Errorf("no valid private key found")
+	}
+
+	var err error
+	if x509.IsEncryptedPEMBlock(pemBlock) {
+		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+		if err != nil {
+			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+		}
+		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+	}
+
+	return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options'.
+// If the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
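Putting `certPool`, `getPrivateKey`, and `getCert` together, the usual entry point is `Client` (defined just below). A minimal sketch with hypothetical file paths (illustrative caller only, not part of the vendored sources):

```go
package main

import (
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// The paths are placeholders. Client loads the CA pool and, since
	// CertFile and KeyFile are both set, a client certificate as well.
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/path/to/ca.pem",
		CertFile: "/path/to/cert.pem",
		KeyFile:  "/path/to/key.pem",
	})
	if err != nil {
		log.Fatalf("building client TLS config: %v", err)
	}
	_ = cfg // use with, e.g., an http.Transport's TLSClientConfig
}
```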
+func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 00000000..6b4c6a7c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 00000000..ee22df47 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 00000000..9ea86d78 --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 00000000..b55b37bc --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 00000000..4aac7c74 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 00000000..4f70a4e1 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 00000000..af9d6055 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 00000000..48dd8744 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. 
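+//
+// For example, HumanDuration(90*time.Second) returns "About a minute" and
+// HumanSize(1000000) returns "1MB" (illustrative values derived from the
+// implementations in this package).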
+package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). +func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 00000000..c245a895 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,154 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[byte]int64 + +var ( + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} +) + +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). 
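+// For example, "44kB" parses to 44000 and "1.5GB" to 1500000000; binary-style
+// suffixes such as "KiB" are also accepted here, but are resolved against the
+// decimal multiplier map.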
+func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } + + size, err := strconv.ParseFloat(num, 64) + if err != nil { + return -1, err + } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } + + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { + size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). + switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix + } + + return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 00000000..fca0400c --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. 
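+	// The remaining names map to the Linux RLIMIT_* numbers defined above
+	// (e.g. "core" -> rlimitCore -> RLIMIT_CORE).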
+ "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore new file mode 100644 index 00000000..eb29ebae --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml 
b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md new file mode 100644 index 00000000..3305db0f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md new file mode 100644 index 00000000..b63e1f8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md
new file mode 100644
index 00000000..b90c7e5c
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/README.md
@@ -0,0 +1,122 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose)
+[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose)
+
+Package jose aims to provide an implementation of the JavaScript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 2](https://gopkg.in/go-jose/go-jose.v2)
+([branch](https://github.com/go-jose/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
+
+    import "gopkg.in/go-jose/go-jose.v2"
+
+[Version 3](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/master),
+[doc](https://godoc.org/github.com/go-jose/go-jose)) is the version under
+development (unstable, not yet released):
+
+    import "github.com/go-jose/go-jose/v3"
+
+All new feature development takes place on the `master` branch, which we are
+preparing to release as version 3 soon. Version 2 will continue to receive
+critical bug and security fixes. Note that starting with version 3 we are
+using Go modules for versioning instead of `gopkg.in` as before. Version 3
+will also require Go version 1.13 or higher.
+
+Version 1 (on the `v1` branch) is frozen and not supported anymore.
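+
+As a quick, illustrative sketch (not an official example; the Godoc reference
+is the authoritative source for the API), a symmetric sign/verify round-trip
+with the v2 import path might look roughly like the following, assuming the
+`jose` and `fmt` packages are imported and `sharedKey` is a caller-provided
+32-byte HMAC key:
+
+    signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: sharedKey}, nil)
+    if err != nil {
+        panic(err)
+    }
+    object, _ := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+    serialized, _ := object.CompactSerialize() // compact JWS form
+    parsed, _ := jose.ParseSigned(serialized)
+    payload, _ := parsed.Verify(sharedKey) // recovers the original bytes
+    fmt.Printf("%s\n", payload)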
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+ Ed25519                    | EdDSA<sup>2</sup>
+
+2. Only available in version 2 of the package
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+ AES, HMAC                  | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
new file mode 100644
index 00000000..78abc326
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. 
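+// The public key is also validated to lie on its declared curve before use.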
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch keyAlg {
+	case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+	default:
+		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+		return recipientKeyInfo{}, errors.New("invalid public key")
+	}
+
+	return recipientKeyInfo{
+		keyAlg: keyAlg,
+		keyEncrypter: &ecEncrypterVerifier{
+			publicKey: publicKey,
+		},
+	}, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that key management algorithm is supported by this encrypter
+	switch sigAlg {
+	case ES256, ES384, ES512:
+	default:
+		return recipientSigInfo{}, ErrUnsupportedAlgorithm
+	}
+
+	if privateKey == nil {
+		return recipientSigInfo{}, errors.New("invalid private key")
+	}
+
+	return recipientSigInfo{
+		sigAlg: sigAlg,
+		publicKey: staticPublicKey(&JSONWebKey{
+			Key: privateKey.Public(),
+		}),
+		signer: &ecDecrypterSigner{
+			privateKey: privateKey,
+		},
+	}, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+	encryptedKey, err := ctx.encrypt(cek, alg)
+	if err != nil {
+		return recipientInfo{}, err
+	}
+
+	return recipientInfo{
+		encryptedKey: encryptedKey,
+		header:       &rawHeader{},
+	}, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+	switch alg {
+	case RSA1_5:
+		return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+	case RSA_OAEP:
+		return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+	case RSA_OAEP_256:
+		return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+	}
+
+	return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+	return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+	// Note: The random reader on decrypt operations is only used for blinding,
+	// so stubbing is meaningless (hence the direct use of rand.Reader).
+	switch alg {
+	case RSA1_5:
+		defer func() {
+			// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+			// because of an index out of bounds error, which we want to ignore.
+			// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+			// only exists for preventing crashes with unpatched versions.
+			// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+			// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+			_ = recover()
+		}()
+
+		// Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
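+		// In direct mode the "enc" header value serves as the ConcatKDF
+		// algorithm ID, and the content cipher dictates the derived key size.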
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go new file mode 100644 index 00000000..af029cec --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
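+	// Allocate capacity for the padded result up front, then reslice to the
+	// plaintext length so padBuffer below can grow the slice without copying.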
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
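+// The returned tail slice aliases the region beyond the original input length
+// and is ready to receive appended data.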
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go new file mode 100644 index 00000000..f62c3bdb --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go new file mode 100644 index 00000000..093c6467 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+	if size > 1<<16 {
+		panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+	}
+
+	// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+	algID := lengthPrefixed([]byte(alg))
+	ptyUInfo := lengthPrefixed(apuData)
+	ptyVInfo := lengthPrefixed(apvData)
+
+	// suppPubInfo is the encoded length of the output size in bits
+	supPubInfo := make([]byte, 4)
+	binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+	if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+		panic("public key not on same curve as private key")
+	}
+
+	z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+	zBytes := z.Bytes()
+
+	// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+	// the returned byte array. This can lead to a problem where zBytes will be
+	// shorter than expected which breaks the key derivation. Therefore we must pad
+	// to the full length of the expected coordinate here before calling the KDF.
+	octSize := dSize(priv.Curve)
+	if len(zBytes) != octSize {
+		zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+	}
+
+	reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+	key := make([]byte, size)
+
+	// Read on the KDF will never fail
+	_, _ = reader.Read(key)
+
+	return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+	order := curve.Params().P
+	bitLen := order.BitLen()
+	size := bitLen / 8
+	if bitLen%8 != 0 {
+		size++
+	}
+	return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+	out := make([]byte, len(data)+4)
+	binary.BigEndian.PutUint32(out, uint32(len(data)))
+	copy(out[4:], data)
+	return out
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
new file mode 100644
index 00000000..b9effbca
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go new file mode 100644 index 00000000..6901137e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -0,0 +1,544 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "github.com/go-jose/go-jose/v3/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
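+//
+// A typical flow, with error handling elided and key material assumed to be
+// supplied by the caller (the algorithm choices are illustrative only):
+//
+//	enc, _ := NewEncrypter(A256GCM, Recipient{Algorithm: DIRECT, Key: cek}, nil)
+//	jwe, _ := enc.Encrypt(plaintext)
+//	compact, _ := jwe.CompactSerialize()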
+type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
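+//
+// For a password-based recipient, for example (values illustrative only):
+//
+//	rcpt := Recipient{Algorithm: PBES2_HS256_A128KW, Key: "password", PBES2Count: 150000}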
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + 
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
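+		// (Folding everything into the protected header is also what makes the
+		// compact serialization possible, since compact JWE has no slot for
+		// per-recipient headers.)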
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
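+//
+// Illustrative use, assuming obj came from ParseEncrypted and priv is the
+// private key of one of the recipients:
+//
+//	idx, hdr, plaintext, err := obj.DecryptMulti(priv)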
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, _ = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go new file mode 100644 index 00000000..71ec1c41 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go new file mode 100644 index 00000000..968a4249 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v3/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
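+// In JSON it marshals to and from a base64url string, which is how JOSE
+// represents binary fields such as keys, IVs, and authentication tags.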
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
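Editor's note: `base64URLDecode` in the vendored `encoding.go` above accepts base64url input with or without `=` padding (JOSE fields are encoded unpadded per RFC 7515, Appendix C). A minimal standalone sketch of the same behavior using only the standard library; the `decodeB64URL` name is illustrative and not part of the vendored API:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeB64URL mirrors the vendored base64URLDecode: strip any trailing
// '=' padding, then decode the remainder as raw (unpadded) url-safe base64.
func decodeB64URL(value string) ([]byte, error) {
	value = strings.TrimRight(value, "=")
	return base64.RawURLEncoding.DecodeString(value)
}

func main() {
	// Both padded and unpadded encodings of "hello" decode identically.
	for _, s := range []string{"aGVsbG8", "aGVsbG8="} {
		b, err := decodeB64URL(s)
		fmt.Printf("%q %v\n", b, err)
	}
}
```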
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md new file mode 100644 index 00000000..86de5e55 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go new file mode 100644 index 00000000..4dbc4146 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. 
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. 
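+// The panic is recovered in decodeState.unmarshal above, which converts it
+// back into an ordinary error return value.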
+func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
+func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), 
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. This is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		// Empty string given.
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+			return
+		}
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		}
+
+	case '"': // string
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+		case reflect.Slice:
+			if v.Type().Elem().Kind() != reflect.Uint8 {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+				break
+			}
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+			n, err := base64.StdEncoding.Decode(b, s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			v.SetBytes(b[:n])
+		case reflect.String:
+			v.SetString(string(s))
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(string(s)))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+		}
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				v.SetString(s)
+				if !isValidNumber(s) {
+					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+				}
+				break
+			}
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+			}
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+	switch d.scanWhile(scanSkipSpace) {
+	default:
+		d.error(errPhase)
+		panic("unreachable")
+	case scanBeginArray:
+		return d.arrayInterface()
+	case scanBeginObject:
+		return d.objectInterface()
+	case scanBeginLiteral:
+		return d.literalInterface()
+	}
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+	return v
+}
+
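+// Illustrative sketch, not upstream text: these xxxInterface routines are what
+// an untyped target is ultimately filled with, e.g.
+//
+//	var v interface{}
+//	_ = Unmarshal([]byte(`{"xs": [true, "hi"]}`), &v)
+//	// v == map[string]interface{}{"xs": []interface{}{true, "hi"}}
+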
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+	m := make(map[string]interface{})
+	keys := map[string]bool{}
+
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			// closing } - can only happen on first iteration.
+			break
+		}
+		if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+
+		// Read string key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		key, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+
+		// Check for duplicate keys.
+		_, ok = keys[key]
+		if !ok {
+			keys[key] = true
+		} else {
+			d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+	item := d.data[start:d.off]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			d.error(errPhase)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different from Go's, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
+
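+// Illustrative sketch, not upstream text: getu4 together with the
+// surrogate-pair handling in unquoteBytes below combines two \uXXXX escapes
+// into a single rune:
+//
+//	s, ok := unquote([]byte(`"\uD834\uDD1E"`))
+//	// ok == true, s == "𝄞" (U+1D11E, MUSICAL SYMBOL G CLEF)
+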
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room? Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
new file mode 100644
index 00000000..ea0a1361
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
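+//
+// For illustration only (a sketch, not part of the original comment), the tag
+// rules above give:
+//
+//	type Point struct {
+//		X    int    `json:"x"`
+//		Y    int    `json:"y,omitempty"`
+//		Name string `json:"-"`
+//	}
+//	b, _ := Marshal(Point{X: 1, Name: "origin"})
+//	// string(b) == `{"x":1}`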
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML