From 63f44c87ef1396be27fe216deedc55a3645e3ff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miloslav=20Trma=C4=8D?= Date: Thu, 28 Nov 2024 21:39:52 +0100 Subject: [PATCH] Test with current c/storage and c/image MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Miloslav Trmač --- go.mod | 32 +- go.sum | 89 +- .../stargz-snapshotter/estargz/testutil.go | 15 +- .../containerd/typeurl/v2/README.md | 6 + .../github.com/containerd/typeurl/v2/types.go | 89 +- .../containerd/typeurl/v2/types_gogo.go | 68 ++ .../containers/image/v5/copy/single.go | 24 +- .../image/v5/directory/directory_dest.go | 1 + .../image/v5/docker/daemon/client.go | 12 + .../image/v5/docker/distribution_error.go | 8 +- .../image/v5/docker/docker_image_dest.go | 1 + .../image/v5/docker/docker_image_src.go | 4 + .../image/v5/docker/internal/tarfile/dest.go | 1 + .../stubs/original_oci_config.go | 16 + .../v5/internal/imagedestination/wrapper.go | 1 + .../internal/manifest/docker_schema2_list.go | 32 +- .../image/v5/internal/private/private.go | 7 + .../image/v5/oci/archive/oci_dest.go | 9 + .../image/v5/oci/internal/oci_util.go | 2 +- .../image/v5/oci/layout/oci_dest.go | 1 + .../image/v5/openshift/openshift_dest.go | 9 + .../containers/image/v5/pkg/blobcache/dest.go | 9 + .../image/v5/signature/fulcio_cert_stub.go | 2 +- .../v5/signature/internal/rekor_set_stub.go | 2 +- .../image/v5/storage/storage_dest.go | 281 +++-- .../image/v5/storage/storage_reference.go | 4 +- .../image/v5/storage/storage_src.go | 22 +- .../containers/image/v5/version/version.go | 4 +- .../keywrap/pkcs7/keywrapper_pkcs7.go | 2 +- .../github.com/containers/storage/.cirrus.yml | 4 +- vendor/github.com/containers/storage/Makefile | 2 +- vendor/github.com/containers/storage/VERSION | 2 +- vendor/github.com/containers/storage/check.go | 8 +- .../containers/storage/drivers/aufs/aufs.go | 5 + .../containers/storage/drivers/btrfs/btrfs.go | 5 + .../storage/drivers/chown_darwin.go | 2 +- .../containers/storage/drivers/chown_unix.go | 2 +- .../storage/drivers/copy/copy_linux.go | 4 +- .../containers/storage/drivers/driver.go | 20 + .../storage/drivers/overlay/check_116.go | 3 +- .../storage/drivers/overlay/composefs.go | 4 +- .../storage/drivers/overlay/overlay.go | 21 + .../storage/drivers/overlay/overlay_nocgo.go | 23 - .../drivers/register/register_overlay.go | 2 +- .../containers/storage/drivers/vfs/driver.go | 17 + .../storage/drivers/windows/windows.go | 5 + .../containers/storage/drivers/zfs/zfs.go | 5 + .../storage/internal/dedup/dedup.go | 163 +++ .../storage/internal/dedup/dedup_linux.go | 139 +++ .../internal/dedup/dedup_unsupported.go | 27 + .../github.com/containers/storage/layers.go | 37 +- .../containers/storage/pkg/archive/archive.go | 55 +- .../containers/storage/pkg/archive/changes.go | 3 + .../storage/pkg/archive/changes_linux.go | 11 +- .../storage/pkg/chunked/cache_linux.go | 10 +- .../storage/pkg/chunked/compression.go | 18 +- .../storage/pkg/chunked/compression_linux.go | 167 +-- .../pkg/chunked/compressor/compressor.go | 24 +- .../storage/pkg/chunked/dump/dump.go | 51 +- .../storage/pkg/chunked/filesystem_linux.go | 73 +- .../internal/{ => minimal}/compression.go | 2 +- .../storage/pkg/chunked/internal/path/path.go | 12 + .../storage/pkg/chunked/storage_linux.go | 224 +++- .../containers/storage/pkg/chunked/toc/toc.go | 4 +- .../containers/storage/pkg/idtools/idtools.go | 169 ++- .../storage/pkg/loopback/attach_loopback.go | 2 +- .../containers/storage/pkg/loopback/ioctl.go | 2 +- 
.../storage/pkg/loopback/loop_wrapper.go | 38 +- .../storage/pkg/loopback/loopback.go | 2 +- .../storage/pkg/system/extattr_freebsd.go | 93 ++ .../storage/pkg/system/extattr_unsupported.go | 24 + .../storage/pkg/system/stat_netbsd.go | 13 + .../storage/pkg/system/xattrs_darwin.go | 2 +- .../storage/pkg/system/xattrs_freebsd.go | 85 ++ .../storage/pkg/system/xattrs_linux.go | 2 +- .../storage/pkg/system/xattrs_unsupported.go | 4 +- vendor/github.com/containers/storage/store.go | 64 + .../registry/client/auth/challenge/addr.go | 27 - .../client/auth/challenge/authchallenge.go | 237 ---- .../github.com/docker/docker/api/swagger.yaml | 6 +- .../github.com/docker/docker/client/ping.go | 4 +- .../docker/docker/pkg/system/lstat_unix.go | 4 +- .../docker/docker/pkg/system/lstat_windows.go | 2 + .../docker/docker/pkg/system/mknod.go | 2 + .../docker/docker/pkg/system/mknod_freebsd.go | 2 + .../docker/docker/pkg/system/mknod_unix.go | 2 + .../docker/docker/pkg/system/stat_linux.go | 2 + .../docker/docker/pkg/system/stat_unix.go | 6 +- .../docker/docker/pkg/system/stat_windows.go | 6 +- vendor/github.com/proglottis/gpgme/.gitignore | 2 + .../github.com/proglottis/gpgme/callbacks.go | 42 - vendor/github.com/proglottis/gpgme/data.go | 119 +- vendor/github.com/proglottis/gpgme/go_gpgme.c | 20 +- vendor/github.com/proglottis/gpgme/go_gpgme.h | 4 +- vendor/github.com/proglottis/gpgme/gpgme.go | 78 +- .../proglottis/gpgme/unset_agent_info.go | 1 + .../sigstore/pkg/oauthflow/interactive.go | 2 +- .../smallstep}/pkcs7/.gitignore | 4 + .../smallstep}/pkcs7/LICENSE | 0 .../smallstep}/pkcs7/Makefile | 0 vendor/github.com/smallstep/pkcs7/README.md | 63 + .../smallstep}/pkcs7/ber.go | 21 +- .../smallstep}/pkcs7/decrypt.go | 70 +- .../smallstep}/pkcs7/encrypt.go | 102 +- .../pkcs7/internal/legacy/x509/debug.go | 14 + .../pkcs7/internal/legacy/x509/doc.go | 14 + .../pkcs7/internal/legacy/x509/oid.go | 377 ++++++ .../pkcs7/internal/legacy/x509/parser.go | 1027 +++++++++++++++++ .../pkcs7/internal/legacy/x509/pkcs1.go | 15 + .../pkcs7/internal/legacy/x509/verify.go | 193 ++++ .../pkcs7/internal/legacy/x509/x509.go | 488 ++++++++ .../smallstep}/pkcs7/pkcs7.go | 95 +- .../smallstep}/pkcs7/sign.go | 0 .../smallstep}/pkcs7/verify.go | 50 +- .../sylabs/sif/v2/pkg/sif/create.go | 12 +- vendor/go.mozilla.org/pkcs7/README.md | 69 -- .../go.mozilla.org/pkcs7/verify_test_dsa.go | 182 --- vendor/golang.org/x/crypto/cryptobyte/asn1.go | 825 +++++++++++++ .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 350 ++++++ .../golang.org/x/crypto/cryptobyte/string.go | 183 +++ .../x/exp/constraints/constraints.go | 50 - vendor/golang.org/x/exp/maps/maps.go | 58 +- vendor/golang.org/x/exp/slices/cmp.go | 44 - vendor/golang.org/x/exp/slices/slices.go | 416 +------ vendor/golang.org/x/exp/slices/sort.go | 143 +-- .../golang.org/x/exp/slices/zsortanyfunc.go | 479 -------- .../golang.org/x/exp/slices/zsortordered.go | 481 -------- vendor/golang.org/x/oauth2/README.md | 15 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 16 +- .../grpc/balancer/balancer.go | 15 +- .../grpc/balancer/base/balancer.go | 2 +- .../balancer/pickfirst/internal/internal.go | 24 + .../grpc/balancer/pickfirst/pickfirst.go | 14 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 625 ++++++++++ .../grpc/balancer_wrapper.go | 48 +- vendor/google.golang.org/grpc/clientconn.go | 54 +- .../google.golang.org/grpc/credentials/tls.go | 29 +- vendor/google.golang.org/grpc/dialoptions.go | 2 +- .../balancer/gracefulswitch/config.go | 2 + 
.../grpc/internal/channelz/channel.go | 15 + .../grpc/internal/channelz/server.go | 2 + .../grpc/internal/channelz/socket.go | 7 + .../grpc/internal/channelz/subchannel.go | 2 + .../grpc/internal/channelz/trace.go | 19 +- .../grpc/internal/envconfig/envconfig.go | 5 + .../internal/grpcsync/callback_serializer.go | 2 +- .../grpc/internal/grpcutil/method.go | 2 +- .../grpc/internal/idle/idle.go | 4 +- .../grpc/internal/internal.go | 8 +- .../internal/resolver/dns/dns_resolver.go | 6 +- .../internal/stats/metrics_recorder_list.go | 10 + .../grpc/internal/status/status.go | 35 +- .../grpc/internal/transport/http2_client.go | 60 +- .../grpc/internal/transport/transport.go | 11 +- vendor/google.golang.org/grpc/mem/buffers.go | 32 +- vendor/google.golang.org/grpc/rpc_util.go | 3 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 59 +- 159 files changed, 6908 insertions(+), 3068 deletions(-) create mode 100644 vendor/github.com/containerd/typeurl/v2/types_gogo.go create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go delete mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup.go create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_linux.go create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go rename vendor/github.com/containers/storage/pkg/chunked/internal/{ => minimal}/compression.go (99%) create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_netbsd.go create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go delete mode 100644 vendor/github.com/proglottis/gpgme/callbacks.go rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/.gitignore (88%) rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/LICENSE (100%) rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/Makefile (100%) create mode 100644 vendor/github.com/smallstep/pkcs7/README.md rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/ber.go (91%) rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/decrypt.go (62%) rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/encrypt.go (74%) create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go create mode 100644 vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/pkcs7.go (70%) rename vendor/{go.mozilla.org => github.com/smallstep}/pkcs7/sign.go (100%) rename vendor/{go.mozilla.org => 
github.com/smallstep}/pkcs7/verify.go (87%) delete mode 100644 vendor/go.mozilla.org/pkcs7/README.md delete mode 100644 vendor/go.mozilla.org/pkcs7/verify_test_dsa.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go delete mode 100644 vendor/golang.org/x/exp/constraints/constraints.go delete mode 100644 vendor/golang.org/x/exp/slices/cmp.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go diff --git a/go.mod b/go.mod index d693243640..8bbc32191c 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/containers/podman/v5 // Warning: if there is a "toolchain" directive anywhere in this file (and most of the // time there shouldn't be), its version must be an exact match to the "go" directive. -go 1.22.6 +go 1.22.8 require ( github.com/BurntSushi/toml v1.4.0 @@ -17,11 +17,11 @@ require ( github.com/containers/common v0.61.1-0.20241202111335-2d4a9a65dd81 github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.8.1 - github.com/containers/image/v5 v5.33.0 + github.com/containers/image/v5 v5.33.1-0.20250102201119-717b49b0ac3d github.com/containers/libhvee v0.9.0 - github.com/containers/ocicrypt v1.2.0 + github.com/containers/ocicrypt v1.2.1 github.com/containers/psgo v1.9.0 - github.com/containers/storage v1.56.0 + github.com/containers/storage v1.56.1-0.20250102204143-48e12ae2d80f github.com/containers/winquit v1.1.0 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 github.com/crc-org/crc/v2 v2.45.0 @@ -29,7 +29,7 @@ require ( github.com/cyphar/filepath-securejoin v0.3.5 github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e github.com/docker/distribution v2.8.3+incompatible - github.com/docker/docker v27.4.0+incompatible + github.com/docker/docker v27.4.1+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8 github.com/docker/go-units v0.5.0 @@ -72,7 +72,7 @@ require ( github.com/vishvananda/netlink v1.3.0 go.etcd.io/bbolt v1.3.11 golang.org/x/crypto v0.31.0 - golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 golang.org/x/net v0.33.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.28.0 @@ -103,8 +103,8 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect - github.com/containerd/typeurl/v2 v2.2.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containernetworking/cni v1.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 // indirect @@ -189,18 +189,19 @@ require ( github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/proglottis/gpgme v0.1.3 // indirect + github.com/proglottis/gpgme v0.1.4 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/seccomp/libseccomp-golang v0.10.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sigstore/fulcio v1.6.4 // indirect github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/sigstore v1.8.9 // indirect + github.com/sigstore/sigstore v1.8.11 // indirect github.com/skeema/knownhosts v1.3.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/smallstep/pkcs7 v0.1.1 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/sylabs/sif/v2 v2.19.1 // indirect + github.com/sylabs/sif/v2 v2.20.2 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect @@ -214,7 +215,6 @@ require ( github.com/vishvananda/netns v0.0.4 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect - go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect @@ -222,11 +222,11 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.27.0 // indirect + golang.org/x/tools v0.28.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.67.0 // indirect + google.golang.org/grpc v1.68.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect diff --git a/go.sum b/go.sum index 3e1589505a..95de9caf27 100644 --- a/go.sum +++ b/go.sum @@ -68,10 +68,10 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= -github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 
v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= @@ -84,20 +84,20 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/gvisor-tap-vsock v0.8.1 h1:88qkOjGMF9NmyoVG/orUw73mdwj3z4aOwEbRS01hF78= github.com/containers/gvisor-tap-vsock v0.8.1/go.mod h1:gjdY4JBWnynrXsxT8+OM7peEOd4FCZpoOWjSadHva0g= -github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE= -github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk= +github.com/containers/image/v5 v5.33.1-0.20250102201119-717b49b0ac3d h1:6y9M7h0L4n2HhKZL2IAs6R7HJcCNKPyfPl7CrSPA9Bs= +github.com/containers/image/v5 v5.33.1-0.20250102201119-717b49b0ac3d/go.mod h1:XtwVqcE9c4QsuW+N1KkanmIEA8FMvFCJdA7aE2GD6nY= github.com/containers/libhvee v0.9.0 h1:5UxJMka1lDfxTeITA25Pd8QVVttJAG43eQS1Getw1tc= github.com/containers/libhvee v0.9.0/go.mod h1:p44VJd8jMIx3SRN1eM6PxfCEwXQE0lJ0dQppCAlzjPQ= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 h1:57rxgU2wdI3lZMDZtao09WjCWmxBKOxI/Sj37IpCV50= github.com/containers/luksy v0.0.0-20241007190014-e2530d691420/go.mod h1:MYzFCudLgMcXgFl7XuFjUowNDTBqL09BfEgMf7QHtO4= -github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= -github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U= +github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= +github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g= github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A= -github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI= -github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk= +github.com/containers/storage v1.56.1-0.20250102204143-48e12ae2d80f h1:bXsyxapdm5nTm+J6npzAg6lLrsofvP5KVSQOU7tRXfc= +github.com/containers/storage v1.56.1-0.20250102204143-48e12ae2d80f/go.mod h1:iLQGLnPAL+Hxzmq6QhbTch5DdGwYbnNpL3JpHexCD+k= github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE= github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= @@ -129,12 +129,12 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= -github.com/docker/cli v27.3.1+incompatible/go.mod 
h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= +github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.0+incompatible h1:I9z7sQ5qyzO0BfAb9IMOawRkAGxhYsidKiTMcm0DU+A= -github.com/docker/docker v27.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -424,8 +424,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= -github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= +github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -447,8 +447,8 @@ github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEW github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= @@ -459,14 +459,16 @@ github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs= github.com/sigstore/rekor 
v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= -github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/sigstore v1.8.11 h1:tEqeQqbT+awtM87ec9KEeSUxT/AFvJNawneYJyAkFrQ= +github.com/sigstore/sigstore v1.8.11/go.mod h1:fdrFQosxCQ4wTL5H1NrZcQkqQ72AQbPjtpcL2QOGKV0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -487,8 +489,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0= -github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E= +github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw= +github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= @@ -545,8 +547,6 @@ go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= -go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= @@ -574,13 +574,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= -golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -589,6 +592,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -605,11 +611,14 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -618,6 +627,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -646,16 +658,21 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -663,7 +680,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= @@ -679,8 +698,10 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -691,8 +712,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -700,8 +721,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= 
-google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 0ca6fd75f2..ba650b4d1d 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -26,12 +26,13 @@ import ( "archive/tar" "bytes" "compress/gzip" + "crypto/rand" "crypto/sha256" "encoding/json" "errors" "fmt" "io" - "math/rand" + "math/big" "os" "path/filepath" "reflect" @@ -45,10 +46,6 @@ import ( digest "github.com/opencontainers/go-digest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression @@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } if sampleEntry == nil { t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + return } if targetEntry == nil { t.Fatalf("rewrite target not found") + return } targetEntry.Offset = sampleEntry.Offset }, @@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX func randomContents(n int) string { b := make([]rune, n) for i := range b { - b[i] = runes[rand.Intn(len(runes))] + bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) + if err != nil { + panic(err) + } + b[i] = runes[int(bi.Int64())] } return string(b) } diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md index 8d86600a40..3098526ab1 100644 --- a/vendor/github.com/containerd/typeurl/v2/README.md +++ b/vendor/github.com/containerd/typeurl/v2/README.md @@ -18,3 +18,9 @@ As a containerd sub-project, you will find the: * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. + +## Optional + +By default, support for gogoproto is available alongside the standard Google +protobuf types. +You can choose to leave gogo support out by building with the `no_gogo` build tag.
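The tag works through the `//go:build !no_gogo` constraint on the new types_gogo.go further below: the gogo handler is compiled in by default and dropped when the tag is set. A minimal sketch of a consumer built either way, assuming only the public typeurl v2 API vendored here (the payload type, its URL, and the program are hypothetical, not part of this patch):

// Build normally, or without gogo support:  go build -tags no_gogo .
package main

import (
	"fmt"

	typeurl "github.com/containerd/typeurl/v2"
)

// payload is a plain non-proto type; such types are registered under an
// explicit URL and round-tripped as JSON regardless of the build tag.
type payload struct {
	Name string `json:"name"`
}

func main() {
	typeurl.Register(&payload{}, "example.com/payload")

	// MarshalAny picks the JSON marshaller because payload is neither a
	// proto.Message nor (in gogo-enabled builds) a gogoproto.Message.
	a, err := typeurl.MarshalAny(&payload{Name: "demo"})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl(), string(a.GetValue()))

	// UnmarshalAny resolves the URL back to *payload via the registry.
	v, err := typeurl.UnmarshalAny(a)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", v.(*payload))
}

With `no_gogo` set, only google.golang.org/protobuf messages and explicitly registered types remain marshallable; gogo messages would reach the ErrNotFound branch of the patched TypeURL in the types.go hunk that follows.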
diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go index 78817b701e..9bf7810416 100644 --- a/vendor/github.com/containerd/typeurl/v2/types.go +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -24,7 +24,6 @@ import ( "reflect" "sync" - gogoproto "github.com/gogo/protobuf/proto" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/known/anypb" @@ -33,8 +32,16 @@ import ( var ( mu sync.RWMutex registry = make(map[reflect.Type]string) + handlers []handler ) +type handler interface { + Marshaller(interface{}) func() ([]byte, error) + Unmarshaller(interface{}) func([]byte) error + TypeURL(interface{}) string + GetType(url string) (reflect.Type, bool) +} + // Definitions of common error types used throughout typeurl. // // These error types are used with errors.Wrap and errors.Wrapf to add context @@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) { switch t := v.(type) { case proto.Message: return string(t.ProtoReflect().Descriptor().FullName()), nil - case gogoproto.Message: - return gogoproto.MessageName(t), nil default: + for _, h := range handlers { + if u := h.TypeURL(v); u != "" { + return u, nil + } + } return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound) } } @@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) { marshal = func(v interface{}) ([]byte, error) { return proto.Marshal(t) } - case gogoproto.Message: - marshal = func(v interface{}) ([]byte, error) { - return gogoproto.Marshal(t) - } default: - marshal = json.Marshal + for _, h := range handlers { + if m := h.Marshaller(v); m != nil { + marshal = func(v interface{}) ([]byte, error) { + return m() + } + break + } + } + + if marshal == nil { + marshal = json.Marshal + } } url, err := TypeURL(v) @@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { } func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) + t, isProto, err := getTypeByUrl(typeURL) if err != nil { return nil, err } if v == nil { - v = reflect.New(t.t).Interface() + v = reflect.New(t).Interface() } else { // Validate interface type provided by client vURL, err := TypeURL(v) @@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) } } - if t.isProto { - switch t := v.(type) { - case proto.Message: - err = proto.Unmarshal(value, t) - case gogoproto.Message: - err = gogoproto.Unmarshal(value, t) + if isProto { + pm, ok := v.(proto.Message) + if ok { + return v, proto.Unmarshal(value, pm) } - } else { - err = json.Unmarshal(value, v) - } - return v, err -} + for _, h := range handlers { + if unmarshal := h.Unmarshaller(v); unmarshal != nil { + return v, unmarshal(value) + } + } + } -type urlType struct { - t reflect.Type - isProto bool + // fallback to json unmarshaller + return v, json.Unmarshal(value, v) } -func getTypeByUrl(url string) (urlType, error) { +func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) { mu.RLock() for t, u := range registry { if u == url { mu.RUnlock() - return urlType{ - t: t, - }, nil + return t, false, nil } } mu.RUnlock() - // fallback to proto registry - t := gogoproto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) if err != nil { - 
return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound) + if errors.Is(err, protoregistry.NotFound) { + for _, h := range handlers { + if t, isProto := h.GetType(url); t != nil { + return t, isProto, nil + } + } + } + return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound) } empty := mt.New().Interface() - return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil + return reflect.TypeOf(empty).Elem(), true, nil } func tryDereference(v interface{}) reflect.Type { diff --git a/vendor/github.com/containerd/typeurl/v2/types_gogo.go b/vendor/github.com/containerd/typeurl/v2/types_gogo.go new file mode 100644 index 0000000000..adb892ec60 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/v2/types_gogo.go @@ -0,0 +1,68 @@ +//go:build !no_gogo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +import ( + "reflect" + + gogoproto "github.com/gogo/protobuf/proto" +) + +func init() { + handlers = append(handlers, gogoHandler{}) +} + +type gogoHandler struct{} + +func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + return func() ([]byte, error) { + return gogoproto.Marshal(pm) + } +} + +func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error { + pm, ok := v.(gogoproto.Message) + if !ok { + return nil + } + + return func(dt []byte) error { + return gogoproto.Unmarshal(dt, pm) + } +} + +func (gogoHandler) TypeURL(v interface{}) string { + pm, ok := v.(gogoproto.Message) + if !ok { + return "" + } + return gogoproto.MessageName(pm) +} + +func (gogoHandler) GetType(url string) (reflect.Type, bool) { + t := gogoproto.MessageType(url) + if t == nil { + return nil, false + } + return t.Elem(), true +} diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index e008c7e86f..7bc44d1f83 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar } } - if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil { + if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil { return copySingleImageResult{}, err } @@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar return res, nil } -// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. -func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { +// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary. 
+func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error { + ociConfig, configErr := src.OCIConfig(ctx) + // Do not fail on configErr here: this might be an artifact, + // and possibly nothing will need this to be a container image or need to process the config. + if dest.MustMatchRuntimeOS() { - c, err := src.OCIConfig(ctx) - if err != nil { - return fmt.Errorf("parsing image configuration: %w", err) + if configErr != nil { + return fmt.Errorf("parsing image configuration: %w", configErr) } wantedPlatforms := platform.WantedPlatforms(sys) @@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst // For a transitional period, this might trigger warnings because the Variant // field was added to OCI config only recently. If this turns out to be too noisy, // revert this check to only look for (OS, Architecture). - if platform.MatchesPlatform(c.Platform, wantedPlatform) { + if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) { match = true break } @@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst } if !match { logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", - c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) + ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", ")) } } + + if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil { + return err + } + return nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 000c8987d9..6e88aa01d1 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don' type dirImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.AlwaysSupportsSignatures diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go index 64ccf6ae55..6ade2384f7 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/client.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -3,6 +3,7 @@ package daemon import ( "net/http" "path/filepath" + "time" "github.com/containers/image/v5/types" dockerclient "github.com/docker/docker/client" @@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { } switch serverURL.Scheme { case "unix": // Nothing + case "npipe": // Nothing case "http": hc := httpConfig() opts = append(opts, dockerclient.WithHTTPClient(hc)) @@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsc, + // In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility. + // These idle connection limits really only apply to long-running clients, which is not our case here; + // we include the same values purely for symmetry.
+ MaxIdleConns: 6, + IdleConnTimeout: 30 * time.Second, }, CheckRedirect: dockerclient.CheckRedirect, }, nil @@ -92,6 +99,11 @@ func httpConfig() *http.Client { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, TLSClientConfig: nil, + // In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility. + // These idle connection limits really only apply to long-running clients, which is not our case here; + // we include the same values purely for symmetry. + MaxIdleConns: 6, + IdleConnTimeout: 30 * time.Second, }, CheckRedirect: dockerclient.CheckRedirect, } diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 0a0064576a..622d21fb1c 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -24,7 +24,6 @@ import ( "slices" "github.com/docker/distribution/registry/api/errcode" - dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty @@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error { // UnexpectedHTTPStatusError returned for response code outside of expected // range. func handleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { + switch { + case resp.StatusCode == http.StatusUnauthorized: // Check for OAuth errors within the `WWW-Authenticate` header first // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range dockerChallenge.ResponseChallenges(resp) { + for _, c := range parseAuthHeader(resp.Header) { if c.Scheme == "bearer" { var err errcode.Error // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 @@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error { return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) } } + fallthrough + case resp.StatusCode >= 400 && resp.StatusCode < 500: err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index f5e2bb61e8..3ac43cf9fc 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -41,6 +41,7 @@ import ( type dockerImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize ref dockerReference diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 6e44ce0960..41ab9bfd16 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } return } + if parts >= len(chunks) { + errs <- errors.New("too many parts returned by the server") + break + } s := signalCloseReader{ closed: make(chan struct{}), stream: p, diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go 
b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index f142346807..8f5ba7e36e 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -24,6 +24,7 @@ import ( type Destination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go new file mode 100644 index 0000000000..c4536e933b --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go @@ -0,0 +1,16 @@ +package stubs + +import ( + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. +type IgnoresOriginalOCIConfig struct{} + +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 0fe902e31f..b2462a3bc1 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -14,6 +14,7 @@ import ( // wrapped provides the private.ImageDestination operations // for a destination that only implements types.ImageDestination type wrapped struct { + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize types.ImageDestination diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 07922ceceb..4c1589ef02 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. 
-func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { editInstances := []ListEdit{} for i, instance := range updates { editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: index.Manifests[i].Digest, + UpdateOldDigest: list.Manifests[i].Digest, UpdateDigest: instance.Digest, UpdateSize: instance.Size, UpdateMediaType: instance.MediaType, ListOperation: ListOpUpdate}) } - return index.editInstances(editInstances) + return list.editInstances(editInstances) } -func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { +func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { addedEntries := []Schema2ManifestDescriptor{} for i, editInstance := range editInstances { switch editInstance.ListOperation { @@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if err := editInstance.UpdateDigest.Validate(); err != nil { return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) } - targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool { + targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { return m.Digest == editInstance.UpdateOldDigest }) if targetIndex == -1 { return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest + list.Manifests[targetIndex].Digest = editInstance.UpdateDigest if editInstance.UpdateSize < 0 { return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) } - index.Manifests[targetIndex].Size = editInstance.UpdateSize + list.Manifests[targetIndex].Size = editInstance.UpdateSize if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType + list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType case ListOpAdd: if editInstance.AddPlatform == nil { // Should we create a struct with empty fields instead? @@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { if len(addedEntries) != 0 { // slices.Clone() here to ensure a private backing array; // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) + list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) 
} return nil } -func (index *Schema2List) EditInstances(editInstances []ListEdit) error { - return index.editInstances(editInstances) +func (list *Schema2List) EditInstances(editInstances []ListEdit) error { + return list.editInstances(editInstances) } func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { @@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { return &Schema2List{*public} } -func (index *Schema2List) CloneInternal() List { - return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +func (list *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic)) } -func (index *Schema2List) Clone() ListPublic { - return index.CloneInternal() +func (list *Schema2List) Clone() ListPublic { + return list.CloneInternal() } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 4247a8db77..afd425483d 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -10,6 +10,7 @@ import ( compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageSourceInternalOnly is the part of private.ImageSource that is not @@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface { // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures // on unsupported formats. + // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, + // or an error obtaining that value (e.g. if the image is an artifact and not a container image). + // The destination can use it in its TryReusingBlob/PutBlob implementations + // (otherwise it only obtains the final config after all layers are written). + NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 1d1e26c84f..54b2e50566 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { return d.unpackedDest.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). 
+// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index 53827b11af..72af74e251 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -98,7 +98,7 @@ func ValidateScope(scope string) error { } func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope) if !matched { return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 85374cecf0..cb2768d745 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -27,6 +27,7 @@ import ( type ociImageDestination struct { impl.Compat impl.PropertyMethodsInitialize + stubs.IgnoresOriginalOCIConfig stubs.NoPutBlobPartialInitialize stubs.NoSignaturesInitialize diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 61f69e93fd..bd5e77aa8f 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -22,6 +22,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) type openshiftImageDestination struct { @@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { return d.docker.SupportsPutBlobPartial() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.docker.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. 
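The NoteOriginalOCIConfig plumbing above repeats one of two shapes across all destinations: transports that have no use for the original config embed the no-op stub, while wrapping destinations forward the call to the destination they wrap. A minimal sketch of both shapes (the type and field names below are hypothetical; only the one method from private.ImageDestinationInternalOnly is shown):

package example

import (
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// IgnoresOriginalOCIConfig mirrors the stub added by this patch; assumed here
// to stand in for stubs.IgnoresOriginalOCIConfig in c/image.
type IgnoresOriginalOCIConfig struct{}

func (IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return nil // this destination has no use for the original config
}

// Embedding shape, like dockerImageDestination / ociImageDestination above.
type simpleDest struct {
	IgnoresOriginalOCIConfig
}

// Forwarding shape, like ociArchiveImageDestination / openshiftImageDestination.
type wrappingDest struct {
	inner interface {
		NoteOriginalOCIConfig(*imgspecv1.Image, error) error
	}
}

func (d *wrappingDest) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return d.inner.NoteOriginalOCIConfig(ociConfig, configErr)
}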
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index f7103d13eb..54e3d294b9 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -19,6 +19,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -138,6 +139,14 @@ func (d *blobCacheDestination) HasThreadSafePutBlob() bool { return d.destination.HasThreadSafePutBlob() } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). +func (d *blobCacheDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + return d.destination.NoteOriginalOCIConfig(ociConfig, configErr) +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go index c0b48dafa7..c0dc7b232b 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error { return errors.New("fulcio disabled at compile-time") } -func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, untrustedPayloadBytes []byte) (crypto.PublicKey, error) { return nil, errors.New("fulcio disabled at compile-time") diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go index 7c121cc2ee..2b20bbed2e 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go @@ -10,6 +10,6 @@ import ( // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. // Returns bundle upload time on success. 
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { +func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") } diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 51fac71e44..bad11706b6 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -17,11 +17,13 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" + srcImpl "github.com/containers/image/v5/internal/imagesource/impl" + srcStubs "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" @@ -57,8 +59,9 @@ type storageImageDestination struct { imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 + manifest []byte // (Per-instance) manifest contents, or nil if not yet known. + manifestMIMEType string // Valid if manifest != nil + manifestDigest digest.Digest // Valid if manifest != nil untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet signatures []byte // Signature contents, temporary signatureses map[digest.Digest][]byte // Instance signature contents, temporary @@ -121,6 +124,9 @@ type storageImageDestinationLockProtected struct { filenames map[digest.Digest]string // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. fileSizes map[digest.Digest]int64 + + // Config + configDigest digest.Digest // "" if N/A or not known yet. } // addedLayerInfo records data about a layer to use in this image. @@ -201,6 +207,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1))) } +// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, +// or an error obtaining that value (e.g. if the image is an artifact and not a container image). +// The destination can use it in its TryReusingBlob/PutBlob implementations +// (otherwise it only obtains the final config after all layers are written). 
+func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { + if configErr != nil { + return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr) + } + s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig) + return nil +} + // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -214,7 +232,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, err } - if options.IsConfig || options.LayerIndex == nil { + if options.IsConfig { + s.lock.Lock() + defer s.lock.Unlock() + if s.lockProtected.configDigest != "" { + return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q", + s.lockProtected.configDigest.String(), info.Digest.String()) + } + s.lockProtected.configDigest = info.Digest + return info, nil + } + if options.LayerIndex == nil { return info, nil } @@ -351,35 +379,41 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces blobDigest := srcInfo.Digest s.lock.Lock() - if out.UncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest - if out.TOCDigest != "" { - options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) - } - // Don’t set indexToTOCDigest on this path: - // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. - // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. - // That TOC is quite unlikely to match any other TOC value. - - // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is - // responsible for ensuring blobDigest has been validated. - if out.CompressedDigest != blobDigest { - return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", - out.CompressedDigest, blobDigest) - } - // So, record also information about blobDigest, that might benefit reuse. - // We trust PrepareStagedLayer to validate or create both values correctly. - s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest - options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) - } else { - // Use diffID for layer identity if it is known. - if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { - s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + if err := func() error { // A scope for defer + defer s.lock.Unlock() + + if out.UncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest + if out.TOCDigest != "" { + options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) + } + // Don’t set indexToTOCDigest on this path: + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match any other TOC value. 
+ + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is + // responsible for ensuring blobDigest has been validated. + if out.CompressedDigest != blobDigest { + return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q", + out.CompressedDigest, blobDigest) + } + // So, record also information about blobDigest, that might benefit reuse. + // We trust PrepareStagedLayer to validate or create both values correctly. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) + } else { + // Use diffID for layer identity if it is known. + if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + } + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest } - s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + s.lockProtected.diffOutputs[options.LayerIndex] = out + return nil + }(); err != nil { + return private.UploadedBlob{}, err } - s.lockProtected.diffOutputs[options.LayerIndex] = out - s.lock.Unlock() succeeded = true return private.UploadedBlob{ @@ -975,9 +1009,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest) } var trustedOriginalDigest digest.Digest // For storage.LayerOptions + var trustedOriginalSize *int64 if gotFilename { // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest. trustedOriginalDigest = trusted.blobDigest + trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost. } else { // Try to find the layer with contents matching the data we use. var layer *storage.Layer // = nil @@ -1032,22 +1068,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D if trusted.diffID == "" && layer.UncompressedDigest != "" { trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now. } - // The stream we have is uncompressed, and it matches trusted.diffID (if known). - // - // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse. - // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created - // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). + + // Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse. + // But we don’t want to just use the size from the manifest if we never saw the compressed blob, + // so that we don’t propagate mistakes / attacks. // - // We can legitimately set storage.LayerOptions.OriginalDigest to "", - // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. - // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. - trustedOriginalDigest = trusted.diffID + // s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename. 
+ // So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit + // to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to + // this new layer copy can help.) + if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 { + trustedOriginalDigest = trusted.blobDigest + sizeCopy := layer.CompressedSize + trustedOriginalSize = &sizeCopy + } else { + // The stream we have is uncompressed, and it matches trusted.diffID (if known). + // + // We can legitimately set storage.LayerOptions.OriginalDigest to "", + // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. + // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. + trustedOriginalDigest = trusted.diffID + trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost. + } // Allow using the already-collected layer contents without extracting the layer again. // // This only matches against the uncompressed digest. - // We don’t have the original compressed data here to trivially set filenames[layerDigest]. - // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. + // If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the + // same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config), + // we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate + // between “original files” and “possibly uncompressed files”. // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity. if trusted.diffID != "" { s.lock.Lock() @@ -1067,6 +1117,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ OriginalDigest: trustedOriginalDigest, + OriginalSize: trustedOriginalSize, // nil in many cases // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream. UncompressedDigest: trusted.diffID, }, file) @@ -1076,52 +1127,110 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return layer, nil } +// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed, +// to allow using image.FromUnparsedImage. 
+type uncommittedImageSource struct { + srcImpl.Compat + srcImpl.PropertyMethodsInitialize + srcImpl.NoSignatures + srcImpl.DoesNotAffectLayerInfosForCopy + srcStubs.NoGetBlobAtInitialize + + d *storageImageDestination +} + +func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource { + s := &uncommittedImageSource{ + PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()), + + d: d, + } + s.Compat = srcImpl.AddCompat(s) + return s +} + +func (u *uncommittedImageSource) Reference() types.ImageReference { + return u.d.Reference() +} + +func (u *uncommittedImageSource) Close() error { + return nil +} + +func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return u.d.manifest, u.d.manifestMIMEType, nil +} + +func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + blob, err := u.d.getConfigBlob(info) + if err != nil { + return nil, -1, err + } + return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil +} + // untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config. // If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil). // WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication. func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) { - // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and - // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. + // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, + // nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set + // by NoteOriginalOCIConfig and are not being updated any more; + // or PutManifest has been called and s.manifest != nil. // Either way this function does not need the protection of s.lock. - if s.manifest == nil { - return "", nil - } if s.untrustedDiffIDValues == nil { - mt := manifest.GuessMIMEType(s.manifest) - if mt != imgspecv1.MediaTypeImageManifest { - // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage, - // and then use types.Image.OCIConfig so that we can parse the image. - // - // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation), - // while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull). - // So it is not implemented yet. - return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + // Typically, we expect untrustedDiffIDValues to be set by the generic copy code + // via NoteOriginalOCIConfig; this is a compatibility fallback for external callers + // of the public types.ImageDestination. + if s.manifest == nil { + return "", nil } - man, err := manifest.FromBlob(s.manifest, mt) + + ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
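+		// image.UnparsedInstance wraps this uncommitted destination in a read-only source, and
+		// image.FromUnparsedImage returns a types.Image whose OCIConfig() can convert the config
+		// to OCI format regardless of the manifest’s native schema, so this fallback no longer
+		// needs to special-case non-OCI manifest types.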
+ unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil) + sourced, err := image.FromUnparsedImage(ctx, nil, unparsed) if err != nil { - return "", fmt.Errorf("parsing manifest: %w", err) + return "", fmt.Errorf("parsing image to be committed: %w", err) } - - cb, err := s.getConfigBlob(man.ConfigInfo()) + configOCI, err := sourced.OCIConfig(ctx) if err != nil { - return "", err + return "", fmt.Errorf("obtaining config of image to be committed: %w", err) } - // retrieve the expected uncompressed digest from the config blob. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return "", err - } - s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) - if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… - s.untrustedDiffIDValues = []digest.Digest{} - } + s.setUntrustedDiffIDValuesFromOCIConfig(configOCI) } + if layerIndex >= len(s.untrustedDiffIDValues) { return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex) } - return s.untrustedDiffIDValues[layerIndex], nil + res := s.untrustedDiffIDValues[layerIndex] + if res == "" { + // In practice, this should, right now, only matter for pulls of OCI images + // (this code path implies that we did a partial pull because a layer has an annotation), + // so DiffIDs should always be set. + // + // It is, though, reachable by pulling an OCI image while converting to schema1, + // using a manual (skopeo copy) or something similar, not (podman pull). + // + // Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values. + // The current semantics of this function are that ("", nil) means "try again later", + // which is not what we want to happen; for now, turn that into an explicit error. + return "", fmt.Errorf("DiffID value for layer %d is unknown or explicitly empty", layerIndex) + } + return res, nil +} + +// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDValues from config. +// The caller must ensure s.lock does not need to be held. +func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) { + s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } } // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. @@ -1131,7 +1240,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. - if len(s.manifest) == 0 { + if s.manifest == nil { return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()") } toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx) @@ -1159,7 +1268,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options } } // Find the list of layer blobs.
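+	// s.manifestMIMEType was recorded together with s.manifest in PutManifest(), so the
+	// MIME type no longer needs to be re-guessed from the manifest bytes here.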
- man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType) if err != nil { return fmt.Errorf("parsing manifest: %w", err) } @@ -1193,29 +1302,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options imgOptions.CreationDate = *inspect.Created } - // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := set.New[digest.Digest]() - for blob := range s.lockProtected.filenames { - dataBlobs.Add(blob) - } - for _, layerBlob := range layerBlobs { - dataBlobs.Delete(layerBlob.Digest) - } - for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.lockProtected.filenames[blob]) + // Set up to save the config as a data item. Since we only share layers, the config should be in a file. + if s.lockProtected.configDigest != "" { + v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest]) if err != nil { - return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) + return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err) } imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ - Key: blob.String(), + Key: s.lockProtected.configDigest.String(), Data: v, Digest: digest.Canonical.FromBytes(v), }) } // Set up to save the options.UnparsedToplevel's manifest if it differs from // the per-platform one, which is saved below. - if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + if !bytes.Equal(toplevelManifest, s.manifest) { manifestDigest, err := manifest.Digest(toplevelManifest) if err != nil { return fmt.Errorf("digesting top-level manifest: %w", err) @@ -1370,6 +1471,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob return err } s.manifest = bytes.Clone(manifestBlob) + if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil + s.manifest = []byte{} + } + s.manifestMIMEType = manifest.GuessMIMEType(s.manifest) s.manifestDigest = digest return nil } @@ -1392,7 +1497,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s if instanceDigest == nil { s.signatures = sigblob s.metadata.SignatureSizes = sizes - if len(s.manifest) > 0 { + if s.manifest != nil { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index acc4cb30e8..5775c4acbc 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag } if s.id == "" { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage) + // %.0w makes the error visible to error.Unwrap() without including any text. + // ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause. 
+ return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage) } if loadedImage == nil { img, err := s.transport.store.Image(s.id) diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 55788f8877..ff76b20667 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -35,13 +35,14 @@ type storageImageSource struct { impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - metadata storageImageMetadata - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - getBlobMutexProtected getBlobMutexProtected + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + cachedManifestMIMEType string // Valid if cachedManifest != nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + getBlobMutexProtected getBlobMutexProtected } // getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. @@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } return blob, manifest.GuessMIMEType(blob), err } - if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { // The manifest is stored as a big data item. // Prefer the manifest corresponding to the user-specified digest, if available. if s.imageRef.named != nil { @@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di } // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). - if len(s.cachedManifest) == 0 { + if s.cachedManifest == nil { cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) if err != nil { return nil, "", err } s.cachedManifest = cachedBlob } + s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest) } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err + return s.cachedManifest, s.cachedManifestMIMEType, err } // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 3743721fc3..7a16c8181e 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 33 + VersionMinor = 34 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
- VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go index 603925dfea..7ca32fc802 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go @@ -25,7 +25,7 @@ import ( "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" - "go.mozilla.org/pkcs7" + "github.com/smallstep/pkcs7" ) type pkcs7KeyWrapper struct { diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 0fed08c3b2..344b9a992d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-39" + FEDORA_NAME: "fedora-41" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20241010t105554z-f40f39d13" + IMAGE_SUFFIX: "c20241107t210000z-f41f40d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 64c01b67f8..b8a993315b 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de # N/B: This value is managed by Renovate, manual changes are # possible, as long as they don't disturb the formatting # (i.e. DO NOT ADD A 'v' prefix!) -GOLANGCI_LINT_VERSION := 1.61.0 +GOLANGCI_LINT_VERSION := 1.63.3 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 3ebf789f5a..a38254646f 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.56.0 +1.57.0-dev diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go index 396648e7fa..e8837ff953 100644 --- a/vendor/github.com/containers/storage/check.go +++ b/vendor/github.com/containers/storage/check.go @@ -80,7 +80,7 @@ type CheckOptions struct { // layer to the contents that we'd expect it to have to ignore certain // discrepancies type checkIgnore struct { - ownership, timestamps, permissions bool + ownership, timestamps, permissions, filetype bool } // CheckMost returns a CheckOptions with mostly just "quick" checks enabled. 
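The two check.go hunks work together: the checkIgnore struct above gains a filetype flag, and the hunk below turns it (together with ownership and permissions) on whenever force_mask is in use. The rationale is an assumption from how force_mask behaves rather than anything stated in the diff: with force_mask, files are created with one fixed mode and the real type/owner/mode are kept in an override xattr, so those fields legitimately differ from the layer contents and should not be reported as corruption. A minimal sketch of the resulting gating pattern:

package example

import "fmt"

// checkIgnore mirrors the struct above: each flag marks a class of
// discrepancies that the storage configuration makes legitimate.
type checkIgnore struct {
	ownership, timestamps, permissions, filetype bool
}

// compareTypeflag reports a filetype difference only when it is not ignored,
// the same shape as the compareFileInfo change below.
func compareTypeflag(a, b byte, ignore checkIgnore) []string {
	var comparison []string
	if a != b && !ignore.filetype {
		comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a, b))
	}
	return comparison
}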
@@ -139,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) { if strings.Contains(o, "ignore_chown_errors=true") { ignore.ownership = true } - if strings.HasPrefix(o, "force_mask=") { + if strings.Contains(o, "force_mask=") { + ignore.ownership = true ignore.permissions = true + ignore.filetype = true } } for o := range s.pullOptions { @@ -833,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error { // compareFileInfo returns a string summarizing what's different between the two checkFileInfos func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string { var comparison []string - if a.typeflag != b.typeflag { + if a.typeflag != b.typeflag && !ignore.filetype { comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag)) } if idmap != nil && !idmap.Empty() { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a8312811b7..d039342637 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -776,3 +776,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp func (a *Driver) SupportsShifting() bool { return false } + +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index c7f37d97e0..4a80339f40 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -673,3 +673,8 @@ func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) AdditionalImageStores() []string { return nil } + +// Dedup performs deduplication of the driver's storage. 
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go index 882fa04bd0..4f27502073 100644 --- a/vendor/github.com/containers/storage/drivers/chown_darwin.go +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai } if uid != int(st.Uid) || gid != int(st.Gid) { capability, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %w", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 808f4fea66..b0c25cd992 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -101,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %w", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index 8c0bbed618..93fc0a3261 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -106,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } if data != nil { @@ -279,7 +279,7 @@ func doCopyXattrs(srcPath, dstPath string) error { } xattrs, err := system.Llistxattr(srcPath) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index dc9b12c3ca..ed0e628883 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/internal/dedup" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" @@ -81,6 +82,23 @@ type ApplyDiffWithDifferOpts struct { Flags map[string]interface{} } +// DedupArgs contains the information to perform storage deduplication. +type DedupArgs struct { + // Layers is the list of layers to deduplicate. + Layers []string + + // Options that are passed directly to the pkg/dedup.DedupDirs function. + Options dedup.DedupOptions +} + +// DedupResult contains the result of the Dedup() call. 
+type DedupResult struct { + // Deduped represents the total number of bytes saved by deduplication. + // This value accounts also for all previously deduplicated data, not only the savings + // from the last run. + Deduped uint64 +} + // InitFunc initializes the storage driver. type InitFunc func(homedir string, options Options) (Driver, error) @@ -139,6 +157,8 @@ type ProtoDriver interface { // AdditionalImageStores returns additional image stores supported by the driver // This API is experimental and can be changed without bumping the major version number. AdditionalImageStores() []string + // Dedup performs deduplication of the driver's storage. + Dedup(DedupArgs) (DedupResult, error) } // DiffDriver is the interface to use to implement graph diffs diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go index 7867d50063..5cbf5e1cef 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -10,7 +10,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" ) func scanForMountProgramIndicators(home string) (detected bool, err error) { @@ -28,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) { } if d.IsDir() { xattrs, err := system.Llistxattr(path) - if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } for _, xattr := range xattrs { diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index 1fb3e62a43..797e3646e9 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package overlay @@ -27,7 +27,7 @@ var ( composeFsHelperErr error // skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel - // does not support it. Mounting directly via a file will be supported in kernel 6.12. + // does not support it. Mounting directly via a file is supported from Linux 6.12. skipMountViaFile atomic.Bool ) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index caf896463b..56278805f8 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -22,6 +22,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" "github.com/containers/storage/drivers/quota" + "github.com/containers/storage/internal/dedup" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" @@ -1096,6 +1097,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl } if d.options.forceMask != nil { + st.Mode |= os.ModeDir if err := idtools.SetContainersOverrideXattr(diff, st); err != nil { return err } @@ -2740,3 +2742,22 @@ func getMappedMountRoot(path string) string { } return dirName } + +// Dedup performs deduplication of the driver's storage. 
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + var dirs []string + for _, layer := range req.Layers { + dir, _, inAdditionalStore := d.dir2(layer, false) + if inAdditionalStore { + continue + } + if err := fileutils.Exists(dir); err == nil { + dirs = append(dirs, filepath.Join(dir, "diff")) + } + } + r, err := dedup.DedupDirs(dirs, req.Options) + if err != nil { + return graphdriver.DedupResult{}, err + } + return graphdriver.DedupResult{Deduped: r.Deduped}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go deleted file mode 100644 index a6df21d0f7..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build linux && !cgo - -package overlay - -import ( - "fmt" -) - -func openComposefsMount(dataDir string) (int, error) { - return 0, fmt.Errorf("composefs not supported on this build") -} - -func getComposeFsHelper() (string, error) { - return "", fmt.Errorf("composefs not supported on this build") -} - -func mountComposefsBlob(dataDir, mountPoint string) error { - return fmt.Errorf("composefs not supported on this build") -} - -func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { - return fmt.Errorf("composefs not supported on this build") -} diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go index 6e82a1ea38..c4f9628225 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -1,4 +1,4 @@ -//go:build !exclude_graphdriver_overlay && linux && cgo +//go:build !exclude_graphdriver_overlay && linux package register diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index f60ec17b3f..98dc55b0e3 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -10,6 +10,7 @@ import ( "strings" graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/dedup" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fileutils" @@ -348,3 +349,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) } + +// Dedup performs deduplication of the driver's storage. 
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + var dirs []string + for _, layer := range req.Layers { + dir := d.dir2(layer, false) + if err := fileutils.Exists(dir); err == nil { + dirs = append(dirs, dir) + } + } + r, err := dedup.DedupDirs(dirs, req.Options) + if err != nil { + return graphdriver.DedupResult{}, err + } + return graphdriver.DedupResult{Deduped: r.Deduped}, nil +} diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index d38e74534e..59ed9a7567 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -975,6 +975,11 @@ func (d *Driver) AdditionalImageStores() []string { return nil } +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} + // UpdateLayerIDMap changes ownerships in the layer's filesystem tree from // matching those in toContainer to matching those in toHost. func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index ef26c6c7f0..f53b0e1b61 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -511,3 +511,8 @@ func (d *Driver) ListLayers() ([]string, error) { func (d *Driver) AdditionalImageStores() []string { return nil } + +// Dedup performs deduplication of the driver's storage. +func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { + return graphdriver.DedupResult{}, nil +} diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup.go b/vendor/github.com/containers/storage/internal/dedup/dedup.go new file mode 100644 index 0000000000..59fcd0d23a --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup.go @@ -0,0 +1,163 @@ +package dedup + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "hash/crc64" + "io/fs" + "sync" + + "github.com/opencontainers/selinux/pkg/pwalkdir" + "github.com/sirupsen/logrus" +) + +var notSupported = errors.New("reflinks are not supported on this platform") + +const ( + DedupHashInvalid DedupHashMethod = iota + DedupHashCRC + DedupHashFileSize + DedupHashSHA256 +) + +type DedupHashMethod int + +type DedupOptions struct { + // HashMethod is the hash function to use to find identical files + HashMethod DedupHashMethod +} + +type DedupResult struct { + // Deduped represents the total number of bytes saved by deduplication. + // This value accounts also for all previously deduplicated data, not only the savings + // from the last run. 
+ Deduped uint64 +} + +func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) { + switch hashMethod { + case DedupHashInvalid: + return "", fmt.Errorf("invalid hash method: %v", hashMethod) + case DedupHashFileSize: + return fmt.Sprintf("%v", info.Size()), nil + case DedupHashSHA256: + return readAllFile(path, info, func(buf []byte) (string, error) { + h := sha256.New() + if _, err := h.Write(buf); err != nil { + return "", err + } + return string(h.Sum(nil)), nil + }) + case DedupHashCRC: + return readAllFile(path, info, func(buf []byte) (string, error) { + c := crc64.New(crc64.MakeTable(crc64.ECMA)) + if _, err := c.Write(buf); err != nil { + return "", err + } + bufRet := make([]byte, 8) + binary.BigEndian.PutUint64(bufRet, c.Sum64()) + return string(bufRet), nil + }) + default: + return "", fmt.Errorf("unknown hash method: %v", hashMethod) + } +} + +type pathsLocked struct { + paths []string + lock sync.Mutex +} + +func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) { + res := DedupResult{} + hashToPaths := make(map[string]*pathsLocked) + lock := sync.Mutex{} // protects `hashToPaths` and `res` + + dedup, err := newDedupFiles() + if err != nil { + return res, err + } + + for _, dir := range dirs { + logrus.Debugf("Deduping directory %s", dir) + if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + size := uint64(info.Size()) + if size == 0 { + // do not bother with empty files + return nil + } + + // the file was already deduplicated + if visited, err := dedup.isFirstVisitOf(info); err != nil { + return err + } else if visited { + return nil + } + + h, err := getFileChecksum(options.HashMethod, path, info) + if err != nil { + return err + } + + lock.Lock() + item, foundItem := hashToPaths[h] + if !foundItem { + item = &pathsLocked{paths: []string{path}} + hashToPaths[h] = item + lock.Unlock() + return nil + } + item.lock.Lock() + lock.Unlock() + + dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement + defer item.lock.Unlock() + + var dedupBytes uint64 + for _, src := range item.paths { + deduped, err := dedup.dedup(src, path, info) + if err == nil && deduped > 0 { + logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped) + dedupBytes += deduped + break + } + logrus.Debugf("Failed to deduplicate: %v", err) + if errors.Is(err, notSupported) { + return dedupBytes, err + } + } + if dedupBytes == 0 { + item.paths = append(item.paths, path) + } + return dedupBytes, nil + }() + if err != nil { + return err + } + + lock.Lock() + res.Deduped += dedupBytes + lock.Unlock() + return nil + }); err != nil { + // if reflinks are not supported, return immediately without errors + if errors.Is(err, notSupported) { + return res, nil + } + return res, err + } + } + return res, nil +} diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go new file mode 100644 index 0000000000..90ccb5f318 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go @@ -0,0 +1,139 @@ +package dedup + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type deviceInodePair struct { + dev uint64 + ino uint64 +} + +type dedupFiles struct { + lock sync.Mutex + visitedInodes 
map[deviceInodePair]struct{} +} + +func newDedupFiles() (*dedupFiles, error) { + return &dedupFiles{ + visitedInodes: make(map[deviceInodePair]struct{}), + }, nil +} + +func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) { + d.lock.Lock() + defer d.lock.Unlock() + + di := deviceInodePair{ + dev: dev, + ino: ino, + } + + _, visited := d.visitedInodes[di] + d.visitedInodes[di] = struct{}{} + return visited, nil +} + +// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited. +func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return false, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + return d.recordInode(uint64(st.Dev), st.Ino) +} + +// dedup deduplicates the file at src path to dst path +func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) { + srcFile, err := os.OpenFile(src, os.O_RDONLY, 0) + if err != nil { + return 0, fmt.Errorf("failed to open source file: %w", err) + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0) + if err != nil { + return 0, fmt.Errorf("failed to open destination file: %w", err) + } + defer dstFile.Close() + + stSrc, err := srcFile.Stat() + if err != nil { + return 0, fmt.Errorf("failed to stat source file: %w", err) + } + sSrc, ok := stSrc.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + sDest, ok := fiDst.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("unable to get raw syscall.Stat_t data") + } + if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino { + // same inode, we are dealing with a hard link, no need to deduplicate + return 0, nil + } + + value := unix.FileDedupeRange{ + Src_offset: 0, + Src_length: uint64(stSrc.Size()), + Info: []unix.FileDedupeRangeInfo{ + { + Dest_fd: int64(dstFile.Fd()), + Dest_offset: 0, + }, + }, + } + err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value) + if err == nil { + return uint64(value.Info[0].Bytes_deduped), nil + } + + if errors.Is(err, unix.ENOTSUP) { + return 0, notSupported + } + return 0, fmt.Errorf("failed to clone file %q: %w", src, err) +} + +func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) { + size := info.Size() + if size == 0 { + return fn(nil) + } + + file, err := os.Open(path) + if err != nil { + return "", err + } + defer file.Close() + + if size < 4096 { + // small file, read it all + data := make([]byte, size) + _, err = io.ReadFull(file, data) + if err != nil { + return "", err + } + return fn(data) + } + + mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE) + if err != nil { + return "", fmt.Errorf("failed to mmap file: %w", err) + } + defer func() { + _ = unix.Munmap(mmap) + }() + + _ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL) + + return fn(mmap) +} diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go new file mode 100644 index 0000000000..cfadf83268 --- /dev/null +++ b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go @@ -0,0 +1,27 @@ +//go:build !linux + +package dedup + +import ( + "io/fs" +) + +type dedupFiles struct{} + +func newDedupFiles() (*dedupFiles, error) { + return nil, notSupported +} + +// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited. 
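The dedup() method above is a thin wrapper over the kernel's FIDEDUPERANGE ioctl, which makes two files share extents only after the kernel itself has verified that the byte ranges are identical, so it stays safe even if the userspace checksum collided. A self-contained sketch of the same call (hypothetical file names; requires Linux and a filesystem that supports extent sharing, such as btrfs or XFS):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("a.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.OpenFile("b.bin", os.O_WRONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	st, err := src.Stat()
	if err != nil {
		log.Fatal(err)
	}

	req := unix.FileDedupeRange{
		Src_offset: 0,
		Src_length: uint64(st.Size()),
		Info: []unix.FileDedupeRangeInfo{{
			Dest_fd:     int64(dst.Fd()),
			Dest_offset: 0,
		}},
	}
	// The kernel compares the ranges and shares extents only on an exact match.
	if err := unix.IoctlFileDedupeRange(int(src.Fd()), &req); err != nil {
		log.Fatal(err) // ENOTSUP means the filesystem cannot share extents
	}
	fmt.Printf("shared %d bytes\n", req.Info[0].Bytes_deduped)
}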
+func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
+	return false, notSupported
+}
+
+// dedup deduplicates the file at src path to dst path
+func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
+	return 0, notSupported
+}
+
+func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
+	return "", notSupported
+}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 6fe1a08035..1f8203fbfc 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -336,6 +336,9 @@ type rwLayerStore interface {
 	// Clean up unreferenced layers
 	GarbageCollect() error
+
+	// Dedup deduplicates layers in the store.
+	dedup(drivers.DedupArgs) (drivers.DedupResult, error)
 }
 
 type multipleLockFile struct {
@@ -913,23 +916,32 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 	// user of this storage area marked for deletion but didn't manage to
 	// actually delete.
 	var incompleteDeletionErrors error // = nil
+	var layersToDelete []*Layer
 	for _, layer := range r.layers {
 		if layer.Flags == nil {
 			layer.Flags = make(map[string]interface{})
 		}
 		if layerHasIncompleteFlag(layer) {
-			logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
-			err := r.deleteInternal(layer.ID)
-			if err != nil {
-				// Don't return the error immediately, because deleteInternal does not saveLayers();
-				// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
-				// deleted incomplete layers have their metadata correctly removed.
-				incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
-					fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
-			}
-			modifiedLocations |= layerLocation(layer)
+			// Important: Do not call r.deleteInternal() here. It modifies r.layers
+			// which causes unexpected side effects while iterating over r.layers here.
+			// The range loop has no idea that the underlying elements were shifted
+			// around.
+			layersToDelete = append(layersToDelete, layer)
 		}
 	}
+	// Now actually delete the layers
+	for _, layer := range layersToDelete {
+		logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
+		err := r.deleteInternal(layer.ID)
+		if err != nil {
+			// Don't return the error immediately, because deleteInternal does not saveLayers();
+			// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+			// deleted incomplete layers have their metadata correctly removed.
+			incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+				fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
+		}
+		modifiedLocations |= layerLocation(layer)
+	}
 	if err := r.saveLayers(modifiedLocations); err != nil {
 		return false, err
 	}
@@ -2592,6 +2604,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
 	return r.layersByDigestMap(r.bytocsum, d)
 }
 
+// Requires startWriting.
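The load() rework above fixes a classic iteration hazard: r.deleteInternal() mutates r.layers while a range loop is walking it, so elements shift under the iterator and some incomplete layers could be skipped. Collecting candidates first and deleting in a second pass avoids that. A minimal illustration of the pattern (toy types, not the store's real ones):

package main

import "fmt"

type layer struct {
	id         string
	incomplete bool
}

func main() {
	layers := []*layer{{"a", false}, {"b", true}, {"c", true}, {"d", false}}

	// Phase 1: only collect; do not mutate the slice being ranged over.
	var toDelete []*layer
	for _, l := range layers {
		if l.incomplete {
			toDelete = append(toDelete, l)
		}
	}

	// Phase 2: mutate freely, the iteration is finished.
	for _, l := range toDelete {
		for i, cur := range layers {
			if cur == l {
				layers = append(layers[:i], layers[i+1:]...)
				break
			}
		}
	}
	fmt.Println(len(layers)) // 2
}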
+func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) { + return r.driver.Dedup(req) +} + func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 13a458956c..41daad8530 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -78,6 +78,7 @@ const ( windows = "windows" darwin = "darwin" freebsd = "freebsd" + linux = "linux" ) var xattrsToIgnore = map[string]interface{}{ @@ -427,7 +428,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { } for _, xattr := range []string{"security.capability", "security.ima"} { capability, err := system.Lgetxattr(path, xattr) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) } if capability != nil { @@ -440,7 +441,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error { // readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header func readUserXattrToTarHeader(path string, hdr *tar.Header) error { xattrs, err := system.Llistxattr(path) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform { return err } for _, key := range xattrs { @@ -655,12 +656,20 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L // so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() + typeFlag := hdr.Typeflag mask := hdrInfo.Mode() + + // update also the implementation of ForceMask in pkg/chunked if forceMask != nil { mask = *forceMask + // If we have a forceMask, force the real type to either be a directory, + // a link, or a regular file. + if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink { + typeFlag = tar.TypeReg + } } - switch hdr.Typeflag { + switch typeFlag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two @@ -728,16 +737,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { - value := idtools.Stat{ - IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}, - Mode: hdrInfo.Mode() & 0o7777, - } - if err := idtools.SetContainersOverrideXattr(path, value); err != nil { - return err - } - } - // Lchown is not supported on Windows. if Lchown && runtime.GOOS != windows { if chownOpts == nil { @@ -793,18 +792,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L continue } if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil { - if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. We also ignore EPERM errors if we are running in a - // user namespace. 
+			if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
+				// Ignore specific error cases:
+				// - ENOTSUP: Expected for graphdrivers lacking extended attribute support:
+				//   - Legacy AUFS versions
+				//   - FreeBSD with unsupported namespaces (trusted, security)
+				// - EPERM: Expected when operating within a user namespace
+				// All other errors will cause a failure.
 				errs = append(errs, err.Error())
 				continue
 			}
 			return err
 		}
+	}
+
+	if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") {
+		value := idtools.Stat{
+			IDs:   idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
+			Mode:  hdrInfo.Mode(),
+			Major: int(hdr.Devmajor),
+			Minor: int(hdr.Devminor),
+		}
+		if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
+			return err
+		}
 	}
 
 	// We defer setting flags on directories until the end of
@@ -1149,11 +1160,11 @@ loop:
 	}
 
 	if options.ForceMask != nil {
-		value := idtools.Stat{Mode: 0o755}
+		value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
 		if rootHdr != nil {
 			value.IDs.UID = rootHdr.Uid
 			value.IDs.GID = rootHdr.Gid
-			value.Mode = os.FileMode(rootHdr.Mode)
+			value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
 		}
 		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
 			return err
@@ -1379,7 +1390,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
 		uid, gid = hdr.Uid, hdr.Gid
 		if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
 			attrs := strings.Split(string(xstat), ":")
-			if len(attrs) == 3 {
+			if len(attrs) >= 3 {
 				val, err := strconv.ParseUint(attrs[0], 10, 32)
 				if err == nil {
 					uid = int(val)
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index 3075c27bbf..2b52654930 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -270,6 +270,7 @@ type FileInfo struct {
 	capability []byte
 	added      bool
 	xattrs     map[string]string
+	target     string
 }
 
 // LookUp looks up the file information of a file.
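The changes.go additions teach layer diffing to treat a rewritten symlink as a change: walkchunk records each symlink's target via os.Readlink, and addChanges (next hunk) compares old and new targets alongside capabilities and xattrs. Previously a symlink whose mode, owner and times were unchanged could flip its target without being noticed. A small sketch of the underlying comparison, with hypothetical paths:

package main

import (
	"fmt"
	"os"
)

// symlinkChanged reports whether two symlinks point at different targets.
func symlinkChanged(oldPath, newPath string) (bool, error) {
	oldTarget, err := os.Readlink(oldPath)
	if err != nil {
		return false, err
	}
	newTarget, err := os.Readlink(newPath)
	if err != nil {
		return false, err
	}
	return oldTarget != newTarget, nil
}

func main() {
	// Hypothetical lower/upper layer roots.
	changed, err := symlinkChanged("/lower/etc/alternatives/editor", "/upper/etc/alternatives/editor")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("symlink target changed:", changed)
}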
@@ -336,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // back mtime if statDifferent(oldStat, oldInfo, newStat, info) || !bytes.Equal(oldChild.capability, newChild.capability) || + oldChild.target != newChild.target || !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { change := Change{ Path: newChild.path(), @@ -390,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { name: string(os.PathSeparator), idMappings: idMappings, children: make(map[string]*FileInfo), + target: "", } return root } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index dc308120dc..42e77c4dea 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { children: make(map[string]*FileInfo), parent: parent, idMappings: root.idMappings, + target: "", } cpath := filepath.Join(dir, path) stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) @@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { } info.stat = stat info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } xattrs, err := system.Llistxattr(cpath) - if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + if err != nil && !errors.Is(err, system.ENOTSUP) { return err } for _, key := range xattrs { @@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { info.xattrs[key] = string(value) } } + if fi.Mode()&os.ModeSymlink != 0 { + info.target, err = os.Readlink(cpath) + if err != nil { + return err + } + } parent.children[info.name] = info return nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 47742b05d8..c9cde23b7f 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -16,7 +16,7 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/containers/storage/pkg/ioutils" "github.com/docker/go-units" jsoniter "github.com/json-iterator/go" @@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo return "", "", nil } -func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { +func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) { return c.findDigestInternal(chunk.ChunkDigest) } -func unmarshalToc(manifest []byte) (*internal.TOC, error) { - var toc internal.TOC +func unmarshalToc(manifest []byte) (*minimal.TOC, error) { + var toc minimal.TOC iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) @@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { case "entries": for iter.ReadArray() { - var m internal.FileMetadata + var m minimal.FileMetadata for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { switch 
strings.ToLower(field) { case "type": diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go index e828d479f2..564efc8bf0 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -4,18 +4,18 @@ import ( "io" "github.com/containers/storage/pkg/chunked/compressor" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" ) const ( - TypeReg = internal.TypeReg - TypeChunk = internal.TypeChunk - TypeLink = internal.TypeLink - TypeChar = internal.TypeChar - TypeBlock = internal.TypeBlock - TypeDir = internal.TypeDir - TypeFifo = internal.TypeFifo - TypeSymlink = internal.TypeSymlink + TypeReg = minimal.TypeReg + TypeChunk = minimal.TypeChunk + TypeLink = minimal.TypeLink + TypeChar = minimal.TypeChar + TypeBlock = minimal.TypeBlock + TypeDir = minimal.TypeDir + TypeFifo = minimal.TypeFifo + TypeSymlink = minimal.TypeSymlink ) // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 2dac463543..229ce366e4 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -20,6 +20,12 @@ import ( expMaps "golang.org/x/exp/maps" ) +const ( + // maxTocSize is the maximum size of a blob that we will attempt to process. + // It is used to prevent DoS attacks from layers that embed a very large TOC file. 
+ maxTocSize = (1 << 20) * 50 +) + var typesToTar = map[string]byte{ TypeReg: tar.TypeReg, TypeLink: tar.TypeLink, @@ -44,25 +50,21 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, if blobSize <= footerSize { return nil, 0, errors.New("blob too small") } - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + + footer := make([]byte, footerSize) + streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)}) if err != nil { return nil, 0, err } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, 0, err - } - defer reader.Close() - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, 0, err + + for soe := range streamsOrErrors { + if soe.stream != nil { + _, err = io.ReadFull(soe.stream, footer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } } /* Read the ToC offset: @@ -81,48 +83,54 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, size := int64(blobSize - footerSize - tocOffset) // set a reasonable limit - if size > (1<<20)*50 { + if size > maxTocSize { return nil, 0, errors.New("manifest too big") } - chunk = ImageSourceChunk{ - Offset: uint64(tocOffset), - Length: uint64(size), - } - parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, 0, err - } - - var tocReader io.ReadCloser - select { - case r := <-parts: - tocReader = r - case err := <-errs: - return nil, 0, err - } - defer tocReader.Close() - - r, err := pgzip.NewReader(tocReader) + streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)}) if err != nil { return nil, 0, err } - defer r.Close() - - aTar := archivetar.NewReader(r) - header, err := aTar.Next() - if err != nil { - return nil, 0, err + var manifestUncompressed []byte + + for soe := range streamsOrErrors { + if soe.stream != nil { + err1 := func() error { + defer soe.stream.Close() + + r, err := pgzip.NewReader(soe.stream) + if err != nil { + return err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return err + } + // set a reasonable limit + if header.Size > maxTocSize { + return errors.New("manifest too big") + } + + manifestUncompressed = make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return err + } + return nil + }() + if err == nil { + err = err1 + } + } else if err == nil { + err = soe.err + } } - // set a reasonable limit - if header.Size > (1<<20)*50 { - return nil, 0, errors.New("manifest too big") - } - - manifestUncompressed := make([]byte, header.Size) - if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { - return nil, 0, err + if manifestUncompressed == nil { + return nil, 0, errors.New("manifest not found") } manifestDigester := digest.Canonical.Digester() @@ -140,10 +148,10 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. // Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset). 
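Note how readEstargzChunkedManifest now consumes GetBlobAt through a single channel of stream-or-error values and a plain range loop; the converter itself (getBlobAt and getBlobAtConverterGoroutine) appears later in storage_linux.go. The shape of that pattern, reduced to a minimal sketch rather than the patch's exact implementation:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

type streamOrErr struct {
	stream io.ReadCloser
	err    error
}

// merge forwards every stream and every error into one channel, so callers
// can use a plain range loop instead of selecting over two channels, where
// a receive from a closed channel always succeeds and is easy to get wrong.
func merge(streams chan io.ReadCloser, errs chan error) chan streamOrErr {
	out := make(chan streamOrErr)
	go func() {
		defer close(out)
		for streams != nil || errs != nil {
			select {
			case s, ok := <-streams:
				if !ok {
					streams = nil // a nil channel blocks, disabling this case
					continue
				}
				out <- streamOrErr{stream: s}
			case e, ok := <-errs:
				if !ok {
					errs = nil
					continue
				}
				out <- streamOrErr{err: e}
			}
		}
	}()
	return out
}

func main() {
	streams := make(chan io.ReadCloser, 1)
	errs := make(chan error, 1)
	streams <- io.NopCloser(strings.NewReader("chunk"))
	errs <- errors.New("second chunk failed")
	close(streams)
	close(errs)

	for soe := range merge(streams, errs) {
		if soe.err != nil {
			fmt.Println("error:", soe.err)
			continue
		}
		data, _ := io.ReadAll(soe.stream)
		soe.stream.Close()
		fmt.Println("got:", string(data))
	}
}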
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) { - offsetMetadata := annotations[internal.ManifestInfoKey] +func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) { + offsetMetadata := annotations[minimal.ManifestInfoKey] if offsetMetadata == "" { - return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey) + return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey) } var manifestChunk ImageSourceChunk var manifestLengthUncompressed, manifestType uint64 @@ -153,21 +161,21 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di // The tarSplit… values are valid if tarSplitChunk.Offset > 0 var tarSplitChunk ImageSourceChunk var tarSplitLengthUncompressed uint64 - if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found { if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil { return nil, nil, nil, 0, err } } - if manifestType != internal.ManifestTypeCRFS { + if manifestType != minimal.ManifestTypeCRFS { return nil, nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit - if manifestChunk.Length > (1<<20)*50 { + if manifestChunk.Length > maxTocSize { return nil, nil, nil, 0, errors.New("manifest too big") } - if manifestLengthUncompressed > (1<<20)*50 { + if manifestLengthUncompressed > maxTocSize { return nil, nil, nil, 0, errors.New("manifest too big") } @@ -175,26 +183,31 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di if tarSplitChunk.Offset > 0 { chunks = append(chunks, tarSplitChunk) } - parts, errs, err := blobStream.GetBlobAt(chunks) + + streamsOrErrors, err := getBlobAt(blobStream, chunks...) 
if err != nil { return nil, nil, nil, 0, err } + defer func() { + err := ensureAllBlobsDone(streamsOrErrors) + if retErr == nil { + retErr = err + } + }() + readBlob := func(len uint64) ([]byte, error) { - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, err + soe, ok := <-streamsOrErrors + if !ok { + return nil, errors.New("stream closed") + } + if soe.err != nil { + return nil, soe.err } + defer soe.stream.Close() blob := make([]byte, len) - if _, err := io.ReadFull(reader, blob); err != nil { - reader.Close() - return nil, err - } - if err := reader.Close(); err != nil { + if _, err := io.ReadFull(soe.stream, blob); err != nil { return nil, err } return blob, nil @@ -217,7 +230,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di var decodedTarSplit []byte = nil if toc.TarSplitDigest != "" { if tarSplitChunk.Offset <= 0 { - return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey) + return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey) } tarSplit, err := readBlob(tarSplitChunk.Length) if err != nil { @@ -247,11 +260,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di } // ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries. -func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { - pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries +func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error { + pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries for i := range toc.Entries { e := &toc.Entries[i] - if e.Type != internal.TypeChunk { + if e.Type != minimal.TypeChunk { if _, ok := pendingFiles[e.Name]; ok { return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name) } @@ -266,7 +279,7 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error { return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name) } delete(pendingFiles, hdr.Name) - expected, err := internal.NewFileMetadata(hdr) + expected, err := minimal.NewFileMetadata(hdr) if err != nil { return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err) } @@ -347,8 +360,8 @@ func ensureTimePointersMatch(a, b *time.Time) error { // ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data // in the tar stream or matching contents) -func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error { - // Keep this in sync with internal.FileMetadata! +func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error { + // Keep this in sync with minimal.FileMetadata! 
if a.Type != b.Type { return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type) diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index 654969749b..56ae4c770c 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -9,7 +9,7 @@ import ( "bytes" "io" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" "github.com/containers/storage/pkg/ioutils" "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" @@ -213,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) { compressed := bytes.NewBuffer(nil) digester := digest.Canonical.Digester() - zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) + zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level) if err != nil { return nil, err } @@ -254,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r buf := make([]byte, 4096) - zstdWriter, err := internal.ZstdWriterWithLevel(dest, level) + zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level) if err != nil { return err } @@ -276,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r return offset, nil } - var metadata []internal.FileMetadata + var metadata []minimal.FileMetadata for { hdr, err := tr.Next() if err != nil { @@ -341,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r chunkSize := rcReader.WrittenOut - lastChunkOffset if chunkSize > 0 { - chunkType := internal.ChunkTypeData + chunkType := minimal.ChunkTypeData if rcReader.IsLastChunkZeros { - chunkType = internal.ChunkTypeZeros + chunkType = minimal.ChunkTypeZeros } chunks = append(chunks, chunk{ @@ -368,17 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } } - mainEntry, err := internal.NewFileMetadata(hdr) + mainEntry, err := minimal.NewFileMetadata(hdr) if err != nil { return err } mainEntry.Digest = checksum mainEntry.Offset = startOffset mainEntry.EndOffset = lastOffset - entries := []internal.FileMetadata{mainEntry} + entries := []minimal.FileMetadata{mainEntry} for i := 1; i < len(chunks); i++ { - entries = append(entries, internal.FileMetadata{ - Type: internal.TypeChunk, + entries = append(entries, minimal.FileMetadata{ + Type: minimal.TypeChunk, Name: hdr.Name, ChunkOffset: chunks[i].ChunkOffset, }) @@ -424,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r } tarSplitData.zstd = nil - ts := internal.TarSplitData{ + ts := minimal.TarSplitData{ Data: tarSplitData.compressed.Bytes(), Digest: tarSplitData.digester.Digest(), UncompressedSize: tarSplitData.uncompressedCounter.Count, } - return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level) } type zstdChunkedWriter struct { diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index d98cee09de..fde42d75fc 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ 
b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -12,7 +12,8 @@ import ( "strings" "time" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" + storagePath "github.com/containers/storage/pkg/chunked/internal/path" "golang.org/x/sys/unix" ) @@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string { func getStMode(mode uint32, typ string) (uint32, error) { switch typ { - case internal.TypeReg, internal.TypeLink: + case minimal.TypeReg, minimal.TypeLink: mode |= unix.S_IFREG - case internal.TypeChar: + case minimal.TypeChar: mode |= unix.S_IFCHR - case internal.TypeBlock: + case minimal.TypeBlock: mode |= unix.S_IFBLK - case internal.TypeDir: + case minimal.TypeDir: mode |= unix.S_IFDIR - case internal.TypeFifo: + case minimal.TypeFifo: mode |= unix.S_IFIFO - case internal.TypeSymlink: + case minimal.TypeSymlink: mode |= unix.S_IFLNK default: return 0, fmt.Errorf("unknown type %s", typ) @@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func sanitizeName(name string) string { - path := filepath.Clean(name) - if path == "." { - path = "/" - } else if path[0] != '/' { - path = "/" + path - } - return path -} - -func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := sanitizeName(entry.Name) +func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error { + path := storagePath.CleanAbsPath(entry.Name) parent := filepath.Dir(path) if _, found := added[parent]; !found && path != "/" { - parentEntry := &internal.FileMetadata{ + parentEntry := &minimal.FileMetadata{ Name: parent, - Type: internal.TypeDir, + Type: minimal.TypeDir, Mode: 0o755, } if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil { @@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ nlinks := links[entry.Name] + links[entry.Linkname] + 1 link := "" - if entry.Type == internal.TypeLink { + if entry.Type == minimal.TypeLink { link = "@" } @@ -169,10 +160,10 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ var payload string if entry.Linkname != "" { - if entry.Type == internal.TypeSymlink { + if entry.Type == minimal.TypeSymlink { payload = entry.Linkname } else { - payload = sanitizeName(entry.Linkname) + payload = storagePath.CleanAbsPath(entry.Linkname) } } else { if len(entry.Digest) > 10 { @@ -219,7 +210,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[ // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump` func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) { - toc, ok := tocI.(*internal.TOC) + toc, ok := tocI.(*minimal.TOC) if !ok { return nil, fmt.Errorf("invalid TOC type") } @@ -235,21 +226,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, }() links := make(map[string]int) - added := make(map[string]*internal.FileMetadata) + added := make(map[string]*minimal.FileMetadata) for _, e := range toc.Entries { if e.Linkname == "" { continue } - if e.Type == internal.TypeSymlink { + if e.Type == minimal.TypeSymlink { continue } links[e.Linkname] = links[e.Linkname] + 1 } if len(toc.Entries) == 0 { - root := &internal.FileMetadata{ + 
root := &minimal.FileMetadata{ Name: "/", - Type: internal.TypeDir, + Type: minimal.TypeDir, Mode: 0o755, } @@ -261,7 +252,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, } for _, e := range toc.Entries { - if e.Type == internal.TypeChunk { + if e.Type == minimal.TypeChunk { continue } if err := dumpNode(w, added, links, verityDigests, &e); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go index e26e5e5c55..82685e9c91 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go @@ -15,7 +15,8 @@ import ( driversCopy "github.com/containers/storage/drivers/copy" "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" + storagePath "github.com/containers/storage/pkg/chunked/internal/path" securejoin "github.com/cyphar/filepath-securejoin" "github.com/vbatts/tar-split/archive/tar" "golang.org/x/sys/unix" @@ -34,14 +35,14 @@ func procPathForFd(fd int) string { return fmt.Sprintf("/proc/self/fd/%d", fd) } -// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that +// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that // are not part of the TOC document. // Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk. type fileMetadata struct { - internal.FileMetadata + minimal.FileMetadata // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg. - chunks []*internal.FileMetadata + chunks []*minimal.FileMetadata // skipSetAttrs is set when the file attributes must not be // modified, e.g. it is a hard link from a different source, @@ -49,10 +50,37 @@ type fileMetadata struct { skipSetAttrs bool } +// splitPath takes a file path as input and returns two components: dir and base. +// Differently than filepath.Split(), this function handles some edge cases. +// If the path refers to a file in the root directory, the returned dir is "/". +// The returned base value is never empty, it never contains any slash and the +// value "..". +func splitPath(path string) (string, string, error) { + path = storagePath.CleanAbsPath(path) + dir, base := filepath.Split(path) + if base == "" { + base = "." + } + // Remove trailing slashes from dir, but make sure that "/" is preserved. + dir = strings.TrimSuffix(dir, "/") + if dir == "" { + dir = "/" + } + + if strings.Contains(base, "/") { + // This should never happen, but be safe as the base is passed to *at syscalls. + return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path) + } + return dir, base, nil +} + func doHardLink(dirfd, srcFd int, destFile string) error { - destDir, destBase := filepath.Split(destFile) + destDir, destBase, err := splitPath(destFile) + if err != nil { + return err + } destDirFd := dirfd - if destDir != "" && destDir != "." 
{ + if destDir != "/" { f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0) if err != nil { return err @@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error { return nil } - err := doLink() + err = doLink() // if the destination exists, unlink it first and try again if err != nil && os.IsExist(err) { @@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the // last component as the path to openat(). if hasNoFollow { - dirName, baseName := filepath.Split(name) - if dirName != "" && dirName != "." { + dirName, baseName, err := splitPath(name) + if err != nil { + return -1, err + } + if dirName != "/" { newRoot, err := securejoin.SecureJoin(root, dirName) if err != nil { return -1, err @@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil if errors.Is(err, unix.ENOENT) { parent := filepath.Dir(name) - if parent != "" { + // do not create the root directory, it should always exist + if parent != name { pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode) if err2 != nil { return nil, err @@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error { } func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { - parent, base := filepath.Split(name) + parent, base, err := splitPath(name) + if err != nil { + return err + } parentFd := dirfd - if parent != "" && parent != "." { + if parent != "/" { parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0) if err != nil { return err @@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch } func safeSymlink(dirfd int, metadata *fileMetadata) error { - destDir, destBase := filepath.Split(metadata.Name) + destDir, destBase, err := splitPath(metadata.Name) + if err != nil { + return err + } destDirFd := dirfd - if destDir != "" && destDir != "." { + if destDir != "/" { f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0) if err != nil { return err @@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { - dir, base := filepath.Split(path) + dir, base, err := splitPath(path) + if err != nil { + return err + } dirfd := d.Dirfd - if dir != "" && dir != "." 
{ + if dir != "/" { dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0) if err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go similarity index 99% rename from vendor/github.com/containers/storage/pkg/chunked/internal/compression.go rename to vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go index 2a24e4bf2e..377ece2e88 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go @@ -1,4 +1,4 @@ -package internal +package minimal // NOTE: This is used from github.com/containers/image by callers that // don't otherwise use containers/storage, so don't make this depend on any diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go new file mode 100644 index 0000000000..50ca1def87 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go @@ -0,0 +1,12 @@ +package path + +import ( + "path/filepath" +) + +// CleanAbsPath removes any ".." and "." from the path +// and ensures it starts with a "/". If the path refers to the root +// directory, it returns "/". +func CleanAbsPath(path string) string { + return filepath.Clean("/" + path) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 8ecbfb9826..cbf8229ac2 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -22,7 +22,8 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" + path "github.com/containers/storage/pkg/chunked/internal/path" "github.com/containers/storage/pkg/chunked/toc" "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" @@ -59,7 +60,7 @@ type compressedFileType int type chunkedDiffer struct { stream ImageSourceSeekable manifest []byte - toc *internal.TOC // The parsed contents of manifest, or nil if not yet available + toc *minimal.TOC // The parsed contents of manifest, or nil if not yet available tarSplit []byte layersCache *layersCache tocOffset int64 @@ -189,7 +190,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges // On error, the second parameter is true if a fallback to an alternative (either the makeConverToRaw differ, or a non-partial pull) // is permissible. func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) { - zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] + zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey] estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] switch { @@ -391,7 +392,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool { } // fill only the attributes used by canDedupMetadataWithHardLink. 
otherFile := fileMetadata{ - FileMetadata: internal.FileMetadata{ + FileMetadata: minimal.FileMetadata{ UID: int(st.Uid), GID: int(st.Gid), Mode: int64(st.Mode), @@ -735,7 +736,12 @@ func (d *destinationFile) Close() (Err error) { } } - return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) + mode := os.FileMode(d.metadata.Mode) + if d.options.ForceMask != nil { + mode = *d.options.ForceMask + } + + return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false) } func closeDestinationFiles(files chan *destinationFile, errors chan error) { @@ -1141,44 +1147,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { return new, nil } -func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { - var payload io.ReadCloser - var streams chan io.ReadCloser - var errs chan error - var err error +type streamOrErr struct { + stream io.ReadCloser + err error +} - chunksToRequest := []ImageSourceChunk{ - { - Offset: 0, - Length: uint64(c.blobSize), - }, +// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered. +func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) { + for soe := range streamsOrErrors { + if soe.stream != nil { + _ = soe.stream.Close() + } else if retErr == nil { + retErr = soe.err + } } + return +} - streams, errs, err = c.stream.GetBlobAt(chunksToRequest) - if err != nil { - return "", err +// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends +// either a stream or an error to the stream channel. The streams channel is closed when +// there are no more streams and errors to read. +// It ensures that no more than maxStreams streams are returned, and that every item from the +// streams and errs channels is consumed. +func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) { + tooManyStreams := false + streamsSoFar := 0 + + err := errors.New("Unexpected error in getBlobAtGoroutine") + + defer func() { + if err != nil { + stream <- streamOrErr{err: err} + } + close(stream) + }() + +loop: + for { + select { + case p, ok := <-streams: + if !ok { + streams = nil + break loop + } + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + case err, ok := <-errs: + if !ok { + errs = nil + break loop + } + stream <- streamOrErr{err: err} + } } - select { - case p := <-streams: - payload = p - case err := <-errs: - return "", err + if streams != nil { + for p := range streams { + if streamsSoFar >= maxStreams { + tooManyStreams = true + _ = p.Close() + continue + } + streamsSoFar++ + stream <- streamOrErr{stream: p} + } + } + if errs != nil { + for err := range errs { + stream <- streamOrErr{err: err} + } } - if payload == nil { - return "", errors.New("invalid stream returned") + if tooManyStreams { + stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)} } - defer payload.Close() + err = nil +} - originalRawDigester := digest.Canonical.Digester() +// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt. +// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel +// always succeeds in select. 
+// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`. +func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) { + streams, errs, err := is.GetBlobAt(chunksToRequest) + if err != nil { + return nil, err + } + stream := make(chan streamOrErr) + go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest)) + return stream, nil +} - r := io.TeeReader(payload, originalRawDigester.Hash()) +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)}) + if err != nil { + return "", err + } - // copy the entire tarball and compute its digest - _, err = io.CopyBuffer(destination, r, c.copyBuffer) + originalRawDigester := digest.Canonical.Digester() + for soe := range streamsOrErrors { + if soe.stream != nil { + r := io.TeeReader(soe.stream, originalRawDigester.Hash()) + // copy the entire tarball and compute its digest + _, err = io.CopyBuffer(destination, r, c.copyBuffer) + _ = soe.stream.Close() + } + if soe.err != nil && err == nil { + err = soe.err + } + } return originalRawDigester.Digest(), err } +func typeToOsMode(typ string) (os.FileMode, error) { + switch typ { + case TypeReg, TypeLink: + return 0, nil + case TypeSymlink: + return os.ModeSymlink, nil + case TypeDir: + return os.ModeDir, nil + case TypeChar: + return os.ModeDevice | os.ModeCharDevice, nil + case TypeBlock: + return os.ModeDevice, nil + case TypeFifo: + return os.ModeNamedPipe, nil + } + return 0, fmt.Errorf("unknown file type %q", typ) +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { defer c.layersCache.release() defer func() { @@ -1325,7 +1427,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err == nil { value := idtools.Stat{ IDs: idtools.IDPair{UID: int(uid), GID: int(gid)}, - Mode: os.FileMode(mode), + Mode: os.ModeDir | os.FileMode(mode), } if err := idtools.SetContainersOverrideXattr(dest, value); err != nil { return output, err @@ -1408,13 +1510,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff filesToWaitFor := 0 for i := range mergedEntries { r := &mergedEntries[i] - if options.ForceMask != nil { - value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode)) - if r.Xattrs == nil { - r.Xattrs = make(map[string]string) - } - r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) - } mode := os.FileMode(r.Mode) @@ -1423,10 +1518,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return output, err } - r.Name = filepath.Clean(r.Name) + size := r.Size + + // update also the implementation of ForceMask in pkg/archive + if options.ForceMask != nil { + mode = *options.ForceMask + + // special files will be stored as regular files + if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink { + t = tar.TypeReg + size = 0 + } + + // if the entry will be stored as a directory or a regular file, store in a xattr the original + // owner and mode. 
+ if t == tar.TypeDir || t == tar.TypeReg { + typeMode, err := typeToOsMode(r.Type) + if err != nil { + return output, err + } + value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor)) + if r.Xattrs == nil { + r.Xattrs = make(map[string]string) + } + r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) + } + } + + r.Name = path.CleanAbsPath(r.Name) // do not modify the value of symlinks if r.Linkname != "" && t != tar.TypeSymlink { - r.Linkname = filepath.Clean(r.Linkname) + r.Linkname = path.CleanAbsPath(r.Linkname) } if whiteoutConverter != nil { @@ -1434,8 +1556,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Typeflag: t, Name: r.Name, Linkname: r.Linkname, - Size: r.Size, - Mode: r.Mode, + Size: size, + Mode: int64(mode), Uid: r.UID, Gid: r.GID, } @@ -1454,7 +1576,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff switch t { case tar.TypeReg: // Create directly empty files. - if r.Size == 0 { + if size == 0 { // Used to have a scope for cleanup. createEmptyFile := func() error { file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0) @@ -1474,7 +1596,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: - if r.Name == "" || r.Name == "." { + if r.Name == "/" { output.RootDirMode = &mode } if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil { @@ -1509,7 +1631,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return output, fmt.Errorf("invalid type %q", t) } - totalChunksSize += r.Size + totalChunksSize += size if t == tar.TypeReg { index := i @@ -1572,7 +1694,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } switch chunk.ChunkType { - case internal.ChunkTypeData: + case minimal.ChunkTypeData: root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) if err != nil { return output, err @@ -1585,7 +1707,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Offset: offset, } } - case internal.ChunkTypeZeros: + case minimal.ChunkTypeZeros: missingPartsSize -= size mp.Hole = true // Mark all chunks belonging to the missing part as holes @@ -1618,7 +1740,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return output, nil } -func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { +func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool { // ignore the metadata files for the estargz format. 
if fileType != fileTypeEstargz { return false @@ -1631,7 +1753,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { return false } -func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) { +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) { countNextChunks := func(start int) int { count := 0 for _, e := range entries[start:] { @@ -1668,7 +1790,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if e.Type == TypeReg { nChunks := countNextChunks(i + 1) - e.chunks = make([]*internal.FileMetadata, nChunks+1) + e.chunks = make([]*minimal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { // we need a copy here, otherwise we override the // .Size later @@ -1703,7 +1825,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the // same digest as chunk.ChunkDigest -func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { +func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { return false diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go index 6fbaa41b5a..6f39b2ae2e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go +++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go @@ -3,7 +3,7 @@ package toc import ( "errors" - "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/internal/minimal" digest "github.com/opencontainers/go-digest" ) @@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" // This is an experimental feature and may be changed/removed in the future. func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { d1, ok1 := annotations[tocJSONDigestAnnotation] - d2, ok2 := annotations[internal.ManifestChecksumKey] + d2, ok2 := annotations[minimal.ManifestChecksumKey] switch { case ok1 && ok2: return nil, errors.New("both zstd:chunked and eStargz TOC found") diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 248594e935..299bdbef7f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "io/fs" "os" "os/user" "runtime" @@ -369,27 +370,66 @@ func checkChownErr(err error, name string, uid, gid int) error { // Stat contains file states that can be overridden with ContainersOverrideXattr. type Stat struct { - IDs IDPair - Mode os.FileMode + IDs IDPair + Mode os.FileMode + Major int + Minor int } // FormatContainersOverrideXattr will format the given uid, gid, and mode into a string // that can be used as the value for the ContainersOverrideXattr xattr. 
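FormatContainersOverrideXattrDevice, just below, widens the override-xattr payload from "uid:gid:mode" to "uid:gid:mode:type", where type is one of file, dir, symlink, pipe, socket, block-<major>-<minor> or char-<major>-<minor>, and the setuid/setgid/sticky bits are folded into a four-digit octal mode. A standalone sketch that mirrors this layout (an illustration of the format, not the package's implementation):

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// encodeOverride mirrors the documented "uid:gid:mode:type" layout.
func encodeOverride(uid, gid int, mode fs.FileMode, major, minor int) string {
	typ := "file"
	switch mode & os.ModeType {
	case os.ModeDir:
		typ = "dir"
	case os.ModeSymlink:
		typ = "symlink"
	case os.ModeNamedPipe:
		typ = "pipe"
	case os.ModeSocket:
		typ = "socket"
	case os.ModeDevice:
		typ = fmt.Sprintf("block-%d-%d", major, minor)
	case os.ModeDevice | os.ModeCharDevice:
		typ = fmt.Sprintf("char-%d-%d", major, minor)
	}
	perm := mode & os.ModePerm
	// Fold the non-permission Unix bits into the octal field.
	if mode&os.ModeSetuid != 0 {
		perm |= 0o4000
	}
	if mode&os.ModeSetgid != 0 {
		perm |= 0o2000
	}
	if mode&os.ModeSticky != 0 {
		perm |= 0o1000
	}
	return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, perm, typ)
}

func main() {
	fmt.Println(encodeOverride(0, 0, os.ModeDir|0o755, 0, 0))                      // 0:0:0755:dir
	fmt.Println(encodeOverride(0, 0, os.ModeDevice|os.ModeCharDevice|0o666, 1, 3)) // 0:0:0666:char-1-3
}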
func FormatContainersOverrideXattr(uid, gid, mode int) string { - return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777) + return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0) +} + +// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string +// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also +// needs the major and minor numbers. +func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string { + typ := "" + switch mode & os.ModeType { + case os.ModeDir: + typ = "dir" + case os.ModeSymlink: + typ = "symlink" + case os.ModeNamedPipe: + typ = "pipe" + case os.ModeSocket: + typ = "socket" + case os.ModeDevice: + typ = fmt.Sprintf("block-%d-%d", major, minor) + case os.ModeDevice | os.ModeCharDevice: + typ = fmt.Sprintf("char-%d-%d", major, minor) + default: + typ = "file" + } + unixMode := mode & os.ModePerm + if mode&os.ModeSetuid != 0 { + unixMode |= 0o4000 + } + if mode&os.ModeSetgid != 0 { + unixMode |= 0o2000 + } + if mode&os.ModeSticky != 0 { + unixMode |= 0o1000 + } + return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ) } // GetContainersOverrideXattr will get and decode ContainersOverrideXattr. func GetContainersOverrideXattr(path string) (Stat, error) { - var stat Stat xstat, err := system.Lgetxattr(path, ContainersOverrideXattr) if err != nil { - return stat, err + return Stat{}, err } + return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist. +} +func parseOverrideXattr(xstat []byte) (Stat, error) { + var stat Stat attrs := strings.Split(string(xstat), ":") - if len(attrs) != 3 { - return stat, fmt.Errorf("The number of clons in %s does not equal to 3", + if len(attrs) < 3 { + return stat, fmt.Errorf("The number of parts in %s is less than 3", ContainersOverrideXattr) } @@ -397,47 +437,105 @@ func GetContainersOverrideXattr(path string) (Stat, error) { if err != nil { return stat, fmt.Errorf("Failed to parse UID: %w", err) } - stat.IDs.UID = int(value) - value, err = strconv.ParseUint(attrs[0], 10, 32) + value, err = strconv.ParseUint(attrs[1], 10, 32) if err != nil { return stat, fmt.Errorf("Failed to parse GID: %w", err) } - stat.IDs.GID = int(value) value, err = strconv.ParseUint(attrs[2], 8, 32) if err != nil { return stat, fmt.Errorf("Failed to parse mode: %w", err) } + stat.Mode = os.FileMode(value) & os.ModePerm + if value&0o1000 != 0 { + stat.Mode |= os.ModeSticky + } + if value&0o2000 != 0 { + stat.Mode |= os.ModeSetgid + } + if value&0o4000 != 0 { + stat.Mode |= os.ModeSetuid + } - stat.Mode = os.FileMode(value) - + if len(attrs) > 3 { + typ := attrs[3] + if strings.HasPrefix(typ, "file") { + } else if strings.HasPrefix(typ, "dir") { + stat.Mode |= os.ModeDir + } else if strings.HasPrefix(typ, "symlink") { + stat.Mode |= os.ModeSymlink + } else if strings.HasPrefix(typ, "pipe") { + stat.Mode |= os.ModeNamedPipe + } else if strings.HasPrefix(typ, "socket") { + stat.Mode |= os.ModeSocket + } else if strings.HasPrefix(typ, "block") { + stat.Mode |= os.ModeDevice + stat.Major, stat.Minor, err = parseDevice(typ) + if err != nil { + return stat, err + } + } else if strings.HasPrefix(typ, "char") { + stat.Mode |= os.ModeDevice | os.ModeCharDevice + stat.Major, stat.Minor, err = parseDevice(typ) + if err != nil { + return stat, err + } + } else { + return stat, fmt.Errorf("Invalid file type %s", typ) + } + } return stat, nil } +func parseDevice(typ string) (int, int, error) { + 
parts := strings.Split(typ, "-") + // If there are more than 3 parts, just ignore them to be forward compatible + if len(parts) < 3 { + return 0, 0, fmt.Errorf("Invalid device type %s", typ) + } + if parts[0] != "block" && parts[0] != "char" { + return 0, 0, fmt.Errorf("Invalid device type %s", typ) + } + major, err := strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, fmt.Errorf("Failed to parse major number: %w", err) + } + minor, err := strconv.Atoi(parts[2]) + if err != nil { + return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err) + } + return major, minor, nil +} + // SetContainersOverrideXattr will encode and set ContainersOverrideXattr. func SetContainersOverrideXattr(path string, stat Stat) error { - value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode)) + value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor) return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0) } func SafeChown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode os.FileMode = 0o0700 + stat := Stat{ + Mode: os.FileMode(0o0700), + } xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil { - attrs := strings.Split(string(xstat), ":") - if len(attrs) == 3 { - val, err := strconv.ParseUint(attrs[2], 8, 32) - if err == nil { - mode = os.FileMode(val) - } + if err == nil && xstat != nil { + stat, err = parseOverrideXattr(xstat) + if err != nil { + return err + } + } else { + st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. + if err != nil { + return err } + stat.Mode = st.Mode() } - value := Stat{IDPair{uid, gid}, mode} - if err = SetContainersOverrideXattr(name, value); err != nil { + stat.IDs = IDPair{UID: uid, GID: gid} + if err = SetContainersOverrideXattr(name, stat); err != nil { return err } uid = os.Getuid() @@ -453,19 +551,24 @@ func SafeChown(name string, uid, gid int) error { func SafeLchown(name string, uid, gid int) error { if runtime.GOOS == "darwin" { - var mode os.FileMode = 0o0700 + stat := Stat{ + Mode: os.FileMode(0o0700), + } xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil { - attrs := strings.Split(string(xstat), ":") - if len(attrs) == 3 { - val, err := strconv.ParseUint(attrs[2], 8, 32) - if err == nil { - mode = os.FileMode(val) - } + if err == nil && xstat != nil { + stat, err = parseOverrideXattr(xstat) + if err != nil { + return err + } + } else { + st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. 
+ if err != nil { + return err } + stat.Mode = st.Mode() } - value := Stat{IDPair{uid, gid}, mode} - if err = SetContainersOverrideXattr(name, value); err != nil { + stat.IDs = IDPair{UID: uid, GID: gid} + if err = SetContainersOverrideXattr(name, stat); err != nil { return err } uid = os.Getuid() diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 4ad6345178..40cb22b0fb 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go index cf77e52328..9acc519ccd 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -1,4 +1,4 @@ -//go:build linux && cgo +//go:build linux package loopback diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go index 2a4f5cbcfb..2cd8fc8b0e 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -1,21 +1,7 @@ -//go:build linux && cgo +//go:build linux package loopback -/* -#include // FIXME: present only for defines, maybe we can remove it? - -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -*/ -import "C" - type loopInfo64 struct { loDevice uint64 /* ioctl r/o */ loInode uint64 /* ioctl r/o */ @@ -34,19 +20,19 @@ type loopInfo64 struct { // IOCTL consts const ( - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY + LoopSetFd = 0x4C00 + LoopCtlGetFree = 0x4C82 + LoopGetStatus64 = 0x4C05 + LoopSetStatus64 = 0x4C04 + LoopClrFd = 0x4C01 + LoopSetCapacity = 0x4C07 ) // LOOP consts. 
 const (
-	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
-	LoFlagsReadOnly  = C.LO_FLAGS_READ_ONLY
-	LoFlagsPartScan  = C.LO_FLAGS_PARTSCAN
-	LoKeySize        = C.LO_KEY_SIZE
-	LoNameSize       = C.LO_NAME_SIZE
+	LoFlagsAutoClear = 4
+	LoFlagsReadOnly  = 1
+	LoFlagsPartScan  = 8
+	LoKeySize        = 32
+	LoNameSize       = 64
 )
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index 2d88212794..b3527e3a0d 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -1,4 +1,4 @@
-//go:build linux && cgo
+//go:build linux
 
 package loopback
 
diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
new file mode 100644
index 0000000000..1314058f17
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
@@ -0,0 +1,93 @@
+//go:build freebsd
+
+package system
+
+import (
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	EXTATTR_NAMESPACE_EMPTY  = unix.EXTATTR_NAMESPACE_EMPTY
+	EXTATTR_NAMESPACE_USER   = unix.EXTATTR_NAMESPACE_USER
+	EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM
+)
+
+// ExtattrGetLink retrieves the value of the extended attribute identified by attrname
+// in the given namespace and associated with the given path in the file system.
+// If the path is a symbolic link, the extended attribute is retrieved from the link itself.
+// Returns a []byte slice if the extattr is set and nil otherwise.
+func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
+	size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(nil)), 0)
+	if errno != nil {
+		if errno == unix.ENOATTR {
+			return nil, nil
+		}
+		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
+	}
+	if size == 0 {
+		return []byte{}, nil
+	}
+
+	dest := make([]byte, size)
+	size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(&dest[0])), size)
+	if errno != nil {
+		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
+	}
+
+	return dest[:size], nil
+}
+
+// ExtattrSetLink sets the value of the extended attribute identified by attrname
+// in the given namespace and associated with the given path in the file system.
+// If the path is a symbolic link, the extended attribute is set on the link itself.
+func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
+	nbytes := len(data)
+	if nbytes == 0 {
+		data = []byte{0} // ensure a valid pointer below; the kernel still sees length 0
+	}
+	if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(&data[0])), nbytes); errno != nil {
+		return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno}
+	}
+
+	return nil
+}
+
+// ExtattrListLink lists extended attributes associated with the given path
+// in the specified namespace. If the path is a symbolic link, the attributes
+// are listed from the link itself.
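The wire format consumed by the function that follows deserves a note: extattr_list_link(2) returns attribute names as a packed sequence of length-prefixed strings, one unsigned byte of length before each name, with no NUL terminators. A standalone sketch of that decoding (the buffer contents are made up):

```go
package main

import "fmt"

// decodeExtattrList decodes an extattr_list_link(2) result buffer: each
// attribute name is preceded by a single byte holding its length.
func decodeExtattrList(buf []byte) []string {
	var attrs []string
	for i := 0; i < len(buf); {
		n := int(buf[i])
		i++
		if i+n > len(buf) {
			break // truncated entry: stop rather than read past the buffer
		}
		attrs = append(attrs, string(buf[i:i+n]))
		i += n
	}
	return attrs
}

func main() {
	// Two attributes: "mime_type" (9 bytes) and "md5" (3 bytes).
	buf := []byte("\x09mime_type\x03md5")
	fmt.Println(decodeExtattrList(buf)) // [mime_type md5]
}
```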
+func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + size, errno := unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(nil)), 0) + if errno != nil { + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + if size == 0 { + return []string{}, nil + } + + dest := make([]byte, size) + size, errno = unix.ExtattrListLink(path, attrnamespace, + uintptr(unsafe.Pointer(&dest[0])), size) + if errno != nil { + return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} + } + + var attrs []string + for i := 0; i < size; { + // Each attribute is preceded by a single byte length + length := int(dest[i]) + i++ + if i+length > size { + break + } + attrs = append(attrs, string(dest[i:i+length])) + i += length + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go new file mode 100644 index 0000000000..07b67357f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go @@ -0,0 +1,24 @@ +//go:build !freebsd + +package system + +const ( + EXTATTR_NAMESPACE_EMPTY = 0 + EXTATTR_NAMESPACE_USER = 0 + EXTATTR_NAMESPACE_SYSTEM = 0 +) + +// ExtattrGetLink is not supported on platforms other than FreeBSD. +func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// ExtattrSetLink is not supported on platforms other than FreeBSD. +func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { + return ErrNotSupportedPlatform +} + +// ExtattrListLink is not supported on platforms other than FreeBSD. +func ExtattrListLink(path string, attrnamespace int) ([]string, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go new file mode 100644 index 0000000000..715f05b938 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go index 75275b964e..27ada2083e 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go @@ -12,7 +12,7 @@ const ( E2BIG unix.Errno = unix.E2BIG // Operation not supported - EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + ENOTSUP unix.Errno = unix.ENOTSUP ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go new file mode 100644 index 0000000000..5d653976e5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go @@ -0,0 +1,85 @@ +package system + +import ( + "strings" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + ENOTSUP unix.Errno = unix.ENOTSUP + + // Value is 
too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW +) + +var ( + namespaceMap = map[string]int{ + "user": EXTATTR_NAMESPACE_USER, + "system": EXTATTR_NAMESPACE_SYSTEM, + } +) + +func xattrToExtattr(xattr string) (namespace int, extattr string, err error) { + namespaceName, extattr, found := strings.Cut(xattr, ".") + if !found { + return -1, "", ENOTSUP + } + + namespace, ok := namespaceMap[namespaceName] + if !ok { + return -1, "", ENOTSUP + } + return namespace, extattr, nil +} + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + namespace, extattr, err := xattrToExtattr(attr) + if err != nil { + return nil, err + } + return ExtattrGetLink(path, namespace, extattr) +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, value []byte, flags int) error { + if flags != 0 { + // FIXME: Flags are not supported on FreeBSD, but we can implement + // them mimicking the behavior of the Linux implementation. + // See lsetxattr(2) on Linux for more information. + return ENOTSUP + } + + namespace, extattr, err := xattrToExtattr(attr) + if err != nil { + return err + } + return ExtattrSetLink(path, namespace, extattr, value) +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. +func Llistxattr(path string) ([]string, error) { + attrs := []string{} + + for namespaceName, namespace := range namespaceMap { + namespaceAttrs, err := ExtattrListLink(path, namespace) + if err != nil { + return nil, err + } + + for _, attr := range namespaceAttrs { + attrs = append(attrs, namespaceName+"."+attr) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 6b47c4e717..12462cca33 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -12,7 +12,7 @@ const ( E2BIG unix.Errno = unix.E2BIG // Operation not supported - EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + ENOTSUP unix.Errno = unix.ENOTSUP // Value is too small or too large for maximum size allowed EOVERFLOW unix.Errno = unix.EOVERFLOW diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index 0ab61f3def..66bf5858f6 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,4 +1,4 @@ -//go:build !linux && !darwin +//go:build !linux && !darwin && !freebsd package system @@ -9,7 +9,7 @@ const ( E2BIG syscall.Errno = syscall.Errno(0) // Operation not supported - EOPNOTSUPP syscall.Errno = syscall.Errno(0) + ENOTSUP syscall.Errno = syscall.Errno(0) // Value is too small or too large for maximum size allowed EOVERFLOW syscall.Errno = syscall.Errno(0) diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index a2d385008b..cd1cf861f6 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -20,6 +20,7 @@ import ( _ 
"github.com/containers/storage/drivers/register" drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/internal/dedup" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" @@ -166,6 +167,26 @@ type flaggableStore interface { type StoreOptions = types.StoreOptions +type DedupHashMethod = dedup.DedupHashMethod + +const ( + DedupHashInvalid = dedup.DedupHashInvalid + DedupHashCRC = dedup.DedupHashCRC + DedupHashFileSize = dedup.DedupHashFileSize + DedupHashSHA256 = dedup.DedupHashSHA256 +) + +type ( + DedupOptions = dedup.DedupOptions + DedupResult = dedup.DedupResult +) + +// DedupArgs is used to pass arguments to the Dedup command. +type DedupArgs struct { + // Options that are passed directly to the internal/dedup.DedupDirs function. + Options DedupOptions +} + // Store wraps up the various types of file-based stores that we use into a // singleton object that initializes and manages them all together. type Store interface { @@ -589,6 +610,9 @@ type Store interface { // MultiList returns consistent values as of a single point in time. // WARNING: The values may already be out of date by the time they are returned to the caller. MultiList(MultiListOptions) (MultiListResult, error) + + // Dedup deduplicates layers in the store. + Dedup(DedupArgs) (drivers.DedupResult, error) } // AdditionalLayer represents a layer that is contained in the additional layer store @@ -3843,3 +3867,43 @@ func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) { } return out, nil } + +// Dedup deduplicates layers in the store. +func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) { + imgs, err := s.Images() + if err != nil { + return drivers.DedupResult{}, err + } + var topLayers []string + for _, i := range imgs { + topLayers = append(topLayers, i.TopLayer) + topLayers = append(topLayers, i.MappedTopLayers...) + } + return writeToLayerStore(s, func(rlstore rwLayerStore) (drivers.DedupResult, error) { + layers := make(map[string]struct{}) + for _, i := range topLayers { + cur := i + for cur != "" { + if _, visited := layers[cur]; visited { + break + } + l, err := rlstore.Get(cur) + if err != nil { + if err == ErrLayerUnknown { + break + } + return drivers.DedupResult{}, err + } + layers[cur] = struct{}{} + cur = l.Parent + } + } + r := drivers.DedupArgs{ + Options: req.Options, + } + for l := range layers { + r.Layers = append(r.Layers, l) + } + return rlstore.dedup(r) + }) +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe1653..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. 
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index fe238210cd..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleManager() Manager { - return &simpleManager{ - Challenges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challenges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challenges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challenges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. 
-type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index cfd52ae95d..f519806cd4 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5820,8 +5820,6 @@ definitions: type: "string" example: - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" CDISpecDirs: description: | List of directories where (Container Device 
Interface) CDI @@ -9563,7 +9561,7 @@ paths: type: "string" example: "OK" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: @@ -9619,7 +9617,7 @@ paths: type: "string" example: "(empty)" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index bf3e9b1cd6..7c43268b3a 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -56,8 +56,8 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") + ping.APIVersion = resp.header.Get("Api-Version") + ping.OSType = resp.header.Get("Ostype") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 5e29a6b3b8..97f355d2e4 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -10,7 +10,9 @@ import ( // Lstat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go index 359c791d9b..4180f3ac20 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -4,6 +4,8 @@ import "os" // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index 2a62237a45..e0cd22d7a7 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -11,6 +11,8 @@ import ( // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. +// +// Deprecated: this function is only used internally, and will be removed in the next release. 
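Stepping back to the Api-Version/Ostype renames above: Go's net/http stores header keys in canonical MIME form, so "Api-Version" and "Ostype" are what actually sit in the header map; Header.Get canonicalizes its argument too, so lookups behave the same under either spelling, and the change only aligns the docs and client with the wire form. A quick illustration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	// Canonical MIME form: first letter and letters after '-' upper-cased,
	// everything else lower-cased.
	fmt.Println(textproto.CanonicalMIMEHeaderKey("API-Version")) // Api-Version
	fmt.Println(textproto.CanonicalMIMEHeaderKey("OSType"))      // Ostype

	// Get canonicalizes the lookup key as well, so both spellings match.
	h := http.Header{}
	h.Set("API-Version", "1.47")
	fmt.Println(h.Get("Api-Version"), h.Get("API-Version")) // 1.47 1.47
}
```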
func Mkdev(major int64, minor int64) uint32 { return uint32(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go index e218e742d4..4f66453d62 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -8,6 +8,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go index 34df0b9236..34c5532631 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -8,6 +8,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 4309d42b9f..0557235f98 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -17,6 +17,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { // FromStatT converts a syscall.Stat_t type to a system.Stat_t type // This is exposed on Linux as pkg/archive/changes uses it. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 205e54677d..661b0bed20 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -9,6 +9,8 @@ import ( // StatT type contains status of a file. It contains metadata // like permission, owner, group, size, etc about a file. +// +// Deprecated: this type is only used internally, and will be removed in the next release. type StatT struct { mode uint32 uid uint32 @@ -56,7 +58,9 @@ func (s StatT) IsDir() bool { // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 10876cd73e..e74a0f4fd7 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -7,6 +7,8 @@ import ( // StatT type contains status of a file. It contains metadata // like permission, size, etc about a file. 
+// +// Deprecated: this type is only used internally, and will be removed in the next release. type StatT struct { mode os.FileMode size int64 @@ -31,7 +33,9 @@ func (s StatT) Mtim() time.Time { // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Stat(path string) (*StatT, error) { fi, err := os.Stat(path) if err != nil { diff --git a/vendor/github.com/proglottis/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore index 0210b26e03..bc485bfc5b 100644 --- a/vendor/github.com/proglottis/gpgme/.gitignore +++ b/vendor/github.com/proglottis/gpgme/.gitignore @@ -1 +1,3 @@ testdata/gpghome/random_seed +testdata/gpghome/.gpg-v21-migrated +testdata/gpghome/private-keys-v1.d/ diff --git a/vendor/github.com/proglottis/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go deleted file mode 100644 index d1dc610d42..0000000000 --- a/vendor/github.com/proglottis/gpgme/callbacks.go +++ /dev/null @@ -1,42 +0,0 @@ -package gpgme - -import ( - "sync" -) - -var callbacks struct { - sync.Mutex - m map[uintptr]interface{} - c uintptr -} - -func callbackAdd(v interface{}) uintptr { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m == nil { - callbacks.m = make(map[uintptr]interface{}) - } - callbacks.c++ - ret := callbacks.c - callbacks.m[ret] = v - return ret -} - -func callbackLookup(c uintptr) interface{} { - callbacks.Lock() - defer callbacks.Unlock() - ret := callbacks.m[c] - if ret == nil { - panic("callback pointer not found") - } - return ret -} - -func callbackDelete(c uintptr) { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m[c] == nil { - panic("callback pointer not found") - } - delete(callbacks.m, c) -} diff --git a/vendor/github.com/proglottis/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go index eee32c0323..0e81c36d66 100644 --- a/vendor/github.com/proglottis/gpgme/data.go +++ b/vendor/github.com/proglottis/gpgme/data.go @@ -10,6 +10,7 @@ import ( "io" "os" "runtime" + "runtime/cgo" "unsafe" ) @@ -19,30 +20,32 @@ const ( SeekEnd = C.SEEK_END ) +var dataCallbacks = C.struct_gpgme_data_cbs{ + read: C.gpgme_data_read_cb_t(C.gogpgme_readfunc), + write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc), + seek: C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc), +} + //export gogpgme_readfunc func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - n, err := d.r.Read(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } - C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n)) return C.ssize_t(n) } //export gogpgme_writefunc func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size)) - n, err := d.w.Write(d.buf[:size]) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) + n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size)) if err != nil && err != io.EOF { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -51,9 
+54,11 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { //export gogpgme_seekfunc func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t { - d := callbackLookup(uintptr(handle)).(*Data) + h := *(*cgo.Handle)(handle) + d := h.Value().(*Data) n, err := d.s.Seek(int64(offset), int(whence)) if err != nil { + d.err = err C.gpgme_err_set_errno(C.EIO) return -1 } @@ -63,12 +68,11 @@ func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) // The Data buffer used to communicate with GPGME type Data struct { dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C - buf []byte - cbs C.struct_gpgme_data_cbs r io.Reader w io.Writer s io.Seeker - cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh) + err error } func newData() *Data { @@ -86,6 +90,7 @@ func NewData() (*Data, error) { // NewDataFile returns a new file based data buffer func NewDataFile(f *os.File) (*Data, error) { d := newData() + d.r = f return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) } @@ -103,20 +108,22 @@ func NewDataBytes(b []byte) (*Data, error) { func NewDataReader(r io.Reader) (*Data, error) { d := newData() d.r = r - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := r.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataWriter returns a new callback based data buffer func NewDataWriter(w io.Writer) (*Data, error) { d := newData() d.w = w - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := w.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataReadWriter returns a new callback based data buffer @@ -124,11 +131,11 @@ func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { d := newData() d.r = rw d.w = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + if s, ok := rw.(io.Seeker); ok { + d.s = s + } + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // NewDataReadWriteSeeker returns a new callback based data buffer @@ -137,12 +144,8 @@ func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { d.r = rw d.w = rw d.s = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) + d.cbc = cgo.NewHandle(d) + return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc))) } // Close releases any resources associated with the data buffer @@ -151,7 +154,7 @@ func (d *Data) Close() 
error { return nil } if d.cbc > 0 { - callbackDelete(d.cbc) + d.cbc.Delete() } _, err := C.gpgme_data_release(d.dh) runtime.KeepAlive(d) @@ -160,24 +163,42 @@ func (d *Data) Close() error { } func (d *Data) Write(p []byte) (int, error) { - n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil } func (d *Data) Read(p []byte) (int, error) { - n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + var buffer *byte + if len(p) > 0 { + buffer = &p[0] + } + + n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p))) runtime.KeepAlive(d) - if err != nil { + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: return 0, err - } - if n == 0 { + case len(p) > 0 && n == 0: return 0, io.EOF } return int(n), nil @@ -186,7 +207,15 @@ func (d *Data) Read(p []byte) (int, error) { func (d *Data) Seek(offset int64, whence int) (int64, error) { n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence)) runtime.KeepAlive(d) - return int64(n), err + switch { + case d.err != nil: + defer func() { d.err = nil }() + + return 0, d.err + case err != nil: + return 0, err + } + return int64(n), nil } // Name returns the associated filename if any diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c index 00da3ab304..f5b38ce7ed 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.c +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.c @@ -1,13 +1,5 @@ #include "go_gpgme.h" -gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { - return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); -} - -void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) { - gpgme_set_passphrase_cb(ctx, cb, (void *)handle); -} - gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { return gpgme_data_seek(dh, offset, whence); } @@ -15,17 +7,17 @@ gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { gpgme_error_t gogpgme_op_assuan_transact_ext( gpgme_ctx_t ctx, char* cmd, - uintptr_t data_h, - uintptr_t inquiry_h, - uintptr_t status_h, + void* data_h, + void* inquiry_h, + void* status_h, gpgme_error_t *operr ){ return gpgme_op_assuan_transact_ext( ctx, cmd, - (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h, - (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h, - (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h, + (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h, + (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h, + (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h, operr ); } diff --git a/vendor/github.com/proglottis/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h index eb3a4ba88b..8eb5fddd9f 100644 --- a/vendor/github.com/proglottis/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -10,11 +10,9 @@ extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t 
gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); -extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); -extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence); -extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr); +extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr); extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen ); extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args); diff --git a/vendor/github.com/proglottis/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go index 8f495ddf5d..15af69c865 100644 --- a/vendor/github.com/proglottis/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -7,11 +7,13 @@ package gpgme // #include // #include "go_gpgme.h" import "C" + import ( "fmt" "io" "os" "runtime" + "runtime/cgo" "time" "unsafe" ) @@ -27,7 +29,8 @@ type Callback func(uidHint string, prevWasBad bool, f *os.File) error //export gogpgme_passfunc func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t { - c := callbackLookup(uintptr(hook)).(*Context) + h := *(*cgo.Handle)(hook) + c := h.Value().(*Context) go_uid_hint := C.GoString(uid_hint) f := os.NewFile(uintptr(fd), go_uid_hint) defer f.Close() @@ -233,6 +236,17 @@ func SetEngineInfo(proto Protocol, fileName, homeDir string) error { return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) } +func GetDirInfo(what string) string { + cwhat := C.CString(what) + defer C.free(unsafe.Pointer(cwhat)) + + cdir := C.gpgme_get_dirinfo(cwhat) + if cdir == nil { + return "" + } + return C.GoString(cdir) +} + func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { var keys []*Key ctx, err := New() @@ -243,7 +257,7 @@ func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { if err := ctx.KeyListStart(pattern, secretOnly); err != nil { return keys, err } - defer ctx.KeyListEnd() + defer func() { _ = ctx.KeyListEnd() }() for ctx.KeyListNext() { keys = append(keys, ctx.Key) } @@ -268,8 +282,10 @@ func Decrypt(r io.Reader) (*Data, error) { if err != nil { return nil, err } - err = ctx.Decrypt(cipher, plain) - plain.Seek(0, SeekSet) + if err := ctx.Decrypt(cipher, plain); err != nil { + return nil, err + } + _, err = plain.Seek(0, SeekSet) return plain, err } @@ -278,7 +294,7 @@ type Context struct { KeyError error callback Callback - cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) + cbc cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C } @@ -295,7 +311,7 @@ func (c *Context) Release() { return } if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } C.gpgme_release(c.ctx) runtime.KeepAlive(c) @@ -364,15 +380,14 @@ func (c *Context) SetCallback(callback Callback) error { var err error 
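The pattern running through this gpgme diff replaces the package's hand-rolled callback registry with runtime/cgo handles: the Go side wraps its state in a cgo.Handle, passes the handle's address through C as a void*, and the exported callback dereferences it back to the original value. A minimal sketch of just the Go half (names are illustrative; no actual C involved):

```go
package main

import (
	"fmt"
	"runtime/cgo"
	"unsafe"
)

// lookup plays the role of an exported C callback: it receives a void*,
// recovers the cgo.Handle stored there, and through it the Go value.
func lookup(p unsafe.Pointer) string {
	h := *(*cgo.Handle)(p)
	return h.Value().(string)
}

func main() {
	h := cgo.NewHandle("callback state")
	defer h.Delete() // handles leak unless freed, hence the Delete calls above

	// The handle is an opaque uintptr, so no Go pointer escapes to C.
	fmt.Println(lookup(unsafe.Pointer(&h))) // callback state
}
```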
c.callback = callback if c.cbc > 0 { - callbackDelete(c.cbc) + c.cbc.Delete() } if callback != nil { - cbc := callbackAdd(c) - c.cbc = cbc - _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) + c.cbc = cgo.NewHandle(c) + _, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc)) } else { c.cbc = 0 - _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) + _, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil) } runtime.KeepAlive(c) return err @@ -564,9 +579,11 @@ func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { return err } -type AssuanDataCallback func(data []byte) error -type AssuanInquireCallback func(name, args string) error -type AssuanStatusCallback func(status, args string) error +type ( + AssuanDataCallback func(data []byte) error + AssuanInquireCallback func(name, args string) error + AssuanStatusCallback func(status, args string) error +) // AssuanSend sends a raw Assuan command to gpg-agent func (c *Context) AssuanSend( @@ -577,17 +594,17 @@ func (c *Context) AssuanSend( ) error { var operr C.gpgme_error_t - dataPtr := callbackAdd(&data) - inquiryPtr := callbackAdd(&inquiry) - statusPtr := callbackAdd(&status) + dataPtr := cgo.NewHandle(&data) + inquiryPtr := cgo.NewHandle(&inquiry) + statusPtr := cgo.NewHandle(&status) cmdCStr := C.CString(cmd) defer C.free(unsafe.Pointer(cmdCStr)) err := C.gogpgme_op_assuan_transact_ext( c.ctx, cmdCStr, - C.uintptr_t(dataPtr), - C.uintptr_t(inquiryPtr), - C.uintptr_t(statusPtr), + unsafe.Pointer(&dataPtr), + unsafe.Pointer(&inquiryPtr), + unsafe.Pointer(&statusPtr), &operr, ) runtime.KeepAlive(c) @@ -600,11 +617,14 @@ func (c *Context) AssuanSend( //export gogpgme_assuan_data_callback func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t { - c := callbackLookup(uintptr(handle)).(*AssuanDataCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanDataCallback) if *c == nil { return 0 } - (*c)(C.GoBytes(data, C.int(datalen))) + if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -612,11 +632,14 @@ func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, da func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t { name := C.GoString(cName) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanInquireCallback) if *c == nil { return 0 } - (*c)(name, args) + if err := (*c)(name, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } @@ -624,11 +647,14 @@ func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t { status := C.GoString(cStatus) args := C.GoString(cArgs) - c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback) + h := *(*cgo.Handle)(handle) + c := h.Value().(*AssuanStatusCallback) if *c == nil { return 0 } - (*c)(status, args) + if err := (*c)(status, args); err != nil { + return C.gpgme_error(C.GPG_ERR_USER_1) + } return 0 } diff --git a/vendor/github.com/proglottis/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go index 986aca59f6..8add8ec877 100644 --- a/vendor/github.com/proglottis/gpgme/unset_agent_info.go +++ 
b/vendor/github.com/proglottis/gpgme/unset_agent_info.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package gpgme diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index dfc1f0c0e8..6714b3488e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL) fmt.Fprintf(i.GetOutput(), "Enter verification code: ") var code string - fmt.Fscanf(i.GetInput(), "%s", &code) + _, _ = fmt.Fscanf(i.GetInput(), "%s", &code) // New line in case read input doesn't move cursor to next line. fmt.Fprintln(i.GetOutput()) return code diff --git a/vendor/go.mozilla.org/pkcs7/.gitignore b/vendor/github.com/smallstep/pkcs7/.gitignore similarity index 88% rename from vendor/go.mozilla.org/pkcs7/.gitignore rename to vendor/github.com/smallstep/pkcs7/.gitignore index daf913b1b3..948aae2acf 100644 --- a/vendor/go.mozilla.org/pkcs7/.gitignore +++ b/vendor/github.com/smallstep/pkcs7/.gitignore @@ -22,3 +22,7 @@ _testmain.go *.exe *.test *.prof + +# Development +.envrc +coverage.out \ No newline at end of file diff --git a/vendor/go.mozilla.org/pkcs7/LICENSE b/vendor/github.com/smallstep/pkcs7/LICENSE similarity index 100% rename from vendor/go.mozilla.org/pkcs7/LICENSE rename to vendor/github.com/smallstep/pkcs7/LICENSE diff --git a/vendor/go.mozilla.org/pkcs7/Makefile b/vendor/github.com/smallstep/pkcs7/Makefile similarity index 100% rename from vendor/go.mozilla.org/pkcs7/Makefile rename to vendor/github.com/smallstep/pkcs7/Makefile diff --git a/vendor/github.com/smallstep/pkcs7/README.md b/vendor/github.com/smallstep/pkcs7/README.md new file mode 100644 index 0000000000..9d94e65f25 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/README.md @@ -0,0 +1,63 @@ +# pkcs7 + +[![Go Reference](https://pkg.go.dev/badge/github.com/smallstep/pkcs7.svg)](https://pkg.go.dev/github.com/smallstep/pkcs7) +[![Build Status](https://github.com/smallstep/pkcs7/workflows/CI/badge.svg?query=branch%3Amain+event%3Apush)](https://github.com/smallstep/pkcs7/actions/workflows/ci.yml?query=branch%3Amain+event%3Apush) + +pkcs7 implements parsing and creating signed and enveloped messages. 
+ +```go +package main + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + + "github.com/smallstep/pkcs7" +) + +func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) { + toBeSigned, err := NewSignedData(content) + if err != nil { + return fmt.Errorf("Cannot initialize signed data: %w", err) + } + if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil { + return fmt.Errorf("Cannot add signer: %w", err) + } + + // Detach signature, omit if you want an embedded signature + toBeSigned.Detach() + + signed, err = toBeSigned.Finish() + if err != nil { + return fmt.Errorf("Cannot finish signing data: %w", err) + } + + // Verify the signature + pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) + p7, err := pkcs7.Parse(signed) + if err != nil { + return fmt.Errorf("Cannot parse our signed data: %w", err) + } + + // since the signature was detached, reattach the content here + p7.Content = content + + if bytes.Compare(content, p7.Content) != 0 { + return fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) + } + if err = p7.Verify(); err != nil { + return fmt.Errorf("Cannot verify our signed data: %w", err) + } + + return signed, nil +} +``` + + +## Credits + +This is a fork of [mozilla-services/pkcs7](https://github.com/mozilla-services/pkcs7) which, itself, was a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7). diff --git a/vendor/go.mozilla.org/pkcs7/ber.go b/vendor/github.com/smallstep/pkcs7/ber.go similarity index 91% rename from vendor/go.mozilla.org/pkcs7/ber.go rename to vendor/github.com/smallstep/pkcs7/ber.go index 73da024a0d..52333215de 100644 --- a/vendor/go.mozilla.org/pkcs7/ber.go +++ b/vendor/github.com/smallstep/pkcs7/ber.go @@ -5,8 +5,6 @@ import ( "errors" ) -var encodeIndent = 0 - type asn1Object interface { EncodeTo(writer *bytes.Buffer) error } @@ -17,8 +15,6 @@ type asn1Structured struct { } func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { - //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) - encodeIndent++ inner := new(bytes.Buffer) for _, obj := range s.content { err := obj.EncodeTo(inner) @@ -26,7 +22,6 @@ func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { return err } } - encodeIndent-- out.Write(s.tagBytes) encodeLength(out, inner.Len()) out.Write(inner.Bytes()) @@ -47,8 +42,8 @@ func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { if err = encodeLength(out, p.length); err != nil { return err } - //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) - //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) + // fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + // fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) out.Write(p.content) return nil @@ -58,7 +53,7 @@ func ber2der(ber []byte) ([]byte, error) { if len(ber) == 0 { return nil, errors.New("ber2der: input ber is empty") } - //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + // fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) out := new(bytes.Buffer) obj, _, err := readObject(ber, 0) @@ -69,7 +64,7 @@ func ber2der(ber []byte) ([]byte, error) { // if offset < len(ber) { // return nil, fmt.Errorf("ber2der: Content longer than expected. 
-	//}
+	// }
 
 	return out.Bytes(), nil
 }
@@ -154,7 +149,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 		}
 	}
 	// jvehent 20170227: this doesn't appear to be used anywhere...
-	//tag = tag*128 + ber[offset] - 0x80
+	// tag = tag*128 + ber[offset] - 0x80
 	offset++
 	if offset > berLen {
 		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
@@ -204,7 +199,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 	if length < 0 {
 		return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length")
 	}
-	//fmt.Printf("--> length : %d\n", length)
+	// fmt.Printf("--> length : %d\n", length)
 	contentEnd := offset + length
 	if contentEnd > len(ber) {
 		return nil, 0, errors.New("ber2der: BER tag length is more than available data")
@@ -259,7 +254,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
 }
 
 func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
-	if len(ber) - offset < 2 {
+	if len(ber)-offset < 2 {
 		return false, errors.New("ber2der: Invalid BER format")
 	}
@@ -267,5 +262,5 @@ func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
 }
 
 func debugprint(format string, a ...interface{}) {
-	//fmt.Printf(format, a)
+	// fmt.Printf(format, a)
 }
diff --git a/vendor/go.mozilla.org/pkcs7/decrypt.go b/vendor/github.com/smallstep/pkcs7/decrypt.go
similarity index 62%
rename from vendor/go.mozilla.org/pkcs7/decrypt.go
rename to vendor/github.com/smallstep/pkcs7/decrypt.go
index 0d088d6287..76dc17f74c 100644
--- a/vendor/go.mozilla.org/pkcs7/decrypt.go
+++ b/vendor/github.com/smallstep/pkcs7/decrypt.go
@@ -9,6 +9,7 @@ import (
 	"crypto/rand"
 	"crypto/rsa"
 	"crypto/x509"
+	"crypto/x509/pkix"
 	"encoding/asn1"
 	"errors"
 	"fmt"
@@ -17,6 +18,12 @@ import (
 // ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
 var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
 
+// ErrUnsupportedAsymmetricEncryptionAlgorithm is returned when attempting to use an unknown asymmetric encryption algorithm
+var ErrUnsupportedAsymmetricEncryptionAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA PKCS#1 v1.5 and RSA OAEP are supported")
+
+// ErrUnsupportedKeyType is returned when attempting to encrypt keys using a key that's not an RSA key
+var ErrUnsupportedKeyType = errors.New("pkcs7: only RSA keys are supported")
+
 // ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
 var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
 
@@ -31,9 +38,21 @@ func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte
 		return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
 	}
 	switch pkey := pkey.(type) {
-	case *rsa.PrivateKey:
-		var contentKey []byte
-		contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey)
+	case crypto.Decrypter:
+		var opts crypto.DecrypterOpts
+		switch algorithm := recipient.KeyEncryptionAlgorithm.Algorithm; {
+		case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP):
+			hashFunc, err := getHashFuncForKeyEncryptionAlgorithm(recipient.KeyEncryptionAlgorithm)
+			if err != nil {
+				return nil, err
+			}
+			opts = &rsa.OAEPOptions{Hash: hashFunc}
+		case algorithm.Equal(OIDEncryptionAlgorithmRSA):
+			opts = &rsa.PKCS1v15DecryptOptions{}
+		default:
+			return nil, ErrUnsupportedAsymmetricEncryptionAlgorithm
+		}
+
contentKey, err := pkey.Decrypt(rand.Reader, recipient.EncryptedKey, opts) if err != nil { return nil, err } @@ -42,6 +61,44 @@ func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte return nil, ErrUnsupportedAlgorithm } +// RFC 4055, 4.1 +// The current ASN.1 parser does not support non-integer defaults so the 'default:' tags here do nothing. +type rsaOAEPAlgorithmParameters struct { + HashFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"` + MaskGenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"` + PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"` +} + +func getHashFuncForKeyEncryptionAlgorithm(keyEncryptionAlgorithm pkix.AlgorithmIdentifier) (crypto.Hash, error) { + invalidHashFunc := crypto.Hash(0) + params := &rsaOAEPAlgorithmParameters{ + HashFunc: pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1}, // set default hash algorithm to SHA1 + } + var rest []byte + rest, err := asn1.Unmarshal(keyEncryptionAlgorithm.Parameters.FullBytes, params) + if err != nil { + return invalidHashFunc, fmt.Errorf("pkcs7: failed unmarshaling key encryption algorithm parameters: %v", err) + } + if len(rest) != 0 { + return invalidHashFunc, errors.New("pkcs7: trailing data after RSA OAEP parameters") + } + + switch { + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return crypto.SHA1, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA224): + return crypto.SHA224, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return crypto.SHA256, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return crypto.SHA384, nil + case params.HashFunc.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return crypto.SHA512, nil + default: + return invalidHashFunc, errors.New("pkcs7: unsupported hash function for RSA OAEP") + } +} + // DecryptUsingPSK decrypts encrypted data using caller provided // pre-shared secret func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { @@ -60,7 +117,6 @@ func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { - fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg) return nil, ErrUnsupportedAlgorithm } @@ -147,10 +203,10 @@ func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { func unpad(data []byte, blocklen int) ([]byte, error) { if blocklen < 1 { - return nil, fmt.Errorf("invalid blocklen %d", blocklen) + return nil, fmt.Errorf("pkcs7: invalid blocklen %d", blocklen) } if len(data)%blocklen != 0 || len(data) == 0 { - return nil, fmt.Errorf("invalid data len %d", len(data)) + return nil, fmt.Errorf("pkcs7: invalid data len %d", len(data)) } // the last byte is the length of padding @@ -160,7 +216,7 @@ func unpad(data []byte, blocklen int) ([]byte, error) { pad := data[len(data)-padlen:] for _, padbyte := range pad { if padbyte != byte(padlen) { - return nil, errors.New("invalid padding") + return nil, errors.New("pkcs7: invalid padding") } } diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/github.com/smallstep/pkcs7/encrypt.go similarity index 74% rename from vendor/go.mozilla.org/pkcs7/encrypt.go rename to vendor/github.com/smallstep/pkcs7/encrypt.go index 6b2655708c..a5c96e7553 100644 --- a/vendor/go.mozilla.org/pkcs7/encrypt.go +++ 
b/vendor/github.com/smallstep/pkcs7/encrypt.go @@ -2,6 +2,7 @@ package pkcs7 import ( "bytes" + "crypto" "crypto/aes" "crypto/cipher" "crypto/des" @@ -66,6 +67,24 @@ var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC // content with an unsupported algorithm. var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") +// KeyEncryptionAlgorithm determines the algorithm used to encrypt a +// content key. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var KeyEncryptionAlgorithm = OIDEncryptionAlgorithmRSA + +// ErrUnsupportedKeyEncryptionAlgorithm is returned when an +// unsupported key encryption algorithm OID is provided. +var ErrUnsupportedKeyEncryptionAlgorithm = errors.New("pkcs7: unsupported key encryption algorithm provided") + +// KeyEncryptionHash determines the crypto.Hash algorithm to use +// when encrypting a content key. Change the value of this variable +// to change which algorithm is used in the Encrypt() function. +var KeyEncryptionHash = crypto.SHA256 + +// ErrUnsupportedKeyEncryptionHash is returned when an +// unsupported key encryption hash is provided. +var ErrUnsupportedKeyEncryptionHash = errors.New("pkcs7: unsupported key encryption hash provided") + // ErrPSKNotProvided is returned when attempting to encrypt // using a PSK without actually providing the PSK. var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") @@ -256,7 +275,7 @@ func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, e // value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the // value before calling Encrypt(). For example: // -// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES256GCM // // TODO(fullsailor): Add support for encrypting content with other algorithms func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { @@ -288,7 +307,27 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { // Prepare each recipient's encrypted cipher key recipientInfos := make([]recipientInfo, len(recipients)) for i, recipient := range recipients { - encrypted, err := encryptKey(key, recipient) + algorithm := KeyEncryptionAlgorithm + hash := KeyEncryptionHash + var kea pkix.AlgorithmIdentifier + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + parameters, err := getParametersForKeyEncryptionAlgorithm(algorithm, hash) + if err != nil { + return nil, fmt.Errorf("failed to get parameters for key encryption: %v", err) + } + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + Parameters: parameters, + } + case algorithm.Equal(OIDEncryptionAlgorithmRSA): + kea = pkix.AlgorithmIdentifier{ + Algorithm: algorithm, + } + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm + } + encrypted, err := encryptKey(key, recipient, algorithm, hash) if err != nil { return nil, err } @@ -297,12 +336,10 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { return nil, err } info := recipientInfo{ - Version: 0, - IssuerAndSerialNumber: ias, - KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: OIDEncryptionAlgorithmRSA, - }, - EncryptedKey: encrypted, + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: kea, + EncryptedKey: encrypted, } recipientInfos[i] = info } @@ -327,6 +364,37 @@ func Encrypt(content []byte, recipients 
[]*x509.Certificate) ([]byte, error) { return asn1.Marshal(wrapper) } +func getParametersForKeyEncryptionAlgorithm(algorithm asn1.ObjectIdentifier, hash crypto.Hash) (asn1.RawValue, error) { + if !algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP) { + return asn1.RawValue{}, nil // return empty; not used + } + + params := rsaOAEPAlgorithmParameters{} + switch hash { + case crypto.SHA1: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA1} + case crypto.SHA224: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA224} + case crypto.SHA256: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA256} + case crypto.SHA384: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA384} + case crypto.SHA512: + params.HashFunc = pkix.AlgorithmIdentifier{Algorithm: OIDDigestAlgorithmSHA512} + default: + return asn1.RawValue{}, ErrUnsupportedAlgorithm + } + + b, err := asn1.Marshal(params) + if err != nil { + return asn1.RawValue{}, fmt.Errorf("failed marshaling key encryption parameters: %v", err) + } + + return asn1.RawValue{ + FullBytes: b, + }, nil +} + // EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, // encrypted using caller provided pre-shared secret. func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { @@ -375,15 +443,23 @@ func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { } func marshalEncryptedContent(content []byte) asn1.RawValue { - asn1Content, _ := asn1.Marshal(content) - return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true} + return asn1.RawValue{Bytes: content, Class: 2, IsCompound: false} } -func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) { - if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil { +func encryptKey(key []byte, recipient *x509.Certificate, algorithm asn1.ObjectIdentifier, hash crypto.Hash) ([]byte, error) { + pub, ok := recipient.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, ErrUnsupportedKeyType + } + + switch { + case algorithm.Equal(OIDEncryptionAlgorithmRSA): return rsa.EncryptPKCS1v15(rand.Reader, pub, key) + case algorithm.Equal(OIDEncryptionAlgorithmRSAESOAEP): + return rsa.EncryptOAEP(hash.New(), rand.Reader, pub, key, nil) + default: + return nil, ErrUnsupportedKeyEncryptionAlgorithm } - return nil, ErrUnsupportedAlgorithm } func pad(data []byte, blocklen int) ([]byte, error) { diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go new file mode 100644 index 0000000000..378cc265d2 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/debug.go @@ -0,0 +1,14 @@ +package legacyx509 + +import "fmt" + +// legacyGodebugSetting is a type mimicking Go's internal godebug package +// settings, which are used to enable / disable certain functionalities at +// build time. +type legacyGodebugSetting int + +func (s legacyGodebugSetting) Value() string { + return fmt.Sprintf("%d", s) +} + +func (s legacyGodebugSetting) IncNonDefault() {} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go new file mode 100644 index 0000000000..7d1469b6d0 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/doc.go @@ -0,0 +1,14 @@ +/* +Package legacyx509 is a copy of certain parts of Go's crypto/x509 package. 
+It is based on Go 1.23, and contains just the parts required for parsing
+X509 certificates.
+
+The primary reason this copy exists is to keep support for parsing PKCS7
+messages containing Simple Certificate Enrolment Protocol (SCEP) requests
+from Windows devices. Go 1.23 made a change marking certificates with a
+critical authority key identifier as invalid, which is mandated by RFC 5280,
+but apparently Windows marks those specific certificates as such, resulting
+in those SCEP requests failing to parse correctly.
+*/
+
+package legacyx509
diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
new file mode 100644
index 0000000000..8268a07c50
--- /dev/null
+++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/oid.go
@@ -0,0 +1,377 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package legacyx509
+
+import (
+	"bytes"
+	"encoding/asn1"
+	"errors"
+	"math"
+	"math/big"
+	"math/bits"
+	"strconv"
+	"strings"
+)
+
+var (
+	errInvalidOID = errors.New("invalid oid")
+)
+
+// An OID represents an ASN.1 OBJECT IDENTIFIER.
+type OID struct {
+	der []byte
+}
+
+// ParseOID parses an Object Identifier string, represented by ASCII numbers separated by dots.
+func ParseOID(oid string) (OID, error) {
+	var o OID
+	return o, o.unmarshalOIDText(oid)
+}
+
+func newOIDFromDER(der []byte) (OID, bool) {
+	if len(der) == 0 || der[len(der)-1]&0x80 != 0 {
+		return OID{}, false
+	}
+
+	start := 0
+	for i, v := range der {
+		// ITU-T X.690, section 8.19.2:
+		// The subidentifier shall be encoded in the fewest possible octets,
+		// that is, the leading octet of the subidentifier shall not have the value 0x80.
+		if i == start && v == 0x80 {
+			return OID{}, false
+		}
+		if v&0x80 == 0 {
+			start = i + 1
+		}
+	}
+
+	return OID{der}, true
+}
+
+// OIDFromInts creates a new OID using ints, each integer is a separate component.
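+// For example, OIDFromInts([]uint64{1, 2, 840, 113549}) encodes the
+// Object Identifier 1.2.840.113549.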
+func OIDFromInts(oid []uint64) (OID, error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return OID{}, errInvalidOID + } + + length := base128IntLength(oid[0]*40 + oid[1]) + for _, v := range oid[2:] { + length += base128IntLength(v) + } + + der := make([]byte, 0, length) + der = appendBase128Int(der, oid[0]*40+oid[1]) + for _, v := range oid[2:] { + der = appendBase128Int(der, v) + } + return OID{der}, nil +} + +func base128IntLength(n uint64) int { + if n == 0 { + return 1 + } + return (bits.Len64(n) + 6) / 7 +} + +func appendBase128Int(dst []byte, n uint64) []byte { + for i := base128IntLength(n) - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +func base128BigIntLength(n *big.Int) int { + if n.Cmp(big.NewInt(0)) == 0 { + return 1 + } + return (n.BitLen() + 6) / 7 +} + +func appendBase128BigInt(dst []byte, n *big.Int) []byte { + if n.Cmp(big.NewInt(0)) == 0 { + return append(dst, 0) + } + + for i := base128BigIntLength(n) - 1; i >= 0; i-- { + o := byte(big.NewInt(0).Rsh(n, uint(i)*7).Bits()[0]) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + dst = append(dst, o) + } + return dst +} + +// AppendText implements [encoding.TextAppender] +func (o OID) AppendText(b []byte) ([]byte, error) { + return append(b, o.String()...), nil +} + +// MarshalText implements [encoding.TextMarshaler] +func (o OID) MarshalText() ([]byte, error) { + return o.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] +func (o *OID) UnmarshalText(text []byte) error { + return o.unmarshalOIDText(string(text)) +} + +// cutString slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func cutString(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (o *OID) unmarshalOIDText(oid string) error { + // (*big.Int).SetString allows +/- signs, but we don't want + // to allow them in the string representation of Object Identifier, so + // reject such encodings. + for _, c := range oid { + isDigit := c >= '0' && c <= '9' + if !isDigit && c != '.' 
{ + return errInvalidOID + } + } + + var ( + firstNum string + secondNum string + ) + + var nextComponentExists bool + firstNum, oid, nextComponentExists = cutString(oid, ".") + if !nextComponentExists { + return errInvalidOID + } + secondNum, oid, nextComponentExists = cutString(oid, ".") + + var ( + first = big.NewInt(0) + second = big.NewInt(0) + ) + + if _, ok := first.SetString(firstNum, 10); !ok { + return errInvalidOID + } + if _, ok := second.SetString(secondNum, 10); !ok { + return errInvalidOID + } + + if first.Cmp(big.NewInt(2)) > 0 || (first.Cmp(big.NewInt(2)) < 0 && second.Cmp(big.NewInt(40)) >= 0) { + return errInvalidOID + } + + firstComponent := first.Mul(first, big.NewInt(40)) + firstComponent.Add(firstComponent, second) + + der := appendBase128BigInt(make([]byte, 0, 32), firstComponent) + + for nextComponentExists { + var strNum string + strNum, oid, nextComponentExists = cutString(oid, ".") + b, ok := big.NewInt(0).SetString(strNum, 10) + if !ok { + return errInvalidOID + } + der = appendBase128BigInt(der, b) + } + + o.der = der + return nil +} + +// AppendBinary implements [encoding.BinaryAppender] +func (o OID) AppendBinary(b []byte) ([]byte, error) { + return append(b, o.der...), nil +} + +// MarshalBinary implements [encoding.BinaryMarshaler] +func (o OID) MarshalBinary() ([]byte, error) { + return o.AppendBinary(nil) +} + +// cloneBytes returns a copy of b[:len(b)]. +// The result may have additional unused capacity. +// Clone(nil) returns nil. +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } + return append([]byte{}, b...) +} + +// UnmarshalBinary implements [encoding.BinaryUnmarshaler] +func (o *OID) UnmarshalBinary(b []byte) error { + oid, ok := newOIDFromDER(cloneBytes(b)) + if !ok { + return errInvalidOID + } + *o = oid + return nil +} + +// Equal returns true when oid and other represents the same Object Identifier. +func (oid OID) Equal(other OID) bool { + // There is only one possible DER encoding of + // each unique Object Identifier. + return bytes.Equal(oid.der, other.der) +} + +func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, failed bool) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + failed = true + return + } + ret64 <<= 7 + b := bytes[offset] + // integers should be minimally encoded, so the leading octet should + // never be 0x80 + if shifted == 0 && b == 0x80 { + failed = true + return + } + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + failed = true + } + return + } + } + failed = true + return +} + +// EqualASN1OID returns whether an OID equals an asn1.ObjectIdentifier. If +// asn1.ObjectIdentifier cannot represent the OID specified by oid, because +// a component of OID requires more than 31 bits, it returns false. +func (oid OID) EqualASN1OID(other asn1.ObjectIdentifier) bool { + if len(other) < 2 { + return false + } + v, offset, failed := parseBase128Int(oid.der, 0) + if failed { + // This should never happen, since we've already parsed the OID, + // but just in case. 
+ return false + } + if v < 80 { + a, b := v/40, v%40 + if other[0] != a || other[1] != b { + return false + } + } else { + a, b := 2, v-80 + if other[0] != a || other[1] != b { + return false + } + } + + i := 2 + for ; offset < len(oid.der); i++ { + v, offset, failed = parseBase128Int(oid.der, offset) + if failed { + // Again, shouldn't happen, since we've already parsed + // the OID, but better safe than sorry. + return false + } + if i >= len(other) || v != other[i] { + return false + } + } + + return i == len(other) +} + +// Strings returns the string representation of the Object Identifier. +func (oid OID) String() string { + var b strings.Builder + b.Grow(32) + const ( + valSize = 64 // size in bits of val. + bitsPerByte = 7 + maxValSafeShift = (1 << (valSize - bitsPerByte)) - 1 + ) + var ( + start = 0 + val = uint64(0) + numBuf = make([]byte, 0, 21) + bigVal *big.Int + overflow bool + ) + for i, v := range oid.der { + curVal := v & 0x7F + valEnd := v&0x80 == 0 + if valEnd { + if start != 0 { + b.WriteByte('.') + } + } + if !overflow && val > maxValSafeShift { + if bigVal == nil { + bigVal = new(big.Int) + } + bigVal = bigVal.SetUint64(val) + overflow = true + } + if overflow { + bigVal = bigVal.Lsh(bigVal, bitsPerByte).Or(bigVal, big.NewInt(int64(curVal))) + if valEnd { + if start == 0 { + b.WriteString("2.") + bigVal = bigVal.Sub(bigVal, big.NewInt(80)) + } + numBuf = bigVal.Append(numBuf, 10) + b.Write(numBuf) + numBuf = numBuf[:0] + val = 0 + start = i + 1 + overflow = false + } + continue + } + val <<= bitsPerByte + val |= uint64(curVal) + if valEnd { + if start == 0 { + if val < 80 { + b.Write(strconv.AppendUint(numBuf, val/40, 10)) + b.WriteByte('.') + b.Write(strconv.AppendUint(numBuf, val%40, 10)) + } else { + b.WriteString("2.") + b.Write(strconv.AppendUint(numBuf, val-80, 10)) + } + } else { + b.Write(strconv.AppendUint(numBuf, val, 10)) + } + val = 0 + start = i + 1 + } + } + return b.String() +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go new file mode 100644 index 0000000000..ec57e79f6e --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/parser.go @@ -0,0 +1,1027 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + stdx509 "crypto/x509" +) + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. +func ParseCertificates(der []byte) ([]*stdx509.Certificate, error) { + var certs []*stdx509.Certificate + for len(der) > 0 { + cert, err := parseCertificate(der) + if err != nil { + return nil, err + } + certs = append(certs, cert) + der = der[len(cert.Raw):] + } + return certs, nil +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// This is a simplified version of encoding/asn1.isPrintable. 
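+// For example, isPrintable('A') and isPrintable('*') report true (the latter
+// only because real-world certificates misuse the type, as noted below),
+// while isPrintable('@') reports false.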
+func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + b == '&' +} + +// parseASN1String parses the ASN.1 string types T61String, PrintableString, +// UTF8String, BMPString, IA5String, and NumericString. This is mostly copied +// from the respective encoding/asn1.parse... methods, rather than just +// increasing the API surface of that package. +func parseASN1String(tag cryptobyte_asn1.Tag, value []byte) (string, error) { + switch tag { + case cryptobyte_asn1.T61String: + return string(value), nil + case cryptobyte_asn1.PrintableString: + for _, b := range value { + if !isPrintable(b) { + return "", errors.New("invalid PrintableString") + } + } + return string(value), nil + case cryptobyte_asn1.UTF8String: + if !utf8.Valid(value) { + return "", errors.New("invalid UTF-8 string") + } + return string(value), nil + case cryptobyte_asn1.Tag(asn1.TagBMPString): + if len(value)%2 != 0 { + return "", errors.New("invalid BMPString") + } + + // Strip terminator if present. + if l := len(value); l >= 2 && value[l-1] == 0 && value[l-2] == 0 { + value = value[:l-2] + } + + s := make([]uint16, 0, len(value)/2) + for len(value) > 0 { + s = append(s, uint16(value[0])<<8+uint16(value[1])) + value = value[2:] + } + + return string(utf16.Decode(s)), nil + case cryptobyte_asn1.IA5String: + s := string(value) + if isIA5String(s) != nil { + return "", errors.New("invalid IA5String") + } + return s, nil + case cryptobyte_asn1.Tag(asn1.TagNumericString): + for _, b := range value { + if !('0' <= b && b <= '9' || b == ' ') { + return "", errors.New("invalid NumericString") + } + } + return string(value), nil + } + return "", fmt.Errorf("unsupported string type: %v", tag) +} + +// parseName parses a DER encoded Name as defined in RFC 5280. We may +// want to export this function in the future for use in crypto/tls. 
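+// Within this copy it is called from parseCertificate to parse the issuer
+// and subject RDN sequences.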
+func parseName(raw cryptobyte.String) (*pkix.RDNSequence, error) { + if !raw.ReadASN1(&raw, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence") + } + + var rdnSeq pkix.RDNSequence + for !raw.Empty() { + var rdnSet pkix.RelativeDistinguishedNameSET + var set cryptobyte.String + if !raw.ReadASN1(&set, cryptobyte_asn1.SET) { + return nil, errors.New("x509: invalid RDNSequence") + } + for !set.Empty() { + var atav cryptobyte.String + if !set.ReadASN1(&atav, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute") + } + var attr pkix.AttributeTypeAndValue + if !atav.ReadASN1ObjectIdentifier(&attr.Type) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute type") + } + var rawValue cryptobyte.String + var valueTag cryptobyte_asn1.Tag + if !atav.ReadAnyASN1(&rawValue, &valueTag) { + return nil, errors.New("x509: invalid RDNSequence: invalid attribute value") + } + var err error + attr.Value, err = parseASN1String(valueTag, rawValue) + if err != nil { + return nil, fmt.Errorf("x509: invalid RDNSequence: invalid attribute value: %s", err) + } + rdnSet = append(rdnSet, attr) + } + + rdnSeq = append(rdnSeq, rdnSet) + } + + return &rdnSeq, nil +} + +func parseAI(der cryptobyte.String) (pkix.AlgorithmIdentifier, error) { + ai := pkix.AlgorithmIdentifier{} + if !der.ReadASN1ObjectIdentifier(&ai.Algorithm) { + return ai, errors.New("x509: malformed OID") + } + if der.Empty() { + return ai, nil + } + var params cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1Element(¶ms, &tag) { + return ai, errors.New("x509: malformed parameters") + } + ai.Parameters.Tag = int(tag) + ai.Parameters.FullBytes = params + return ai, nil +} + +func parseTime(der *cryptobyte.String) (time.Time, error) { + var t time.Time + switch { + case der.PeekASN1Tag(cryptobyte_asn1.UTCTime): + if !der.ReadASN1UTCTime(&t) { + return t, errors.New("x509: malformed UTCTime") + } + case der.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime): + if !der.ReadASN1GeneralizedTime(&t) { + return t, errors.New("x509: malformed GeneralizedTime") + } + default: + return t, errors.New("x509: unsupported time format") + } + return t, nil +} + +func parseValidity(der cryptobyte.String) (time.Time, time.Time, error) { + notBefore, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + notAfter, err := parseTime(&der) + if err != nil { + return time.Time{}, time.Time{}, err + } + + return notBefore, notAfter, nil +} + +func parseExtension(der cryptobyte.String) (pkix.Extension, error) { + var ext pkix.Extension + if !der.ReadASN1ObjectIdentifier(&ext.Id) { + return ext, errors.New("x509: malformed extension OID field") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&ext.Critical) { + return ext, errors.New("x509: malformed extension critical field") + } + } + var val cryptobyte.String + if !der.ReadASN1(&val, cryptobyte_asn1.OCTET_STRING) { + return ext, errors.New("x509: malformed extension value field") + } + ext.Value = val + return ext, nil +} + +func parsePublicKey(keyData *publicKeyInfo) (interface{}, error) { + oid := keyData.Algorithm.Algorithm + params := keyData.Algorithm.Parameters + der := cryptobyte.String(keyData.PublicKey.RightAlign()) + switch { + case oid.Equal(oidPublicKeyRSA): + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. 
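+		// (asn1.NullBytes is the two-byte DER encoding of an ASN.1 NULL,
+		// 0x05 0x00.)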
+ if !bytes.Equal(params.FullBytes, asn1.NullBytes) { + return nil, errors.New("x509: RSA key missing NULL parameters") + } + + p := &pkcs1PublicKey{N: new(big.Int)} + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid RSA public key") + } + if !der.ReadASN1Integer(p.N) { + return nil, errors.New("x509: invalid RSA modulus") + } + if !der.ReadASN1Integer(&p.E) { + return nil, errors.New("x509: invalid RSA public exponent") + } + + if p.N.Sign() <= 0 { + return nil, errors.New("x509: RSA modulus is not a positive number") + } + if p.E <= 0 { + return nil, errors.New("x509: RSA public exponent is not a positive number") + } + + pub := &rsa.PublicKey{ + E: p.E, + N: p.N, + } + return pub, nil + case oid.Equal(oidPublicKeyECDSA): + paramsDer := cryptobyte.String(params.FullBytes) + namedCurveOID := new(asn1.ObjectIdentifier) + if !paramsDer.ReadASN1ObjectIdentifier(namedCurveOID) { + return nil, errors.New("x509: invalid ECDSA parameters") + } + namedCurve := namedCurveFromOID(*namedCurveOID) + if namedCurve == nil { + return nil, errors.New("x509: unsupported elliptic curve") + } + x, y := elliptic.Unmarshal(namedCurve, der) + if x == nil { + return nil, errors.New("x509: failed to unmarshal elliptic curve point") + } + pub := &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + } + return pub, nil + case oid.Equal(oidPublicKeyEd25519): + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(params.FullBytes) != 0 { + return nil, errors.New("x509: Ed25519 key encoded with illegal parameters") + } + if len(der) != ed25519.PublicKeySize { + return nil, errors.New("x509: wrong Ed25519 public key size") + } + return ed25519.PublicKey(der), nil + // case oid.Equal(oidPublicKeyX25519): + // // RFC 8410, Section 3 + // // > For all of the OIDs, the parameters MUST be absent. 
+ // if len(params.FullBytes) != 0 { + // return nil, errors.New("x509: X25519 key encoded with illegal parameters") + // } + // return ecdh.X25519().NewPublicKey(der) + case oid.Equal(oidPublicKeyDSA): + y := new(big.Int) + if !der.ReadASN1Integer(y) { + return nil, errors.New("x509: invalid DSA public key") + } + pub := &dsa.PublicKey{ + Y: y, + Parameters: dsa.Parameters{ + P: new(big.Int), + Q: new(big.Int), + G: new(big.Int), + }, + } + paramsDer := cryptobyte.String(params.FullBytes) + if !paramsDer.ReadASN1(¶msDer, cryptobyte_asn1.SEQUENCE) || + !paramsDer.ReadASN1Integer(pub.Parameters.P) || + !paramsDer.ReadASN1Integer(pub.Parameters.Q) || + !paramsDer.ReadASN1Integer(pub.Parameters.G) { + return nil, errors.New("x509: invalid DSA parameters") + } + if pub.Y.Sign() <= 0 || pub.Parameters.P.Sign() <= 0 || + pub.Parameters.Q.Sign() <= 0 || pub.Parameters.G.Sign() <= 0 { + return nil, errors.New("x509: zero or negative DSA parameter") + } + return pub, nil + default: + return nil, errors.New("x509: unknown public key algorithm") + } +} + +func parseKeyUsageExtension(der cryptobyte.String) (stdx509.KeyUsage, error) { + var usageBits asn1.BitString + if !der.ReadASN1BitString(&usageBits) { + return 0, errors.New("x509: invalid key usage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + return stdx509.KeyUsage(usage), nil +} + +func parseBasicConstraintsExtension(der cryptobyte.String) (bool, int, error) { + var isCA bool + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return false, 0, errors.New("x509: invalid basic constraints") + } + if der.PeekASN1Tag(cryptobyte_asn1.BOOLEAN) { + if !der.ReadASN1Boolean(&isCA) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + maxPathLen := -1 + if der.PeekASN1Tag(cryptobyte_asn1.INTEGER) { + if !der.ReadASN1Integer(&maxPathLen) { + return false, 0, errors.New("x509: invalid basic constraints") + } + } + + // TODO: map out.MaxPathLen to 0 if it has the -1 default value? 
(Issue 19285) + return isCA, maxPathLen, nil +} + +func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error) error { + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid subject alternative names") + } + for !der.Empty() { + var san cryptobyte.String + var tag cryptobyte_asn1.Tag + if !der.ReadAnyASN1(&san, &tag) { + return errors.New("x509: invalid subject alternative name") + } + if err := callback(int(tag^0x80), san); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(der, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + email := string(data) + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } + emailAddresses = append(emailAddresses, email) + case nameTypeDNS: + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } + dnsNames = append(dnsNames, string(name)) + case nameTypeURI: + uriStr := string(data) + if err := isIA5String(uriStr); err != nil { + return errors.New("x509: SAN uniformResourceIdentifier is malformed") + } + uri, err := url.Parse(uriStr) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + return errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data))) + } + } + + return nil + }) + + return +} + +func parseAuthorityKeyIdentifier(e pkix.Extension) ([]byte, error) { + // RFC 5280, Section 4.2.1.1 + // if e.Critical { + // // Conforming CAs MUST mark this extension as non-critical + // return nil, errors.New("x509: authority key identifier incorrectly marked critical") + // } + val := cryptobyte.String(e.Value) + var akid cryptobyte.String + if !val.ReadASN1(&akid, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: invalid authority key identifier") + } + if akid.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) { + if !akid.ReadASN1(&akid, cryptobyte_asn1.Tag(0).ContextSpecific()) { + return nil, errors.New("x509: invalid authority key identifier") + } + return akid, nil + } + return nil, nil +} + +func parseExtKeyUsageExtension(der cryptobyte.String) ([]stdx509.ExtKeyUsage, []asn1.ObjectIdentifier, error) { + var extKeyUsages []stdx509.ExtKeyUsage + var unknownUsages []asn1.ObjectIdentifier + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + for !der.Empty() { + var eku asn1.ObjectIdentifier + if !der.ReadASN1ObjectIdentifier(&eku) { + return nil, nil, errors.New("x509: invalid extended key usages") + } + if extKeyUsage, ok := extKeyUsageFromOID(eku); ok { + extKeyUsages = append(extKeyUsages, stdx509.ExtKeyUsage(extKeyUsage)) + } else { + unknownUsages = append(unknownUsages, eku) + } + } + return extKeyUsages, unknownUsages, nil +} + +// func parseCertificatePoliciesExtension(der cryptobyte.String) ([]OID, error) { +// var oids []OID +// if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// 
for !der.Empty() { +// var cp cryptobyte.String +// var OIDBytes cryptobyte.String +// if !der.ReadASN1(&cp, cryptobyte_asn1.SEQUENCE) || !cp.ReadASN1(&OIDBytes, cryptobyte_asn1.OBJECT_IDENTIFIER) { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oid, ok := newOIDFromDER(OIDBytes) +// if !ok { +// return nil, errors.New("x509: invalid certificate policies") +// } +// oids = append(oids, oid) +// } +// return oids, nil +// } + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. +func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *stdx509.Certificate, e pkix.Extension) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func processExtensions(out *stdx509.Certificate) error { + var err error + for _, e := range out.Extensions { + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 { + switch e.Id[3] { + case 15: + out.KeyUsage, err = parseKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 19: + out.IsCA, out.MaxPathLen, err = parseBasicConstraintsExtension(e.Value) + if err != nil { + return err + } + out.BasicConstraintsValid = true + out.MaxPathLenZero = out.MaxPathLen == 0 + case 17: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value) + if err != nil { + return err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case 30: + unhandled, err = parseNameConstraintsExtension(out, e) + if err != nil { + return err + } + + case 31: + // RFC 5280, 4.2.1.13 + + // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint + // + // DistributionPoint ::= SEQUENCE { + // distributionPoint [0] DistributionPointName OPTIONAL, + // reasons [1] ReasonFlags OPTIONAL, + // cRLIssuer [2] GeneralNames OPTIONAL } + // + // DistributionPointName ::= CHOICE { + // fullName [0] GeneralNames, + // nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution points") + } + for !val.Empty() { + var dpDER cryptobyte.String + if !val.ReadASN1(&dpDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid CRL distribution point") + } + var dpNameDER cryptobyte.String + var dpNamePresent bool + if !dpDER.ReadOptionalASN1(&dpNameDER, &dpNamePresent, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + if !dpNamePresent { + continue + } + if !dpNameDER.ReadASN1(&dpNameDER, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + for !dpNameDER.Empty() { + if !dpNameDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + break + } + var uri cryptobyte.String + if !dpNameDER.ReadASN1(&uri, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid CRL distribution point") + } + out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(uri)) + } + } + + case 35: + out.AuthorityKeyId, err = parseAuthorityKeyIdentifier(e) + if err != nil { + return err + } + case 37: + out.ExtKeyUsage, out.UnknownExtKeyUsage, err = parseExtKeyUsageExtension(e.Value) + if err != nil { + return err + } + case 14: + // RFC 5280, 4.2.1.2 
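+			// (id-ce-subjectKeyIdentifier, OID 2.5.29.14)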
+ if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: subject key identifier incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + var skid cryptobyte.String + if !val.ReadASN1(&skid, cryptobyte_asn1.OCTET_STRING) { + return errors.New("x509: invalid subject key identifier") + } + out.SubjectKeyId = skid + // case 32: + // out.Policies, err = parseCertificatePoliciesExtension(e.Value) + // if err != nil { + // return err + // } + // out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, 0, len(out.Policies)) + // for _, oid := range out.Policies { + // if oid, ok := oid.toASN1OID(); ok { + // out.PolicyIdentifiers = append(out.PolicyIdentifiers, oid) + // } + // } + default: + // Unknown extensions are recorded if critical. + unhandled = true + } + } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + if e.Critical { + // Conforming CAs MUST mark this extension as non-critical + return errors.New("x509: authority info access incorrectly marked critical") + } + val := cryptobyte.String(e.Value) + if !val.ReadASN1(&val, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + for !val.Empty() { + var aiaDER cryptobyte.String + if !val.ReadASN1(&aiaDER, cryptobyte_asn1.SEQUENCE) { + return errors.New("x509: invalid authority info access") + } + var method asn1.ObjectIdentifier + if !aiaDER.ReadASN1ObjectIdentifier(&method) { + return errors.New("x509: invalid authority info access") + } + if !aiaDER.PeekASN1Tag(cryptobyte_asn1.Tag(6).ContextSpecific()) { + continue + } + if !aiaDER.ReadASN1(&aiaDER, cryptobyte_asn1.Tag(6).ContextSpecific()) { + return errors.New("x509: invalid authority info access") + } + switch { + case method.Equal(oidAuthorityInfoAccessOcsp): + out.OCSPServer = append(out.OCSPServer, string(aiaDER)) + case method.Equal(oidAuthorityInfoAccessIssuers): + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(aiaDER)) + } + } + } else { + // Unknown extensions are recorded if critical. 
+ unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + + return nil +} + +var x509negativeserial = legacyGodebugSetting(0) // replaces godebug.New("x509negativeserial") + +func parseCertificate(der []byte) (*stdx509.Certificate, error) { + cert := &stdx509.Certificate{} + + input := cryptobyte.String(der) + // we read the SEQUENCE including length and tag bytes so that + // we can populate Certificate.Raw, before unwrapping the + // SEQUENCE so it can be operated on + if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + cert.Raw = input + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed certificate") + } + + var tbs cryptobyte.String + // do the same trick again as above to extract the raw + // bytes for Certificate.RawTBSCertificate + if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + cert.RawTBSCertificate = tbs + if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed tbs certificate") + } + + if !tbs.ReadOptionalASN1Integer(&cert.Version, cryptobyte_asn1.Tag(0).Constructed().ContextSpecific(), 0) { + return nil, errors.New("x509: malformed version") + } + if cert.Version < 0 { + return nil, errors.New("x509: malformed version") + } + // for backwards compat reasons Version is one-indexed, + // rather than zero-indexed as defined in 5280 + cert.Version++ + if cert.Version > 3 { + return nil, errors.New("x509: invalid version") + } + + serial := new(big.Int) + if !tbs.ReadASN1Integer(serial) { + return nil, errors.New("x509: malformed serial number") + } + if serial.Sign() == -1 { + if x509negativeserial.Value() != "1" { + return nil, errors.New("x509: negative serial number") + } else { + x509negativeserial.IncNonDefault() + } + } + cert.SerialNumber = serial + + var sigAISeq cryptobyte.String + if !tbs.ReadASN1(&sigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed signature algorithm identifier") + } + // Before parsing the inner algorithm identifier, extract + // the outer algorithm identifier and make sure that they + // match. 
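+	// RFC 5280, Section 4.1.1.2 requires the outer signatureAlgorithm field
+	// to carry the same algorithm identifier as the signature field inside
+	// tbsCertificate, so a mismatch is rejected as malformed.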
+ var outerSigAISeq cryptobyte.String + if !input.ReadASN1(&outerSigAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed algorithm identifier") + } + if !bytes.Equal(outerSigAISeq, sigAISeq) { + return nil, errors.New("x509: inner and outer signature algorithm identifiers don't match") + } + sigAI, err := parseAI(sigAISeq) + if err != nil { + return nil, err + } + cert.SignatureAlgorithm = getSignatureAlgorithmFromAI(sigAI) + + var issuerSeq cryptobyte.String + if !tbs.ReadASN1Element(&issuerSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawIssuer = issuerSeq + issuerRDNs, err := parseName(issuerSeq) + if err != nil { + return nil, err + } + cert.Issuer.FillFromRDNSequence(issuerRDNs) + + var validity cryptobyte.String + if !tbs.ReadASN1(&validity, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed validity") + } + cert.NotBefore, cert.NotAfter, err = parseValidity(validity) + if err != nil { + return nil, err + } + + var subjectSeq cryptobyte.String + if !tbs.ReadASN1Element(&subjectSeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed issuer") + } + cert.RawSubject = subjectSeq + subjectRDNs, err := parseName(subjectSeq) + if err != nil { + return nil, err + } + cert.Subject.FillFromRDNSequence(subjectRDNs) + + var spki cryptobyte.String + if !tbs.ReadASN1Element(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + cert.RawSubjectPublicKeyInfo = spki + if !spki.ReadASN1(&spki, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed spki") + } + var pkAISeq cryptobyte.String + if !spki.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed public key algorithm identifier") + } + pkAI, err := parseAI(pkAISeq) + if err != nil { + return nil, err + } + cert.PublicKeyAlgorithm = getPublicKeyAlgorithmFromOID(pkAI.Algorithm) + var spk asn1.BitString + if !spki.ReadASN1BitString(&spk) { + return nil, errors.New("x509: malformed subjectPublicKey") + } + if cert.PublicKeyAlgorithm != stdx509.UnknownPublicKeyAlgorithm { + cert.PublicKey, err = parsePublicKey(&publicKeyInfo{ + Algorithm: pkAI, + PublicKey: spk, + }) + if err != nil { + return nil, err + } + } + + if cert.Version > 1 { + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(1).ContextSpecific()) { + return nil, errors.New("x509: malformed issuerUniqueID") + } + if !tbs.SkipOptionalASN1(cryptobyte_asn1.Tag(2).ContextSpecific()) { + return nil, errors.New("x509: malformed subjectUniqueID") + } + if cert.Version == 3 { + var extensions cryptobyte.String + var present bool + if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.Tag(3).Constructed().ContextSpecific()) { + return nil, errors.New("x509: malformed extensions") + } + if present { + seenExts := make(map[string]bool) + if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extensions") + } + for !extensions.Empty() { + var extension cryptobyte.String + if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { + return nil, errors.New("x509: malformed extension") + } + ext, err := parseExtension(extension) + if err != nil { + return nil, err + } + oidStr := ext.Id.String() + if seenExts[oidStr] { + return nil, fmt.Errorf("x509: certificate contains duplicate extension with OID %q", oidStr) + } + seenExts[oidStr] = true + cert.Extensions = append(cert.Extensions, ext) + } + err = processExtensions(cert) + if err != nil { + 
return nil, err + } + } + } + } + + var signature asn1.BitString + if !input.ReadASN1BitString(&signature) { + return nil, errors.New("x509: malformed signature") + } + cert.Signature = signature.RightAlign() + + return cert, nil +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go new file mode 100644 index 0000000000..da3c38a4e4 --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/pkcs1.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package legacyx509 + +import ( + "math/big" +) + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go new file mode 100644 index 0000000000..901e3ba85b --- /dev/null +++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/verify.go @@ -0,0 +1,193 @@ +package legacyx509 + +import ( + "bytes" + "strings" +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. +func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." 
Atom)*
+	NextChar:
+		for len(in) > 0 {
+			// atext from RFC 2822, Section 3.2.4
+			c := in[0]
+
+			switch {
+			case c == '\\':
+				// Examples given in RFC 3696 suggest that
+				// escaped characters can appear outside of a
+				// quoted string. Several “verified” errata
+				// continue to argue the point. We choose to
+				// accept it.
+				in = in[1:]
+				if len(in) == 0 {
+					return mailbox, false
+				}
+				fallthrough
+
+			case ('0' <= c && c <= '9') ||
+				('a' <= c && c <= 'z') ||
+				('A' <= c && c <= 'Z') ||
+				c == '!' || c == '#' || c == '$' || c == '%' ||
+				c == '&' || c == '\'' || c == '*' || c == '+' ||
+				c == '-' || c == '/' || c == '=' || c == '?' ||
+				c == '^' || c == '_' || c == '`' || c == '{' ||
+				c == '|' || c == '}' || c == '~' || c == '.':
+				localPartBytes = append(localPartBytes, in[0])
+				in = in[1:]
+
+			default:
+				break NextChar
+			}
+		}
+
+		if len(localPartBytes) == 0 {
+			return mailbox, false
+		}
+
+		// From RFC 3696, Section 3:
+		// “period (".") may also appear, but may not be used to start
+		// or end the local part, nor may two or more consecutive
+		// periods appear.”
+		twoDots := []byte{'.', '.'}
+		if localPartBytes[0] == '.' ||
+			localPartBytes[len(localPartBytes)-1] == '.' ||
+			bytes.Contains(localPartBytes, twoDots) {
+			return mailbox, false
+		}
+	}
+
+	if len(in) == 0 || in[0] != '@' {
+		return mailbox, false
+	}
+	in = in[1:]
+
+	// The RFC specifies a format for domains, but that's known to be
+	// violated in practice so we accept that anything after an '@' is the
+	// domain part.
+	if _, ok := domainToReverseLabels(in); !ok {
+		return mailbox, false
+	}
+
+	mailbox.local = string(localPartBytes)
+	mailbox.domain = in
+	return mailbox, true
+}
+
+// domainToReverseLabels converts a textual domain name like foo.example.com to
+// the list of labels in reverse order, e.g. ["com", "example", "foo"].
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
+	for len(domain) > 0 {
+		if i := strings.LastIndexByte(domain, '.'); i == -1 {
+			reverseLabels = append(reverseLabels, domain)
+			domain = ""
+		} else {
+			reverseLabels = append(reverseLabels, domain[i+1:])
+			domain = domain[:i]
+			if i == 0 { // domain == ""
+				// domain is prefixed with an empty label, append an empty
+				// string to reverseLabels to indicate this.
+				reverseLabels = append(reverseLabels, "")
+			}
+		}
+	}
+
+	if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
+		// An empty label at the end indicates an absolute name.
+		return nil, false
+	}
+
+	for _, label := range reverseLabels {
+		if len(label) == 0 {
+			// Empty labels are otherwise invalid.
+			return nil, false
+		}
+
+		for _, c := range label {
+			if c < 33 || c > 126 {
+				// Invalid character.
+				return nil, false
+			}
+		}
+	}
+
+	return reverseLabels, true
+}
diff --git a/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
new file mode 100644
index 0000000000..a4500bfb17
--- /dev/null
+++ b/vendor/github.com/smallstep/pkcs7/internal/legacy/x509/x509.go
@@ -0,0 +1,488 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 implements a subset of the X.509 standard.
+//
+// It allows parsing and generating certificates, certificate signing
+// requests, certificate revocation lists, and encoded public and private keys.
+// It provides a certificate verifier, complete with a chain builder.
+// +// The package targets the X.509 technical profile defined by the IETF (RFC +// 2459/3280/5280), and as further restricted by the CA/Browser Forum Baseline +// Requirements. There is minimal support for features outside of these +// profiles, as the primary goal of the package is to provide compatibility +// with the publicly trusted TLS certificate ecosystem and its policies and +// constraints. +// +// On macOS and Windows, certificate verification is handled by system APIs, but +// the package aims to apply consistent validation rules across operating +// systems. +package legacyx509 + +import ( + "bytes" + "crypto" + "crypto/elliptic" + stdx509 "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "strconv" + "unicode" + + // Explicitly import these for their crypto.RegisterHash init side-effects. + // Keep these as blank imports, even if they're imported above. + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +type SignatureAlgorithm int + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + + MD2WithRSA // Unsupported. + MD5WithRSA // Only supported for signing, not verification. + SHA1WithRSA // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 // Unsupported. + DSAWithSHA256 // Unsupported. + ECDSAWithSHA1 // Only supported for signing, and verification of CRLs, CSRs, and OCSP responses. + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 + SHA256WithRSAPSS + SHA384WithRSAPSS + SHA512WithRSAPSS + PureEd25519 +) + +func (algo SignatureAlgorithm) String() string { + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + return details.name + } + } + return strconv.Itoa(int(algo)) +} + +type PublicKeyAlgorithm int + +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA // Only supported for parsing. 
+ ECDSA + Ed25519 +) + +var publicKeyAlgoName = [...]string{ + RSA: "RSA", + DSA: "DSA", + ECDSA: "ECDSA", + Ed25519: "Ed25519", +} + +func (algo PublicKeyAlgorithm) String() string { + if 0 < algo && int(algo) < len(publicKeyAlgoName) { + return publicKeyAlgoName[algo] + } + return strconv.Itoa(int(algo)) +} + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } +// +// RFC 8410 3 Curve25519 and Curve448 Algorithm Identifiers +// +// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } +var ( + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. 
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + params asn1.RawValue + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash + isRSAPSS bool +}{ + {MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, asn1.NullRawValue, RSA, crypto.MD5, false}, + {SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, asn1.NullRawValue, RSA, crypto.SHA1, false}, + {SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, asn1.NullRawValue, RSA, crypto.SHA256, false}, + {SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, asn1.NullRawValue, RSA, crypto.SHA384, false}, + {SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, asn1.NullRawValue, RSA, crypto.SHA512, false}, + {SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, pssParametersSHA256, RSA, crypto.SHA256, true}, + {SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, pssParametersSHA384, RSA, crypto.SHA384, true}, + {SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, pssParametersSHA512, RSA, crypto.SHA512, true}, + {DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, emptyRawValue, DSA, crypto.SHA1, false}, + {DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, emptyRawValue, DSA, crypto.SHA256, false}, + {ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, emptyRawValue, ECDSA, crypto.SHA1, false}, + {ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, emptyRawValue, ECDSA, crypto.SHA256, false}, + {ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, emptyRawValue, ECDSA, crypto.SHA384, false}, + {ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, emptyRawValue, ECDSA, crypto.SHA512, false}, + {PureEd25519, "Ed25519", oidSignatureEd25519, emptyRawValue, Ed25519, crypto.Hash(0) /* no pre-hashing */, false}, +} + +var emptyRawValue = asn1.RawValue{} + +// DER encoded RSA PSS parameters for the +// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3. +// The parameters contain the following values: +// - hashAlgorithm contains the associated hash identifier with NULL parameters +// - maskGenAlgorithm always contains the default mgf1SHA1 identifier +// - saltLength contains the length of the associated hash +// - trailerField always contains the default trailerFieldBC value +var ( + pssParametersSHA256 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}} + pssParametersSHA384 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}} + pssParametersSHA512 = asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}} +) + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. 
+ Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) stdx509.SignatureAlgorithm { + if ai.Algorithm.Equal(oidSignatureEd25519) { + // RFC 8410, Section 3 + // > For all of the OIDs, the parameters MUST be absent. + if len(ai.Parameters.FullBytes) != 0 { + return stdx509.UnknownSignatureAlgorithm + } + } + + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return stdx509.SignatureAlgorithm(details.algo) + } + } + return stdx509.UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. + + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return stdx509.UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces them into + // three buckets by requiring that the MGF1 hash function always match the + // message hash function (as recommended in RFC 3447, Section 8.1), that the + // salt length matches the hash length, and that the trailer field has the + // default value. + if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + (len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) || + params.TrailerField != 1 { + return stdx509.UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return stdx509.SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return stdx509.SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return stdx509.SHA512WithRSAPSS + } + + return stdx509.UnknownSignatureAlgorithm +} + +var ( + // RFC 3279, 2.3 Public Key Algorithms + // + // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // rsadsi(113549) pkcs(1) 1 } + // + // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } + // + // id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // x9-57(10040) x9cm(4) 1 } + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters + // + // id-ecPublicKey OBJECT IDENTIFIER ::= { + // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + // RFC 8410, Section 3 + // + // id-X25519 OBJECT IDENTIFIER ::= { 1 3 101 110 } + // id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 } + oidPublicKeyX25519 = asn1.ObjectIdentifier{1, 3, 101, 110} + oidPublicKeyEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} +) + +// getPublicKeyAlgorithmFromOID returns the exposed PublicKeyAlgorithm +// identifier for public key types supported in certificates and CSRs. Marshal +// and Parse functions may support a different set of public key types. 
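+//
+// For example, the rsaEncryption OID (1.2.840.113549.1.1.1) maps to
+// stdx509.RSA, and any OID not handled below maps to
+// stdx509.UnknownPublicKeyAlgorithm.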
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) stdx509.PublicKeyAlgorithm { + switch { + case oid.Equal(oidPublicKeyRSA): + return stdx509.RSA + case oid.Equal(oidPublicKeyDSA): + return stdx509.DSA + case oid.Equal(oidPublicKeyECDSA): + return stdx509.ECDSA + case oid.Equal(oidPublicKeyEd25519): + return stdx509.Ed25519 + } + return stdx509.UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// NB: secp256r1 is equivalent to prime256v1 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { + switch { + case oid.Equal(oidNamedCurveP224): + return elliptic.P224() + case oid.Equal(oidNamedCurveP256): + return elliptic.P256() + case oid.Equal(oidNamedCurveP384): + return elliptic.P384() + case oid.Equal(oidNamedCurveP521): + return elliptic.P521() + } + return nil +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. +type KeyUsage int + +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +var ( + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22} + 
oidExtKeyUsageMicrosoftKernelCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} +) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. +type ExtKeyUsage int + +const ( + ExtKeyUsageAny ExtKeyUsage = iota + ExtKeyUsageServerAuth + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIPSECEndSystem + ExtKeyUsageIPSECTunnel + ExtKeyUsageIPSECUser + ExtKeyUsageTimeStamping + ExtKeyUsageOCSPSigning + ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageMicrosoftCommercialCodeSigning + ExtKeyUsageMicrosoftKernelCodeSigning +) + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, + {ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning}, + {ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + for _, pair := range extKeyUsageOIDs { + if oid.Equal(pair.oid) { + return pair.extKeyUsage, true + } + } + return +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +var ( + oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} +) + +var ( + oidAuthorityInfoAccessOcsp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1} + oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} +) + +func isIA5String(s string) error { + for _, r := range s { + // Per RFC5280 "IA5String is limited to the set of ASCII characters" + if r > unicode.MaxASCII { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} diff --git a/vendor/go.mozilla.org/pkcs7/pkcs7.go b/vendor/github.com/smallstep/pkcs7/pkcs7.go similarity index 70% rename from vendor/go.mozilla.org/pkcs7/pkcs7.go rename to vendor/github.com/smallstep/pkcs7/pkcs7.go index ccc6cc6dfe..f6c6dfbbb5 100644 --- a/vendor/go.mozilla.org/pkcs7/pkcs7.go +++ b/vendor/github.com/smallstep/pkcs7/pkcs7.go @@ -13,8 +13,11 @@ import ( "errors" "fmt" "sort" + "sync" _ "crypto/sha1" // for crypto.SHA1 + + legacyx509 "github.com/smallstep/pkcs7/internal/legacy/x509" ) // PKCS7 Represents a PKCS7 structure @@ -31,7 +34,7 @@ type contentInfo struct { Content asn1.RawValue `asn1:"explicit,optional,tag:0"` } -// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. +// ErrUnsupportedContentType is returned when a PKCS7 content type is not supported. 
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), // and Enveloped Data are supported (1.2.840.113549.1.7.3) var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") @@ -53,6 +56,7 @@ var ( OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + OIDDigestAlgorithmSHA224 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 4} OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} @@ -63,23 +67,28 @@ var ( OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} // Signature Algorithms - OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + OIDEncryptionAlgorithmRSAMD5 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.4 + OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} // ditto + OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} // ditto + OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} // ditto + OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} // ditto + OIDEncryptionAlgorithmRSASHA224 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 14} // ditto OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} - // Encryption Algorithms - OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} - OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} - OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} - OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} + // Asymmetric Encryption Algorithms + OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.2 + OIDEncryptionAlgorithmRSAESOAEP = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7} // see https://www.rfc-editor.org/rfc/rfc8017#appendix-A.2.1 + + // Symmetric Encryption Algorithms + OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.1 + OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.2 + OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} // see https://www.rfc-editor.org/rfc/rfc3565.html#section-4.1 + OIDEncryptionAlgorithmAES128GCM = 
asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} // see https://www.rfc-editor.org/rfc/rfc8018.html#appendix-B.2.5 + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} // see https://www.rfc-editor.org/rfc/rfc5084.html#section-3.2 ) func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { @@ -114,11 +123,11 @@ func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1. return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") } -// getOIDForEncryptionAlgorithm takes the private key type of the signer and +// getOIDForEncryptionAlgorithm takes the public or private key type of the signer and // the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm -func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { - switch pkey.(type) { - case *rsa.PrivateKey: +func getOIDForEncryptionAlgorithm(pkey interface{}, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + switch k := pkey.(type) { + case *rsa.PrivateKey, *rsa.PublicKey: switch { default: return OIDEncryptionAlgorithmRSA, nil @@ -133,7 +142,7 @@ func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.Obje case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): return OIDEncryptionAlgorithmRSASHA512, nil } - case *ecdsa.PrivateKey: + case *ecdsa.PrivateKey, *ecdsa.PublicKey: switch { case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): return OIDDigestAlgorithmECDSASHA1, nil @@ -144,8 +153,13 @@ func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.Obje case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): return OIDDigestAlgorithmECDSASHA512, nil } - case *dsa.PrivateKey: + case *dsa.PrivateKey, *dsa.PublicKey: return OIDDigestAlgorithmDSA, nil + case crypto.Signer: + // This generic case is here to cover types from other packages. It + // was specifically added to handle the private keyRSA type in the + // github.com/go-piv/piv-go/piv package. + return getOIDForEncryptionAlgorithm(k.Public(), OIDDigestAlg) } return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey) @@ -202,6 +216,40 @@ func parseEncryptedData(data []byte) (*PKCS7, error) { }, nil } +// SetFallbackLegacyX509CertificateParserEnabled enables parsing certificates +// embedded in a PKCS7 message using the logic from crypto/x509 from before +// Go 1.23. Go 1.23 introduced a breaking change in case a certificate contains +// a critical authority key identifier, which is the correct thing to do based +// on RFC 5280, but it breaks Windows devices performing the Simple Certificate +// Enrolment Protocol (SCEP), as the certificates embedded in those requests +// apparently have authority key identifier extensions marked critical. +// +// See https://go-review.googlesource.com/c/go/+/562341 for the change in the +// Go source. +// +// When [SetFallbackLegacyX509CertificateParserEnabled] is called with true, it +// enables parsing using the legacy crypto/x509 certificate parser. 
It'll first
+// try to parse the certificates with the regular Go crypto/x509 package; if
+// that fails with the error described above, it retries with a copy of the
+// crypto/x509 parser based on Go 1.23 that skips the check that the authority
+// key identifier extension is marked critical.
+func SetFallbackLegacyX509CertificateParserEnabled(v bool) {
+	legacyX509CertificateParser.Lock()
+	legacyX509CertificateParser.enabled = v
+	legacyX509CertificateParser.Unlock()
+}
+
+var legacyX509CertificateParser struct {
+	sync.RWMutex
+	enabled bool
+}
+
+func isLegacyX509ParserEnabled() bool {
+	legacyX509CertificateParser.RLock()
+	defer legacyX509CertificateParser.RUnlock()
+	return legacyX509CertificateParser.enabled
+}
+
 func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
 	if len(raw.Raw) == 0 {
 		return nil, nil
@@ -212,7 +260,14 @@ func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
 		return nil, err
 	}
 
-	return x509.ParseCertificates(val.Bytes)
+	certificates, err := x509.ParseCertificates(val.Bytes)
+	if err != nil && err.Error() == "x509: authority key identifier incorrectly marked critical" {
+		if isLegacyX509ParserEnabled() {
+			certificates, err = legacyx509.ParseCertificates(val.Bytes)
+		}
+	}
+
+	return certificates, err
 }
 
 func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/github.com/smallstep/pkcs7/sign.go
similarity index 100%
rename from vendor/go.mozilla.org/pkcs7/sign.go
rename to vendor/github.com/smallstep/pkcs7/sign.go
diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/github.com/smallstep/pkcs7/verify.go
similarity index 87%
rename from vendor/go.mozilla.org/pkcs7/verify.go
rename to vendor/github.com/smallstep/pkcs7/verify.go
index f09e27245c..7525f918b1 100644
--- a/vendor/go.mozilla.org/pkcs7/verify.go
+++ b/vendor/github.com/smallstep/pkcs7/verify.go
@@ -54,6 +54,21 @@ func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime ti
 	return nil
 }
 
+// SigningTimeNotValidError is returned when the signing time attribute
+// falls outside of the signer certificate validity.
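+//
+// Callers that want to tolerate an out-of-range signing time can detect this
+// error with errors.As; a minimal sketch:
+//
+//	var timeErr *SigningTimeNotValidError
+//	if err := p7.Verify(); errors.As(err, &timeErr) {
+//		// decide whether to accept the signature anyway
+//	}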
+type SigningTimeNotValidError struct { + SigningTime time.Time + NotBefore time.Time // NotBefore of signer + NotAfter time.Time // NotAfter of signer +} + +func (e *SigningTimeNotValidError) Error() string { + return fmt.Sprintf("pkcs7: signing time %q is outside of certificate validity %q to %q", + e.SigningTime.Format(time.RFC3339), + e.NotBefore.Format(time.RFC3339), + e.NotAfter.Format(time.RFC3339)) +} + func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) { signedData := p7.Content ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) @@ -91,10 +106,11 @@ func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPo if err == nil { // signing time found, performing validity check if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { - return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", - signingTime.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339), - ee.NotAfter.Format(time.RFC3339)) + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, + NotAfter: ee.NotAfter, + } } } } @@ -146,10 +162,11 @@ func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (e if err == nil { // signing time found, performing validity check if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { - return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", - signingTime.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339), - ee.NotAfter.Format(time.RFC3339)) + return &SigningTimeNotValidError{ + SigningTime: signingTime, + NotBefore: ee.NotBefore, + NotAfter: ee.NotAfter, + } } } } @@ -210,8 +227,13 @@ func parseSignedData(data []byte) (*PKCS7, error) { // Compound octet string if compound.IsCompound { if compound.Tag == 4 { - if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil { - return nil, err + for len(compound.Bytes) > 0 { + var cdata asn1.RawValue + if _, err = asn1.Unmarshal(compound.Bytes, &cdata); err != nil { + return nil, err + } + content = append(content, cdata.Bytes...) 
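+				// Advance past the full TLV encoding of the segment just
+				// consumed so the next iteration starts at the next segment
+				// of the constructed OCTET STRING.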
+ compound.Bytes = compound.Bytes[len(cdata.FullBytes):] } } else { content = compound.Bytes @@ -278,13 +300,13 @@ func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): switch { - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1): return x509.SHA1WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256): return x509.SHA256WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384): return x509.SHA384WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512), digest.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): return x509.SHA512WithRSA, nil default: return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index 91dd430c1c..6fc80512fd 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -23,21 +23,19 @@ var errAlignmentOverflow = errors.New("integer overflow when calculating alignme // nextAligned finds the next offset that satisfies alignment. func nextAligned(offset int64, alignment int) (int64, error) { - align64 := uint64(alignment) - offset64 := uint64(offset) + align64 := int64(alignment) - if align64 <= 0 || offset64%align64 == 0 { + if align64 <= 0 || offset%align64 == 0 { return offset, nil } - offset64 += (align64 - offset64%align64) + align64 -= offset % align64 - if offset64 > math.MaxInt64 { + if (math.MaxInt64 - offset) < align64 { return 0, errAlignmentOverflow } - //nolint:gosec // Overflow handled above. - return int64(offset64), nil + return offset + align64, nil } // writeDataObjectAt writes the data object described by di to ws, using time t, recording details diff --git a/vendor/go.mozilla.org/pkcs7/README.md b/vendor/go.mozilla.org/pkcs7/README.md deleted file mode 100644 index a55d117c68..0000000000 --- a/vendor/go.mozilla.org/pkcs7/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# pkcs7 - -[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7) -[![Build Status](https://github.com/mozilla-services/pkcs7/workflows/CI/badge.svg?branch=master&event=push)](https://github.com/mozilla-services/pkcs7/actions/workflows/ci.yml?query=branch%3Amaster+event%3Apush) - -pkcs7 implements parsing and creating signed and enveloped messages. 
- -```go -package main - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "os" - - "go.mozilla.org/pkcs7" -) - -func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) { - toBeSigned, err := NewSignedData(content) - if err != nil { - err = fmt.Errorf("Cannot initialize signed data: %s", err) - return - } - if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil { - err = fmt.Errorf("Cannot add signer: %s", err) - return - } - - // Detach signature, omit if you want an embedded signature - toBeSigned.Detach() - - signed, err = toBeSigned.Finish() - if err != nil { - err = fmt.Errorf("Cannot finish signing data: %s", err) - return - } - - // Verify the signature - pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) - p7, err := pkcs7.Parse(signed) - if err != nil { - err = fmt.Errorf("Cannot parse our signed data: %s", err) - return - } - - // since the signature was detached, reattach the content here - p7.Content = content - - if bytes.Compare(content, p7.Content) != 0 { - err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) - return - } - if err = p7.Verify(); err != nil { - err = fmt.Errorf("Cannot verify our signed data: %s", err) - return - } - - return signed, nil -} -``` - - - -## Credits -This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7) diff --git a/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go b/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go deleted file mode 100644 index 1eb05bc3ea..0000000000 --- a/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build go1.11 go1.12 go1.13 go1.14 go1.15 - -package pkcs7 - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "os/exec" - "testing" -) - -func TestVerifyEC2(t *testing.T) { - fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - p7.Certificates = []*x509.Certificate{fixture.Certificate} - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } -} - -var EC2IdentityDocumentFixture = ` ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA -JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh -eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 -cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh -bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs -bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg -OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK -ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj -aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy -YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA -AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n -dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi -IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B -CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG -CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w -LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA -AAAAAA== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw 
-FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD -VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z -ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u -IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl -cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e -ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 -VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P -hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j -k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U -hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF -lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf -MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW -MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw -vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw -7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K ------END CERTIFICATE-----` - -func TestDSASignWithOpenSSLAndVerify(t *testing.T) { - content := []byte(` -A ship in port is safe, -but that's not what ships are built for. --- Grace Hopper`) - // write the content to a temp file - tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpContentFile.Name(), content, 0755) - - // write the signer cert to a temp file - tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755) - - // write the signer key to a temp file - tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755) - - tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") - if err != nil { - t.Fatal(err) - } - // call openssl to sign the content - opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", - "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), - "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), - "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") - out, err := opensslCMD.CombinedOutput() - if err != nil { - t.Fatalf("openssl command failed with %s: %s", err, out) - } - - // verify the signed content - pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) - if err != nil { - t.Fatal(err) - } - fmt.Printf("%s\n", pemSignature) - derBlock, _ := pem.Decode(pemSignature) - if derBlock == nil { - t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) - } - p7, err := Parse(derBlock.Bytes) - if err != nil { - t.Fatalf("Parse encountered unexpected error: %v", err) - } - if err := p7.Verify(); err != nil { - t.Fatalf("Verify failed with error: %v", err) - } - os.Remove(tmpSignerCertFile.Name()) // clean up - os.Remove(tmpSignerKeyFile.Name()) // clean up - os.Remove(tmpContentFile.Name()) // clean up -} - -var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L -vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo 
-g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= ------END PRIVATE KEY-----`) - -var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- -MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV -bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD -VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du -MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r -bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE -ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC -AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD -Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE -exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii -Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 -V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI -puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl -nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp -rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt -1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT -ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G -CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 -qzy/7yePTlhlpj+ahMM= ------END CERTIFICATE-----`) - -type DSATestFixture struct { - Input []byte - Certificate *x509.Certificate -} - -func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { - var result DSATestFixture - var derBlock *pem.Block - var pemBlock = []byte(testPEMBlock) - for { - derBlock, pemBlock = pem.Decode(pemBlock) - if derBlock == nil { - break - } - switch derBlock.Type { - case "PKCS7": - result.Input = derBlock.Bytes - case "CERTIFICATE": - result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) - } - } - - return result -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000000..2492f796af --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. 
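+// For example, AddASN1Uint64(65535) appends 02 03 00 ff ff: the INTEGER tag,
+// a length of 3, and the contents 00 ff ff, where the leading zero octet
+// keeps the most significant bit clear so the value stays non-negative.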
+func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
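+// For example, AddASN1BitString([]byte{0x80}) appends 03 02 00 80: the BIT
+// STRING tag, a length of 2, a leading octet recording zero unused padding
+// bits, and the data byte itself.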
+func (b *Builder) AddASN1BitString(data []byte) {
+	b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
+		b.AddUint8(0)
+		b.AddBytes(data)
+	})
+}
+
+func (b *Builder) addBase128Int(n int64) {
+	var length int
+	if n == 0 {
+		length = 1
+	} else {
+		for i := n; i > 0; i >>= 7 {
+			length++
+		}
+	}
+
+	for i := length - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+
+		b.add(o)
+	}
+}
+
+func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
+	if len(oid) < 2 {
+		return false
+	}
+
+	if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
+		return false
+	}
+
+	for _, v := range oid {
+		if v < 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
+	b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
+		if !isValidOID(oid) {
+			b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
+			return
+		}
+
+		b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
+		for _, v := range oid[2:] {
+			b.addBase128Int(int64(v))
+		}
+	})
+}
+
+func (b *Builder) AddASN1Boolean(v bool) {
+	b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
+		if v {
+			b.AddUint8(0xff)
+		} else {
+			b.AddUint8(0)
+		}
+	})
+}
+
+func (b *Builder) AddASN1NULL() {
+	b.add(uint8(asn1.NULL), 0)
+}
+
+// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
+// successful or records an error if one occurred.
+func (b *Builder) MarshalASN1(v interface{}) {
+	// NOTE(martinkr): This is somewhat of a hack to allow propagation of
+	// encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
+	// value embedded into a struct, its tag information is lost.
+	if b.err != nil {
+		return
+	}
+	bytes, err := encoding_asn1.Marshal(v)
+	if err != nil {
+		b.err = err
+		return
+	}
+	b.AddBytes(bytes)
+}
+
+// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
+// Tags greater than 30 are not supported and result in an error (i.e.
+// low-tag-number form only). The child builder passed to the
+// BuilderContinuation can be used to build the content of the ASN.1 object.
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
+	if b.err != nil {
+		return
+	}
+	// Identifiers with the low five bits set indicate high-tag-number format
+	// (two or more octets), which we don't support.
+	if tag&0x1f == 0x1f {
+		b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag)
+		return
+	}
+	b.AddUint8(uint8(tag))
+	b.addLengthPrefixed(1, true, f)
+}
+
+// String
+
+// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
+// representation into out and advances. It reports whether the read
+// was successful.
+func (s *String) ReadASN1Boolean(out *bool) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
+		return false
+	}
+
+	switch bytes[0] {
+	case 0:
+		*out = false
+	case 0xff:
+		*out = true
+	default:
+		return false
+	}
+
+	return true
+}
+
+// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
+// not point to an integer, to a big.Int, or to a []byte it panics. Only
+// positive and zero values can be decoded into []byte, and they are returned as
+// big-endian binary values that share memory with s. Positive values will have
+// no leading zeroes, and zero will be returned as a single zero byte.
+// ReadASN1Integer reports whether the read was successful.
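+//
+// A minimal usage sketch, assuming der holds a DER-encoded INTEGER (from a
+// client package the type below is cryptobyte.String):
+//
+//	var serial big.Int
+//	input := String(der)
+//	if !input.ReadASN1Integer(&serial) {
+//		// handle malformed input
+//	}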
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
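+// An ENUMERATED value is encoded like an INTEGER under tag 10; for example,
+// the DER bytes 0a 01 02 decode to the value 2.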
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
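+		// For example, "491231235959Z" parses as 2049-12-31, while
+		// "500101000000Z" parses as 1950-01-01 after this adjustment.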
+		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		res = res.AddDate(-100, 0, 0)
+	}
+	*out = res
+	return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+		len(bytes)*8/8 != len(bytes) {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	bytes = bytes[1:]
+	if paddingBits > 7 ||
+		len(bytes) == 0 && paddingBits != 0 ||
+		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+		return false
+	}
+
+	out.BitLength = len(bytes)*8 - int(paddingBits)
+	out.Bytes = bytes
+	return true
+}
+
+// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
+	var t asn1.Tag
+	if !s.ReadAnyASN1(out, &t) || t != tag {
+		return false
+	}
+	return true
+}
+
+// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, sets outTag to its tag, and advances.
+// It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
+	return s.readASN1(out, outTag, true /* skip header */)
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+	if len(*s) < 2 {
+		return false
+	}
+	tag, lenByte := (*s)[0], (*s)[1]
+
+	if tag&0x1f == 0x1f {
+		// ITU-T X.690 section 8.1.2
+		//
+		// An identifier octet with a tag part of 0x1f indicates a high-tag-number
+		// form identifier with two or more octets. We only support tags less than
+		// 31 (i.e. low-tag-number form, single octet identifier).
+		return false
+	}
+
+	if outTag != nil {
+		*outTag = asn1.Tag(tag)
+	}
+
+	// ITU-T X.690 section 8.1.3
+	//
+	// Bit 8 of the first length byte indicates whether the length is short- or
+	// long-form.
+	var length, headerLen uint32 // length includes headerLen
+	if lenByte&0x80 == 0 {
+		// Short-form length (section 8.1.3.4), encoded in bits 1-7.
+		length = uint32(lenByte) + 2
+		headerLen = 2
+	} else {
+		// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+		// used to encode the length.
+		lenLen := lenByte & 0x7f
+		var len32 uint32
+
+		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+			return false
+		}
+
+		lenBytes := String((*s)[2 : 2+lenLen])
+		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+			return false
+		}
+
+		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+		// with the minimum number of octets.
+		if len32 < 128 {
+			// Length should have used short-form encoding.
+			return false
+		}
+		if len32>>((lenLen-1)*8) == 0 {
+			// Leading octet is 0. Length should have been at least one byte shorter.
+			return false
+		}
+
+		headerLen = 2 + uint32(lenLen)
+		if headerLen+len32 < len32 {
+			// Overflow.
+			return false
+		}
+		length = headerLen + len32
+	}
+
+	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+		return false
+	}
+	if skipHeader && !out.Skip(int(headerLen)) {
+		panic("cryptobyte: internal error")
+	}
+
+	return true
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 0000000000..90ef6a241d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+//
+// Methods in the cryptobyte package only support the low-tag-number form, i.e.
+// a single identifier octet with bits 7-8 encoding the class and bits 1-6
+// encoding the tag number.
+type Tag uint8
+
+const (
+	classConstructed     = 0x20
+	classContextSpecific = 0x80
+)
+
+// Constructed returns t with the constructed class bit set.
+func (t Tag) Constructed() Tag { return t | classConstructed }
+
+// ContextSpecific returns t with the context-specific class bit set.
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
+
+// The following is a list of standard tag and class combinations.
+const (
+	BOOLEAN           = Tag(1)
+	INTEGER           = Tag(2)
+	BIT_STRING        = Tag(3)
+	OCTET_STRING      = Tag(4)
+	NULL              = Tag(5)
+	OBJECT_IDENTIFIER = Tag(6)
+	ENUM              = Tag(10)
+	UTF8String        = Tag(12)
+	SEQUENCE          = Tag(16 | classConstructed)
+	SET               = Tag(17 | classConstructed)
+	PrintableString   = Tag(19)
+	T61String         = Tag(20)
+	IA5String         = Tag(22)
+	UTCTime           = Tag(23)
+	GeneralizedTime   = Tag(24)
+	GeneralString     = Tag(27)
+)
diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go
new file mode 100644
index 0000000000..cf254f5f1e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -0,0 +1,350 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. 
Builder methods for length-prefixed
+// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
+// supplied to them. The child builder passed to the continuation can be used
+// to build the content of the length-prefixed sequence. For example:
+//
+//	parent := cryptobyte.NewBuilder()
+//	parent.AddUint8LengthPrefixed(func(child *Builder) {
+//		child.AddUint8(42)
+//		child.AddUint8LengthPrefixed(func(grandchild *Builder) {
+//			grandchild.AddUint8(5)
+//		})
+//	})
+//
+// It is an error to write more bytes to the child than allowed by the reserved
+// length prefix. After the continuation returns, the child must be considered
+// invalid, i.e. users must not store any copies or references of the child
+// that outlive the continuation.
+//
+// If the continuation panics with a value of type BuildError then the inner
+// error will be returned as the error from Bytes. If the child panics
+// otherwise then Bytes will repanic with the same value.
+type BuilderContinuation func(child *Builder)
+
+// BuildError wraps an error. If a BuilderContinuation panics with this value,
+// the panic will be recovered and the inner error will be returned from
+// Builder.Bytes.
+type BuildError struct {
+	Err error
+}
+
+// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
+func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(1, false, f)
+}
+
+// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(2, false, f)
+}
+
+// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
+func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(3, false, f)
+}
+
+// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
+func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(4, false, f)
+}
+
+func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
+	if !*b.inContinuation {
+		*b.inContinuation = true
+
+		defer func() {
+			*b.inContinuation = false
+
+			r := recover()
+			if r == nil {
+				return
+			}
+
+			if buildError, ok := r.(BuildError); ok {
+				b.err = buildError.Err
+			} else {
+				panic(r)
+			}
+		}()
+	}
+
+	f(arg)
+}
+
+func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
+	// Subsequent writes can be ignored if the builder has encountered an error.
+	if b.err != nil {
+		return
+	}
+
+	offset := len(b.result)
+	b.add(make([]byte, lenLen)...)
+
+	if b.inContinuation == nil {
+		b.inContinuation = new(bool)
+	}
+
+	b.child = &Builder{
+		result:         b.result,
+		fixedSize:      b.fixedSize,
+		offset:         offset,
+		pendingLenLen:  lenLen,
+		pendingIsASN1:  isASN1,
+		inContinuation: b.inContinuation,
+	}
+
+	b.callContinuation(f, b.child)
+	b.flushChild()
+	if b.child != nil {
+		panic("cryptobyte: internal error")
+	}
+}
+
+func (b *Builder) flushChild() {
+	if b.child == nil {
+		return
+	}
+	b.child.flushChild()
+	child := b.child
+	b.child = nil
+
+	if child.err != nil {
+		b.err = child.err
+		return
+	}
+
+	length := len(child.result) - child.pendingLenLen - child.offset
+
+	if length < 0 {
+		panic("cryptobyte: internal error") // result unexpectedly shrunk
+	}
+
+	if child.pendingIsASN1 {
+		// For ASN.1, we reserved a single byte for the length. If that turned out
+		// to be incorrect, we have to move the contents along in order to make
+		// space.
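+		// For example (illustrative values, not from the upstream sources):
+		// a child whose contents turned out to be 300 bytes long needs the
+		// three-byte DER length encoding 0x82 0x01 0x2c, so two extra bytes
+		// must be opened up before the length bytes can be written.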
+ if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000000..4b0f8097f9 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
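+
+// A minimal round-trip sketch of the two halves of this package (an
+// illustrative example, not part of the upstream sources): build a DER
+// SEQUENCE holding one INTEGER with Builder, then parse it back with String.
+//
+//	var b cryptobyte.Builder
+//	b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
+//		seq.AddASN1Int64(42)
+//	})
+//	der, _ := b.Bytes() // 0x30 0x03 0x02 0x01 0x2a
+//
+//	input := cryptobyte.String(der)
+//	var seq cryptobyte.String
+//	var n int64
+//	ok := input.ReadASN1(&seq, asn1.SEQUENCE) && seq.ReadASN1Integer(&n)
+//	// ok == true, n == 42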
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+	if len(*s) < n || n < 0 {
+		return nil
+	}
+	v := (*s)[:n]
+	*s = (*s)[n:]
+	return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+	return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+	v := s.read(1)
+	if v == nil {
+		return false
+	}
+	*out = uint8(v[0])
+	return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+	v := s.read(2)
+	if v == nil {
+		return false
+	}
+	*out = uint16(v[0])<<8 | uint16(v[1])
+	return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint24(out *uint32) bool {
+	v := s.read(3)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
+	return true
+}
+
+// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint32(out *uint32) bool {
+	v := s.read(4)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
+	return true
+}
+
+// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint48(out *uint64) bool {
+	v := s.read(6)
+	if v == nil {
+		return false
+	}
+	*out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
+	return true
+}
+
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
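+//
+// Combined with the length-prefixed readers below, the fixed-width readers
+// cover typical TLS-style framing. A short sketch (hypothetical message
+// layout, not from the upstream file):
+//
+//	msg := String([]byte{0x01, 0x00, 0x02, 0xab, 0xcd})
+//	var typ uint8
+//	var body String
+//	ok := msg.ReadUint8(&typ) && msg.ReadUint16LengthPrefixed(&body)
+//	// ok == true, typ == 1, body == String{0xab, 0xcd}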
+func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff47..0000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. 
-// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go index ecc0dabb74..c25939b92b 100644 --- a/vendor/golang.org/x/exp/maps/maps.go +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -5,9 +5,20 @@ // Package maps defines various functions useful with maps of any type. package maps +import "maps" + +// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of +// these functions except Keys and Values should be annotated +// (provisionally with "//go:fix inline") so that tools can safely and +// automatically replace calls to exp/maps with calls to std maps by +// inlining them. + // Keys returns the keys of the map m. // The keys will be in an indeterminate order. func Keys[M ~map[K]V, K comparable, V any](m M) []K { + // The simplest true equivalent using std is: + // return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)). + r := make([]K, 0, len(m)) for k := range m { r = append(r, k) @@ -18,6 +29,9 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K { // Values returns the values of the map m. // The values will be in an indeterminate order. func Values[M ~map[K]V, K comparable, V any](m M) []V { + // The simplest true equivalent using std is: + // return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)). + r := make([]V, 0, len(m)) for _, v := range m { r = append(r, v) @@ -28,50 +42,24 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V { // Equal reports whether two maps contain the same key/value pairs. // Values are compared using ==. func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || v1 != v2 { - return false - } - } - return true + return maps.Equal(m1, m2) } // EqualFunc is like Equal, but compares values using eq. // Keys are still compared with ==. func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || !eq(v1, v2) { - return false - } - } - return true + return maps.EqualFunc(m1, m2, eq) } // Clear removes all entries from m, leaving it empty. func Clear[M ~map[K]V, K comparable, V any](m M) { - for k := range m { - delete(m, k) - } + clear(m) } // Clone returns a copy of m. This is a shallow clone: // the new keys and values are set using ordinary assignment. func Clone[M ~map[K]V, K comparable, V any](m M) M { - // Preserve nil in case it matters. 
- if m == nil { - return nil - } - r := make(M, len(m)) - for k, v := range m { - r[k] = v - } - return r + return maps.Clone(m) } // Copy copies all key/value pairs in src adding them to dst. @@ -79,16 +67,10 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M { // the value in dst will be overwritten by the value associated // with the key in src. func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - for k, v := range src { - dst[k] = v - } + maps.Copy(dst, src) } // DeleteFunc deletes any key/value pairs from m for which del returns true. func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - for k, v := range m { - if del(k, v) { - delete(m, k) - } - } + maps.DeleteFunc(m, del) } diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go deleted file mode 100644 index fbf1934a06..0000000000 --- a/vendor/golang.org/x/exp/slices/cmp.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// min is a version of the predeclared function from the Go 1.21 release. -func min[T constraints.Ordered](a, b T) T { - if a < b || isNaN(a) { - return a - } - return b -} - -// max is a version of the predeclared function from the Go 1.21 release. -func max[T constraints.Ordered](a, b T) T { - if a > b || isNaN(a) { - return a - } - return b -} - -// cmpLess is a copy of cmp.Less from the Go 1.21 release. -func cmpLess[T constraints.Ordered](x, y T) bool { - return (isNaN(x) && !isNaN(y)) || x < y -} - -// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. -func cmpCompare[T constraints.Ordered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN && yNaN { - return 0 - } - if xNaN || x < y { - return -1 - } - if yNaN || x > y { - return +1 - } - return 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 46ceac3439..757383ea1c 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -6,26 +6,22 @@ package slices import ( - "unsafe" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) +// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of +// these functions should be annotated (provisionally with "//go:fix +// inline") so that tools can safely and automatically replace calls +// to exp/slices with calls to std slices by inlining them. + // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. func Equal[S ~[]E, E comparable](s1, s2 S) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true + return slices.Equal(s1, s2) } // EqualFunc reports whether two slices are equal using an equality @@ -34,16 +30,7 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool { // increasing index order, and the comparison stops at the first index // for which eq returns false. 
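+// For example (a sketch, not upstream text), mixed element types compare
+// exactly as in the standard library:
+//
+//	equal := EqualFunc([]int{1, 2}, []float64{1, 2},
+//		func(a int, b float64) bool { return float64(a) == b })
+//	// equal == true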
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true + return slices.EqualFunc(s1, s2, eq) } // Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair @@ -53,20 +40,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmpCompare(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 +func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { + return slices.Compare(s1, s2) } // CompareFunc is like [Compare] but uses a custom comparison function on each @@ -75,52 +50,30 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 + return slices.CompareFunc(s1, s2, cmp) } // Index returns the index of the first occurrence of v in s, // or -1 if not present. func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 + return slices.Index(s, v) } // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 + return slices.IndexFunc(s, f) } // Contains reports whether v is present in s. func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 + return slices.Contains(s, v) } // ContainsFunc reports whether at least one // element e of s satisfies f(e). func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 + return slices.ContainsFunc(s, f) } // Insert inserts the values v... into s at index i, @@ -131,92 +84,7 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - m := len(v) - if m == 0 { - return s - } - n := len(s) - if i == n { - return append(s, v...) - } - if n+m > cap(s) { - // Use append rather than make so that we bump the size of - // the slice up to the next storage class. - // This is what Grow does but we don't call Grow because - // that might copy the values twice. - s2 := append(s[:i], make(S, n+m-i)...) - copy(s2[i:], v) - copy(s2[i+m:], s[i:]) - return s2 - } - s = s[:n+m] - - // before: - // s: aaaaaaaabbbbccccccccdddd - // ^ ^ ^ ^ - // i i+m n n+m - // after: - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // - // a are the values that don't move in s. - // v are the values copied in from v. - // b and c are the values from s that are shifted up in index. - // d are the values that get overwritten, never to be seen again. - - if !overlaps(v, s[i+m:]) { - // Easy case - v does not overlap either the c or d regions. 
- // (It might be in some of a or b, or elsewhere entirely.) - // The data we copy up doesn't write to v at all, so just do it. - - copy(s[i+m:], s[i:]) - - // Now we have - // s: aaaaaaaabbbbbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // Note the b values are duplicated. - - copy(s[i:], v) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s - } - - // The hard case - v overlaps c or d. We can't just shift up - // the data because we'd move or clobber the values we're trying - // to insert. - // So instead, write v on top of d, then rotate. - copy(s[n:], v) - - // Now we have - // s: aaaaaaaabbbbccccccccvvvv - // ^ ^ ^ ^ - // i i+m n n+m - - rotateRight(s[i:], m) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s -} - -// clearSlice sets all elements up to the length of s to the zero value of E. -// We may use the builtin clear func instead, and remove clearSlice, when upgrading -// to Go 1.21+. -func clearSlice[S ~[]E, E any](s S) { - var zero E - for i := range s { - s[i] = zero - } + return slices.Insert(s, i, v...) } // Delete removes the elements s[i:j] from s, returning the modified slice. @@ -225,135 +93,27 @@ func clearSlice[S ~[]E, E any](s S) { // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j:len(s)] // bounds check - - if i == j { - return s - } - - oldlen := len(s) - s = append(s[:i], s[j:]...) - clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC - return s + return slices.Delete(s, i, j) } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. // DeleteFunc zeroes the elements between the new length and the original length. func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.DeleteFunc(s, del) } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - - if i == j { - return Insert(s, i, v...) - } - if j == len(s) { - return append(s[:i], v...) - } - - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot > cap(s) { - // Too big to fit, allocate and copy over. - s2 := append(s[:i], make(S, tot-i)...) // See Insert - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 - } - - r := s[:tot] - - if i+len(v) <= j { - // Easy, as v fits in the deleted portion. - copy(r[i:], v) - if i+len(v) != j { - copy(r[i+len(v):], s[j:]) - } - clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC - return r - } - - // We are expanding (v is bigger than j-i). 
- // The situation is something like this: - // (example has i=4,j=8,len(s)=16,len(v)=6) - // s: aaaaxxxxbbbbbbbbyy - // ^ ^ ^ ^ - // i j len(s) tot - // a: prefix of s - // x: deleted range - // b: more of s - // y: area to expand into - - if !overlaps(r[i+len(v):], v) { - // Easy, as v is not clobbered by the first copy. - copy(r[i+len(v):], s[j:]) - copy(r[i:], v) - return r - } - - // This is a situation where we don't have a single place to which - // we can copy v. Parts of it need to go to two different places. - // We want to copy the prefix of v into y and the suffix into x, then - // rotate |y| spots to the right. - // - // v[2:] v[:2] - // | | - // s: aaaavvvvbbbbbbbbvv - // ^ ^ ^ ^ - // i j len(s) tot - // - // If either of those two destinations don't alias v, then we're good. - y := len(v) - (j - i) // length of y portion - - if !overlaps(r[i:j], v) { - copy(r[i:j], v[y:]) - copy(r[len(s):], v[:y]) - rotateRight(r[i:], y) - return r - } - if !overlaps(r[len(s):], v) { - copy(r[len(s):], v[:y]) - copy(r[i:j], v[y:]) - rotateRight(r[i:], y) - return r - } - - // Now we know that v overlaps both x and y. - // That means that the entirety of b is *inside* v. - // So we don't need to preserve b at all; instead we - // can copy v first, then copy the b part of v out of - // v to the right destination. - k := startIdx(v, s[j:]) - copy(r[i:], v) - copy(r[i+len(v):], r[i+k:]) - return r + return slices.Replace(s, i, j, v...) } // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) + return slices.Clone(s) } // Compact replaces consecutive runs of equal elements with a single copy. @@ -362,40 +122,14 @@ func Clone[S ~[]E, E any](s S) S { // which may have a smaller length. // Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.Compact(s) } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. // CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.CompactFunc(s, eq) } // Grow increases the slice's capacity, if necessary, to guarantee space for @@ -403,113 +137,15 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. 
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s + return slices.Grow(s, n) } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} - -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 -// -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - -// TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. -// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. -// The follow-cycles algorithm can be 1-write but it is not very cache friendly. - -// rotateLeft rotates b left by n spaces. -// s_final[i] = s_orig[i+r], wrapping around. -func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } -} -func rotateRight[E any](s []E, r int) { - rotateLeft(s, len(s)-r) -} - -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - -// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. -func overlaps[E any](a, b []E) bool { - if len(a) == 0 || len(b) == 0 { - return false - } - elemSize := unsafe.Sizeof(a[0]) - if elemSize == 0 { - return false - } - // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. - // Also see crypto/internal/alias/alias.go:AnyOverlap - return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && - uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) -} - -// startIdx returns the index in haystack where the needle starts. -// prerequisite: the needle must be aliased entirely inside the haystack. -func startIdx[E any](haystack, needle []E) int { - p := &needle[0] - for i := range haystack { - if p == &haystack[i] { - return i - } - } - // TODO: what if the overlap is by a non-integral number of Es? - panic("needle not found") + return slices.Clip(s) } // Reverse reverses the elements of the slice in place. func Reverse[S ~[]E, E any](s S) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } + slices.Reverse(s) } diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f58bbc7ba4..e270a74652 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,21 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp - package slices import ( - "math/bits" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) +// TODO(adonovan): add a "//go:fix inline" annotation to each function +// in this file; see https://go.dev/issue/32816. 
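+
+// As an illustration of the forwarding pattern used throughout this file (a
+// sketch, not part of the upstream sources), exp/slices calls now behave
+// identically to their std counterparts:
+//
+//	xs := []int{3, 1, 2}
+//	Sort(xs)                     // delegates to slices.Sort
+//	i, ok := BinarySearch(xs, 2) // delegates to slices.BinarySearch
+//	// xs == []int{1, 2, 3}, i == 1, ok == true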
+ // Sort sorts a slice of any ordered type in ascending order. // When sorting floating-point numbers, NaNs are ordered before other values. -func Sort[S ~[]E, E constraints.Ordered](x S) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +func Sort[S ~[]E, E cmp.Ordered](x S) { + slices.Sort(x) } // SortFunc sorts the slice x in ascending order as determined by the cmp @@ -29,118 +28,60 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. // To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - n := len(x) - pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) + slices.SortFunc(x, cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal // elements, using cmp to compare elements in the same way as [SortFunc]. func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - stableCmpFunc(x, len(x), cmp) + slices.SortStableFunc(x, cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { - for i := len(x) - 1; i > 0; i-- { - if cmpLess(x[i], x[i-1]) { - return false - } - } - return true +func IsSorted[S ~[]E, E cmp.Ordered](x S) bool { + return slices.IsSorted(x) } // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the // comparison function as defined by [SortFunc]. func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { - for i := len(x) - 1; i > 0; i-- { - if cmp(x[i], x[i-1]) < 0 { - return false - } - } - return true + return slices.IsSortedFunc(x, cmp) } // Min returns the minimal value in x. It panics if x is empty. // For floating-point numbers, Min propagates NaNs (any NaN value in x // forces the output to be NaN). -func Min[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Min: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = min(m, x[i]) - } - return m +func Min[S ~[]E, E cmp.Ordered](x S) E { + return slices.Min(x) } // MinFunc returns the minimal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one minimal element // according to the cmp function, MinFunc returns the first one. func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MinFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) < 0 { - m = x[i] - } - } - return m + return slices.MinFunc(x, cmp) } // Max returns the maximal value in x. It panics if x is empty. // For floating-point E, Max propagates NaNs (any NaN value in x // forces the output to be NaN). -func Max[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Max: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = max(m, x[i]) - } - return m +func Max[S ~[]E, E cmp.Ordered](x S) E { + return slices.Max(x) } // MaxFunc returns the maximal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one maximal element // according to the cmp function, MaxFunc returns the first one. 
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MaxFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) > 0 { - m = x[i] - } - } - return m + return slices.MaxFunc(x, cmp) } // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmpLess(x[h], target) { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { + return slices.BinarySearch(x, target) } // BinarySearchFunc works like [BinarySearch], but uses a custom comparison @@ -151,47 +92,5 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. -func isNaN[T constraints.Ordered](x T) bool { - return x != x + return slices.BinarySearchFunc(x, target, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go deleted file mode 100644 index 06f2c7a248..0000000000 --- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortCmpFunc sorts data[a:b] using insertion sort. 
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownCmpFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { - child++ - } - if !(cmp(data[first+root], data[first+child]) < 0) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownCmpFunc(data, i, hi, first, cmp) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownCmpFunc(data, lo, i, first, cmp) - } -} - -// pdqsortCmpFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortCmpFunc(data, a, b, cmp) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortCmpFunc(data, a, b, cmp) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsCmpFunc(data, a, b, cmp) - limit-- - } - - pivot, hint := choosePivotCmpFunc(data, a, b, cmp) - if hint == decreasingHint { - reverseRangeCmpFunc(data, a, b, cmp) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortCmpFunc(data, a, b, cmp) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. 
-		if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
-			mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
-			a = mid
-			continue
-		}
-
-		mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
-		wasPartitioned = alreadyPartitioned
-
-		leftLen, rightLen := mid-a, b-mid
-		balanceThreshold := length / 8
-		if leftLen < rightLen {
-			wasBalanced = leftLen >= balanceThreshold
-			pdqsortCmpFunc(data, a, mid, limit, cmp)
-			a = mid + 1
-		} else {
-			wasBalanced = rightLen >= balanceThreshold
-			pdqsortCmpFunc(data, mid+1, b, limit, cmp)
-			b = mid
-		}
-	}
-}
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
-	data[a], data[pivot] = data[pivot], data[a]
-	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
-	for i <= j && (cmp(data[i], data[a]) < 0) {
-		i++
-	}
-	for i <= j && !(cmp(data[j], data[a]) < 0) {
-		j--
-	}
-	if i > j {
-		data[j], data[a] = data[a], data[j]
-		return j, true
-	}
-	data[i], data[j] = data[j], data[i]
-	i++
-	j--
-
-	for {
-		for i <= j && (cmp(data[i], data[a]) < 0) {
-			i++
-		}
-		for i <= j && !(cmp(data[j], data[a]) < 0) {
-			j--
-		}
-		if i > j {
-			break
-		}
-		data[i], data[j] = data[j], data[i]
-		i++
-		j--
-	}
-	data[j], data[a] = data[a], data[j]
-	return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
-	data[a], data[pivot] = data[pivot], data[a]
-	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
-	for {
-		for i <= j && !(cmp(data[a], data[i]) < 0) {
-			i++
-		}
-		for i <= j && (cmp(data[a], data[j]) < 0) {
-			j--
-		}
-		if i > j {
-			break
-		}
-		data[i], data[j] = data[j], data[i]
-		i++
-		j--
-	}
-	return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
-	const (
-		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
-		shortestShifting = 50 // don't shift any elements on short arrays
-	)
-	i := a + 1
-	for j := 0; j < maxSteps; j++ {
-		for i < b && !(cmp(data[i], data[i-1]) < 0) {
-			i++
-		}
-
-		if i == b {
-			return true
-		}
-
-		if b-a < shortestShifting {
-			return false
-		}
-
-		data[i], data[i-1] = data[i-1], data[i]
-
-		// Shift the smaller one to the left.
-		if i-a >= 2 {
-			for j := i - 1; j >= 1; j-- {
-				if !(cmp(data[j], data[j-1]) < 0) {
-					break
-				}
-				data[j], data[j-1] = data[j-1], data[j]
-			}
-		}
-		// Shift the greater one to the right.
-		if b-i >= 2 {
-			for j := i + 1; j < b; j++ {
-				if !(cmp(data[j], data[j-1]) < 0) {
-					break
-				}
-				data[j], data[j-1] = data[j-1], data[j]
-			}
-		}
-	}
-	return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
-	length := b - a
-	if length >= 8 {
-		random := xorshift(length)
-		modulus := nextPowerOfTwo(length)
-
-		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
-			other := int(uint(random.Next()) & (modulus - 1))
-			if other >= length {
-				other -= length
-			}
-			data[idx], data[a+other] = data[a+other], data[idx]
-		}
-	}
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentCmpFunc(data, i, &swaps, cmp) - j = medianAdjacentCmpFunc(data, j, &swaps, cmp) - k = medianAdjacentCmpFunc(data, k, &swaps, cmp) - } - // Find the median among i, j, k and stores it into j. - j = medianCmpFunc(data, i, j, k, &swaps, cmp) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { - if cmp(data[b], data[a]) < 0 { - *swaps++ - return b, a - } - return a, b -} - -// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { - a, b = order2CmpFunc(data, a, b, swaps, cmp) - b, c = order2CmpFunc(data, b, c, swaps, cmp) - a, b = order2CmpFunc(data, a, b, swaps, cmp) - return b -} - -// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { - return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) -} - -func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortCmpFunc(data, a, b, cmp) - a = b - b += blockSize - } - insertionSortCmpFunc(data, a, n, cmp) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeCmpFunc(data, a, a+blockSize, b, cmp) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeCmpFunc(data, a, m, n, cmp) - } - blockSize *= 2 - } -} - -// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmp(data[h], data[a]) < 0 { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(cmp(data[m], data[h]) < 0) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(cmp(data[p-c], data[c]) < 0) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateCmpFunc(data, start, m, end, cmp) - } - if a < start && start < mid { - symMergeCmpFunc(data, a, start, mid, cmp) - } - if mid < end && end < b { - symMergeCmpFunc(data, mid, end, b, cmp) - } -} - -// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeCmpFunc(data, m-i, m, j, cmp) - i -= j - } else { - swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) - j -= i - } - } - // i == j - swapRangeCmpFunc(data, m-i, m, i, cmp) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index 99b47c3986..0000000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { - child++ - } - if !cmpLess(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !cmpLess(data[a-1], data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !cmpLess(data[a], data[i]) { - i++ - } - for i <= j && cmpLess(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !cmpLess(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if cmpLess(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmpLess(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !cmpLess(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !cmpLess(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c204..48dbb9d84c 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. 
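The generated sort variants deleted above (pdqsort for the comparison-function path, SymMerge for the stable path) are no longer vendored because golang.org/x/exp/slices now defers to the Go standard library. A minimal sketch of the equivalent stdlib calls, assuming Go 1.21 or newer (the pair type is illustrative only):

	package main

	import (
		"cmp"
		"fmt"
		"slices"
	)

	func main() {
		xs := []int{3, 1, 2, 1}
		slices.Sort(xs) // pattern-defeating quicksort, like the deleted pdqsortOrdered
		fmt.Println(xs) // [1 1 2 3]

		type pair struct {
			k string
			v int
		}
		ps := []pair{{"b", 2}, {"a", 1}, {"a", 0}}
		// The stable variant preserves the input order of equal keys, like the
		// deleted SymMerge-based stableOrdered path.
		slices.SortStableFunc(ps, func(x, y pair) int { return cmp.Compare(x.k, y.k) })
		fmt.Println(ps) // [{a 1} {a 0} {b 2}]
	}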
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e4..d9bfa6e1e7 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a1..3a2092f105 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -130,7 +130,7 @@ type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and + // If it's not in the list, the connection will gracefully close, and // a new connection will be created. // // This will trigger a state transition for the SubConn. @@ -142,8 +142,11 @@ type SubConn interface { Connect() // GetOrBuildProducer returns a reference to the existing Producer for this // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. 
Returns a close function which must - // be called when the Producer is no longer needed. + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) // Shutdown shuts down the SubConn gracefully. Any started RPCs will be // allowed to complete. No future calls should be made on the SubConn. @@ -452,8 +455,10 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. Build(grpcClientConnInterface any) (p Producer, close func()) } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 2b87bd79c7..d5ed172ae6 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 0000000000..c519789458 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. 
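+// It is a package-level variable rather than a direct call so that tests can
+// substitute a deterministic implementation. A typical call site passes a
+// swap closure over the slice being shuffled, e.g. (addrs is illustrative):
+//
+//	RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })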
+var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 4d69b4052f..e069346a75 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -26,18 +26,23 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 0000000000..985b6edc7f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,625 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
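+//
+// This policy registers itself as the default "pick_first" balancer when
+// envconfig.NewPickFirstEnabled is set, and under the name "pick_first_leaf"
+// otherwise; see init below.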
+package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + state connectivity.State + lastErr error +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + state: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + state connectivity.State + // scData for active subonns mapped by address. 
+ subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.state = connectivity.TransientFailure + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) 
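+			// The copy above keeps the shuffle below from mutating the
+			// slice owned by the resolver state.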
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+
+	// Since we have a new set of addresses, we are again at first pass.
+	b.firstPass = true
+
+	// If the previous ready SubConn exists in the new address list,
+	// keep this connection and don't create new SubConns.
+	prevAddr := b.addressList.currentAddress()
+	prevAddrsCount := b.addressList.size()
+	b.addressList.updateAddrs(newAddrs)
+	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list,
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.state = connectivity.Connecting
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.requestConnectionLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.requestConnectionLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+		b.firstPass = true
+		b.requestConnectionLocked()
+	}
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMap()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		// Record the address so that later duplicates are actually skipped.
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver.
It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.state { + case connectivity.Idle: + scd.subConn.Connect() + case connectivity.TransientFailure: + // Try the next address. + lastErr = scd.lastErr + continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") + case connectivity.Connecting: + // Wait for the SubConn to report success or failure. + } + return + } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. + b.endFirstPassLocked(lastErr) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.state + sd.state = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. 
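+	// For example, a SubConn removed by an earlier resolver update can still
+	// report TRANSIENT_FAILURE afterwards; such stale updates must not
+	// affect the picker.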
+ if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + return + } + + if newState.ConnectivityState == connectivity.Ready { + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + b.state = connectivity.Idle + b.addressList.reset() + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. We ignore such updates. + + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. 
+ case connectivity.Idle: + sd.subConn.Connect() + } +} + +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.state == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. 
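+// In particular, BalancerAttributes are ignored, so two addresses that differ
+// only in their balancer attributes compare as equal here.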
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 8ad6ce2f09..2a4f2878ae 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,12 +24,14 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) @@ -256,8 +258,8 @@ type acBalancerWrapper struct { ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +269,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -275,16 +280,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve setConnectedAddress(&scs, curAddr) } acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -335,8 +332,8 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. 
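+	// (acbw.producers is only accessed while holding producersMu, per the
+	// locking just above.)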
pData := acbw.producers[pb] @@ -353,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3fd..19763f8edd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -825,14 +825,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -1141,10 +1140,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1183,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1251,6 +1254,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1297,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. 
It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1504,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 4114358545..e163a473df 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 2b285beee3..518692c3af 100644 --- 
a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a9266..85540f86a7 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54e..3ec662799a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. 
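The WithContextDialer comment above warns that a custom dialer bypasses gRPC's default TCP keepalive setup and points at issue 23459. One way to restore keepalives is net.Dialer's KeepAlive field; a hedged sketch using that simpler field rather than the Control-based approach the comment mentions (the target address is purely illustrative):

package main

import (
    "context"
    "log"
    "net"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // net.Dialer's KeepAlive field enables TCP keepalives on dialed
    // connections, which a bare custom dialer would leave disabled.
    d := &net.Dialer{KeepAlive: 30 * time.Second}
    conn, err := grpc.NewClient("dns:///example.com:50051",
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
            return d.DialContext(ctx, "tcp", addr)
        }),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}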
func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6ea..b5a8249929 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b25..90103847c5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba8..b20802e6e9 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. 
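CopyFrom above snapshots each channelz counter with a paired atomic Load and Store: each field copy is race-free on its own, though the snapshot as a whole is not taken under one lock, which matches channelz's best-effort semantics. A compact sketch of the same pattern on a hypothetical metrics type:

package main

import (
    "fmt"
    "sync/atomic"
)

// callMetrics is a hypothetical stand-in for channelz's ServerMetrics.
type callMetrics struct {
    CallsStarted   atomic.Int64
    CallsSucceeded atomic.Int64
    CallsFailed    atomic.Int64
}

// CopyFrom copies o field by field; no individual counter is ever torn,
// but fields may be copied at slightly different instants.
func (m *callMetrics) CopyFrom(o *callMetrics) {
    m.CallsStarted.Store(o.CallsStarted.Load())
    m.CallsSucceeded.Store(o.CallsSucceeded.Load())
    m.CallsFailed.Store(o.CallsFailed.Load())
}

func main() {
    var a, b callMetrics
    a.CallsStarted.Store(5)
    a.CallsFailed.Store(1)
    b.CopyFrom(&a)
    fmt.Println(b.CallsStarted.Load(), b.CallsSucceeded.Load(), b.CallsFailed.Load())
}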
func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b8674032..2bffe47776 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. + Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 452985f8d8..6e7dd6b772 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -50,6 +50,11 @@ var ( // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the existing pickfirst implementation. This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d63927..8e8e861280 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775e..683d1955c6 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC.
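NewPickFirstEnabled above is read through the file's boolFromEnv helper, whose signature appears as context in the hunk. A sketch of how such a helper plausibly works, assuming a case-insensitive "true"/"false" comparison against the default (the real body lies outside the hunk, so this is an approximation):

package main

import (
    "fmt"
    "os"
    "strings"
)

func boolFromEnv(envVar string, def bool) bool {
    if def {
        // Default-true flags stay on unless explicitly set to "false".
        return !strings.EqualFold(os.Getenv(envVar), "false")
    }
    // Default-false flags stay off unless explicitly set to "true".
    return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
    os.Setenv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", "true")
    fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)) // true
}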
This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c5..2c13ee9dac 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 7aae9240ff..20b4dc3d35 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -205,10 +207,6 @@ var ( // default resolver scheme. UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. ConnectedAddress any // func (scs SubConnState) resolver.Address @@ -235,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b0..374c12fb77 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -177,7 +177,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. 
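The baseContentType comment above describes gRPC content-types such as "application/grpc", "application/grpc+proto" and "application/grpc;json". A simplified re-implementation of content-subtype extraction under those rules, not the library's exact helper:

package main

import (
    "fmt"
    "strings"
)

const baseContentType = "application/grpc"

// contentSubtype reports the content-subtype and whether the content-type
// is valid for gRPC: "" for a bare "application/grpc", and the text after
// "+" or ";" otherwise.
func contentSubtype(contentType string) (string, bool) {
    if !strings.HasPrefix(contentType, baseContentType) {
        return "", false
    }
    rest := contentType[len(baseContentType):]
    if rest == "" {
        return "", true
    }
    if rest[0] == '+' || rest[0] == ';' {
        return rest[1:], true
    }
    return "", false
}

func main() {
    fmt.Println(contentSubtype("application/grpc+proto")) // proto true
    fmt.Println(contentSubtype("application/grpc"))       // (empty) true
    fmt.Println(contentSubtype("application/json"))       // (empty) false
}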
[Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +237,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." { return nil, nil } var newAddrs []resolver.Address diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f9..79044657be 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand } } +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, } } +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand } } +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 757925381f..1186f1e9a9 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // Details returns a slice of details messages attached to the status. 
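Each Record* method above follows one shape: validate the labels against the handle's descriptor, then forward the measurement to every recorder in the list. A generic sketch of that fan-out with hypothetical recorder types standing in for the estats ones:

package main

import "fmt"

// recorder is a hypothetical stand-in for an estats metrics recorder.
type recorder interface {
    RecordInt64Count(metric string, incr int64, labels ...string)
}

type printRecorder struct{ id int }

func (p printRecorder) RecordInt64Count(metric string, incr int64, labels ...string) {
    fmt.Printf("recorder %d: %s += %d, labels %v\n", p.id, metric, incr, labels)
}

// recorderList fans a measurement out to every recorder, mirroring the
// shape of MetricsRecorderList's Record* methods above.
type recorderList struct {
    recorders []recorder
}

func (l *recorderList) RecordInt64Count(metric string, incr int64, labels ...string) {
    // The real code first runs verifyLabels against the handle's
    // descriptor before fanning out.
    for _, r := range l.recorders {
        r.RecordInt64Count(metric, incr, labels...)
    }
}

func main() {
    l := &recorderList{recorders: []recorder{printRecorder{1}, printRecorder{2}}}
    l.RecordInt64Count("grpc.client.attempt.started", 1, "grpc.target")
}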
// If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the given type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. + details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c769deab53..62b81885d8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held.
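The long comment above leans on protoadapt's conversion rules. A small runnable sketch of the round-trip it describes, using wrapperspb purely as an example of a generated message that implements both the V1 and V2 APIs:

package main

import (
    "fmt"

    "google.golang.org/protobuf/protoadapt"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    // Generated messages implement both APIs, so the MessageV2Of and
    // MessageV1Of round-trip returns the original value without wrapping
    // (points 1 and 2 in the comment above).
    orig := wrapperspb.String("hello")
    v2 := protoadapt.MessageV2Of(orig)
    back := protoadapt.MessageV1Of(v2)
    fmt.Println(back == protoadapt.MessageV1(orig)) // true: same concrete value
}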
@@ -335,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -527,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -1008,6 +1010,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1030,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1316,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
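The keepaliveDone channel introduced above gives Close a second join point: it already waits on readerDone for the reader goroutine and now waits for keepalive as well, so no transport goroutine outlives the call. A stripped-down sketch of the join pattern, with a hypothetical transport type:

package main

import (
    "fmt"
    "time"
)

type transport struct {
    readerDone    chan struct{}
    keepaliveDone chan struct{}
}

func (t *transport) reader() {
    defer close(t.readerDone)
    time.Sleep(10 * time.Millisecond) // stands in for the frame-reading loop
}

func (t *transport) keepalive() {
    defer close(t.keepaliveDone)
    time.Sleep(20 * time.Millisecond) // stands in for the ping loop
}

// Close blocks until both goroutines have exited, so all resources are
// cleaned up before it returns.
func (t *transport) Close() {
    <-t.readerDone
    <-t.keepaliveDone
    fmt.Println("all transport goroutines exited")
}

func main() {
    t := &transport{
        readerDone:    make(chan struct{}),
        keepaliveDone: make(chan struct{}),
    }
    go t.reader()
    go t.keepalive()
    t.Close()
}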
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1393,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1628,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1669,7 +1680,7 @@ func (t *http2Client) reader(errCh chan<- error) { continue } // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + errClose = connectionErrorf(true, err, "error reading from server: %v", err) return } switch frame := frame.(type) { @@ -1684,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1697,6 +1708,13 @@ func (t *http2Client) reader(errCh chan<- error) { // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1720,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86cc..e12cb0bc91 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -547,6 +547,15 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. 
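In the reader and keepalive hunks above, frame handlers no longer call t.Close directly; they hand an error to a deferred function that first closes the goroutine's done channel and only then calls Close. Since Close itself blocks on those channels, closing from inside the loop would deadlock. A minimal sketch of that ordering, with hypothetical names:

package main

import (
    "errors"
    "fmt"
)

type readLoop struct {
    done chan struct{}
}

// close mimics the transport's Close: it blocks until the loop goroutine
// has finished, which is why the loop must never call it before closing
// done.
func (l *readLoop) close(err error) {
    <-l.done
    fmt.Println("closed with:", err)
}

func (l *readLoop) run(handle func() error) {
    var errClose error
    defer func() {
        close(l.done)
        if errClose != nil {
            l.close(errClose) // safe: done is already closed above
        }
    }()
    errClose = handle()
}

func main() {
    l := &readLoop{done: make(chan struct{})}
    l.run(func() error {
        return errors.New("received goaway and there are no active streams")
    })
}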
If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. func (s *Stream) ReadHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { @@ -616,7 +625,7 @@ func (t *transportReader) ReadHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go index 4d66b2ccc2..ecbf0b9a73 100644 --- a/vendor/google.golang.org/grpc/mem/buffers.go +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -232,12 +239,21 @@ func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. +func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len returns the length of the underlying byte slice.
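The NewBuffer change above keys the pooling decision on cap rather than len, for the reason its comment gives: a large pooled backing array that currently holds only a few bytes must still travel the pooled path, or it will never be returned to the pool. A tiny illustration; the threshold value is assumed for the sketch:

package main

import "fmt"

// Assumed threshold, for illustration only.
const bufferPoolingThreshold = 1 << 10

func isBelowBufferPoolingThreshold(size int) bool {
    return size <= bufferPoolingThreshold
}

func main() {
    // A large backing array is acquired, but only a short payload is
    // written into it.
    backing := make([]byte, 0, 16*1024)
    backing = append(backing, "short payload"...)

    // A len-based check classifies the buffer as small and skips pooling,
    // so the 16 KiB array would never return to the pool.
    fmt.Println("pooled if keyed on len:", !isBelowBufferPoolingThreshold(len(backing)))
    // The cap-based check keeps it on the pooled path, as in the hunk above.
    fmt.Println("pooled if keyed on cap:", !isBelowBufferPoolingThreshold(cap(backing)))
}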
+func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2d96f1405e..aba1ae3e67 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -791,9 +791,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 187fbf1195..5a47094ae8 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.67.0" +const Version = "1.68.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index 6d32c9bd9b..3ccf8824b6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -125,11 +125,11 @@ github.com/containerd/log # github.com/containerd/platforms v0.2.1 ## explicit; go 1.20 github.com/containerd/platforms -# github.com/containerd/stargz-snapshotter/estargz v0.15.1 -## explicit; go 1.19 +# github.com/containerd/stargz-snapshotter/estargz v0.16.3 +## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/containerd/typeurl/v2 v2.2.0 +# github.com/containerd/typeurl/v2 v2.2.3 ## explicit; go 1.21 github.com/containerd/typeurl/v2 # github.com/containernetworking/cni v1.2.3 @@ -251,8 +251,8 @@ github.com/containers/conmon/runner/config # github.com/containers/gvisor-tap-vsock v0.8.1 ## explicit; go 1.22.0 github.com/containers/gvisor-tap-vsock/pkg/types -# github.com/containers/image/v5 v5.33.0 -## explicit; go 1.22.6 +# github.com/containers/image/v5 v5.33.1-0.20250102201119-717b49b0ac3d +## explicit; go 1.22.8 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -335,8 +335,8 @@ github.com/containers/libtrust # github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 ## explicit; go 1.20 github.com/containers/luksy -# github.com/containers/ocicrypt v1.2.0 -## explicit; go 1.21 +# github.com/containers/ocicrypt v1.2.1 +## explicit; go 1.22 github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher github.com/containers/ocicrypt/config @@ -362,7 +362,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.56.0 +# github.com/containers/storage v1.56.1-0.20250102204143-48e12ae2d80f ## explicit; go 1.22.0 github.com/containers/storage github.com/containers/storage/drivers @@ -376,12 +376,14 @@ github.com/containers/storage/drivers/register github.com/containers/storage/drivers/vfs github.com/containers/storage/drivers/windows github.com/containers/storage/drivers/zfs +github.com/containers/storage/internal/dedup 
github.com/containers/storage/pkg/archive github.com/containers/storage/pkg/chrootarchive github.com/containers/storage/pkg/chunked github.com/containers/storage/pkg/chunked/compressor github.com/containers/storage/pkg/chunked/dump -github.com/containers/storage/pkg/chunked/internal +github.com/containers/storage/pkg/chunked/internal/minimal +github.com/containers/storage/pkg/chunked/internal/path github.com/containers/storage/pkg/chunked/toc github.com/containers/storage/pkg/config github.com/containers/storage/pkg/directory @@ -468,8 +470,7 @@ github.com/distribution/reference ## explicit github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 -github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v27.4.0+incompatible +# github.com/docker/docker v27.4.1+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -966,8 +967,8 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/proglottis/gpgme v0.1.3 -## explicit; go 1.11 +# github.com/proglottis/gpgme v0.1.4 +## explicit; go 1.17 github.com/proglottis/gpgme # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 @@ -989,7 +990,7 @@ github.com/rootless-containers/rootlesskit/v2/pkg/port/portutil # github.com/seccomp/libseccomp-golang v0.10.0 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/secure-systems-lab/go-securesystemslib v0.8.0 +# github.com/secure-systems-lab/go-securesystemslib v0.9.0 ## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/encrypted # github.com/segmentio/ksuid v1.0.4 @@ -1017,8 +1018,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey github.com/sigstore/rekor/pkg/generated/client/tlog github.com/sigstore/rekor/pkg/generated/models github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.9 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore v1.8.11 +## explicit; go 1.22.0 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/oauth github.com/sigstore/sigstore/pkg/oauthflow @@ -1035,6 +1036,10 @@ github.com/skeema/knownhosts # github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 ## explicit github.com/skratchdot/open-golang/open +# github.com/smallstep/pkcs7 v0.1.1 +## explicit; go 1.14 +github.com/smallstep/pkcs7 +github.com/smallstep/pkcs7/internal/legacy/x509 # github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra @@ -1049,8 +1054,8 @@ github.com/stefanberger/go-pkcs11uri github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.19.1 -## explicit; go 1.22.5 +# github.com/sylabs/sif/v2 v2.20.2 +## explicit; go 1.22.0 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit @@ -1131,9 +1136,6 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 -## explicit; go 1.11 -go.mozilla.org/pkcs7 # go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io @@ -1180,6 +1182,8 @@ golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 +golang.org/x/crypto/cryptobyte 
+golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 @@ -1202,9 +1206,8 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts golang.org/x/crypto/twofish golang.org/x/crypto/xts -# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f +# golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 ## explicit; go 1.22.0 -golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices # golang.org/x/mod v0.22.0 @@ -1227,7 +1230,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.23.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1271,15 +1274,15 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.27.0 +# golang.org/x/tools v0.28.0 ## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector # google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.67.0 -## explicit; go 1.21 +# google.golang.org/grpc v1.68.1 +## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1287,6 +1290,8 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz