diff --git a/cmd/machine-api-operator/start.go b/cmd/machine-api-operator/start.go index dce09c53b..cfcde08f6 100644 --- a/cmd/machine-api-operator/start.go +++ b/cmd/machine-api-operator/start.go @@ -2,21 +2,24 @@ package main import ( "context" + "crypto/tls" "errors" "flag" "fmt" - "net/http" "os" + "reflect" "strconv" + "sync" + "sync/atomic" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" "github.com/spf13/pflag" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/record" @@ -24,16 +27,25 @@ import ( "k8s.io/utils/clock" osconfigv1 "github.com/openshift/api/config/v1" + osclientset "github.com/openshift/client-go/config/clientset/versioned" + utiltls "github.com/openshift/controller-runtime-common/pkg/tls" "github.com/openshift/library-go/pkg/operator/events" - "github.com/openshift/machine-api-operator/pkg/metrics" + maometrics "github.com/openshift/machine-api-operator/pkg/metrics" "github.com/openshift/machine-api-operator/pkg/operator" "github.com/openshift/machine-api-operator/pkg/util" "github.com/openshift/machine-api-operator/pkg/version" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) const ( // defaultMetricsPort is the default port to expose metrics. - defaultMetricsPort = 8080 + defaultMetricsPort = 8443 + metricsCertDir = "/etc/tls/private" + metricsCertFile = "tls.crt" + metricsKeyFile = "tls.key" ) var ( @@ -82,10 +94,20 @@ func runStartCmd(cmd *cobra.Command, args []string) error { return fmt.Errorf("error creating clients: %v", err) } stopCh := make(chan struct{}) + leaderElectionCtx, leaderElectionCancel := context.WithCancel(context.Background()) + var shutdownOnce sync.Once + var shuttingDown atomic.Bool + shutdown := func() { + shutdownOnce.Do(func() { + shuttingDown.Store(true) + close(stopCh) + leaderElectionCancel() + }) + } le := util.GetLeaderElectionConfig(cb.config, osconfigv1.LeaderElection{}) - leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{ + leaderelection.RunOrDie(leaderElectionCtx, leaderelection.LeaderElectionConfig{ Lock: CreateResourceLock(cb, componentNamespace, componentName), RenewDeadline: le.RenewDeadline.Duration, RetryPeriod: le.RetryPeriod.Duration, @@ -93,6 +115,9 @@ func runStartCmd(cmd *cobra.Command, args []string) error { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { ctrlCtx := CreateControllerContext(cb, stopCh, componentNamespace) + if err := setupTLSProfileWatcher(ctrlCtx, shutdown); err != nil { + klog.Fatalf("Unable to set up TLS profile watcher: %v", err) + } startControllersOrDie(ctrlCtx) ctrlCtx.KubeNamespacedInformerFactory.Start(ctrlCtx.Stop) ctrlCtx.ConfigInformerFactory.Start(ctrlCtx.Stop) @@ -100,15 +125,19 @@ func runStartCmd(cmd *cobra.Command, args []string) error { startMetricsCollectionAndServer(ctrlCtx) close(ctrlCtx.InformersStarted) - select {} + <-stopCh }, OnStoppedLeading: func() { + if shuttingDown.Load() { + klog.Info("Leader election stopped due to shutdown") + return + } klog.Fatalf("Leader election lost") }, }, 
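+			// ReleaseOnCancel lets the elector release the leader lock as soon as
+			// leaderElectionCtx is cancelled during shutdown, so a replacement
+			// instance can take over without waiting for the lease to expire.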
ReleaseOnCancel: true, }) - panic("unreachable") + return nil } func initMachineAPIInformers(ctx *ControllerContext) { @@ -182,11 +211,11 @@ func startControllersOrDie(ctx *ControllerContext) { func startMetricsCollectionAndServer(ctx *ControllerContext) { machineInformer := ctx.MachineInformerFactory.Machine().V1beta1().Machines() machinesetInformer := ctx.MachineInformerFactory.Machine().V1beta1().MachineSets() - machineMetricsCollector := metrics.NewMachineCollector( + machineMetricsCollector := maometrics.NewMachineCollector( machineInformer, machinesetInformer, componentNamespace) - prometheus.MustRegister(machineMetricsCollector) + ctrlmetrics.Registry.MustRegister(machineMetricsCollector) metricsPort := defaultMetricsPort if port, ok := os.LookupEnv("METRICS_PORT"); ok { v, err := strconv.Atoi(port) @@ -195,17 +224,136 @@ func startMetricsCollectionAndServer(ctx *ControllerContext) { } metricsPort = v } - klog.V(4).Info("Starting server to serve prometheus metrics") - go startHTTPMetricServer(fmt.Sprintf("localhost:%d", metricsPort)) + klog.V(4).Info("Starting secure metrics server") + tlsOpts, err := metricsTLSOptions(ctx) + if err != nil { + klog.Fatalf("Unable to configure metrics TLS: %v", err) + } + metricsServer, err := newSecureMetricsServer( + ctx, + fmt.Sprintf(":%d", metricsPort), + tlsOpts, + ) + if err != nil { + klog.Fatalf("Unable to initialize secure metrics server: %v", err) + } + + metricsServerCtx, cancel := context.WithCancel(context.Background()) + go func() { + <-ctx.Stop + cancel() + }() + + go func() { + if err := metricsServer.Start(metricsServerCtx); err != nil { + klog.Fatalf("Unable to start secure metrics server: %v", err) + } + }() +} + +func metricsTLSOptions(ctx *ControllerContext) ([]func(*tls.Config), error) { + scheme := runtime.NewScheme() + if err := osconfigv1.Install(scheme); err != nil { + return nil, fmt.Errorf("unable to add config.openshift.io scheme: %w", err) + } + + k8sClient, err := client.New(ctx.ClientBuilder.config, client.Options{Scheme: scheme}) + if err != nil { + return nil, fmt.Errorf("unable to create Kubernetes client: %w", err) + } + + tlsSecurityProfileSpec, err := utiltls.FetchAPIServerTLSProfile(context.Background(), k8sClient) + if err != nil { + return nil, fmt.Errorf("unable to get TLS profile from API server: %w", err) + } + + tlsConfigFn, unsupportedCiphers := utiltls.NewTLSConfigFromProfile(tlsSecurityProfileSpec) + if len(unsupportedCiphers) > 0 { + klog.Infof("TLS configuration contains unsupported ciphers that will be ignored: %v", unsupportedCiphers) + } + + return []func(*tls.Config){tlsConfigFn}, nil +} + +func newSecureMetricsServer(ctx *ControllerContext, metricsAddr string, tlsOpts []func(*tls.Config)) (metricsserver.Server, error) { + httpClient, err := rest.HTTPClientFor(ctx.ClientBuilder.config) + if err != nil { + return nil, fmt.Errorf("unable to create HTTP client for metrics authn/authz: %w", err) + } + + return metricsserver.NewServer(metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: true, + FilterProvider: filters.WithAuthenticationAndAuthorization, + CertDir: metricsCertDir, + CertName: metricsCertFile, + KeyName: metricsKeyFile, + TLSOpts: tlsOpts, + }, ctx.ClientBuilder.config, httpClient) +} + +func setupTLSProfileWatcher(ctx *ControllerContext, shutdown func()) error { + configClient := ctx.ClientBuilder.OpenshiftClientOrDie("tls-profile-watcher") + initialProfile, err := fetchAPIServerTLSProfileSpec(context.Background(), configClient) + if err != nil { + return err + } + + 
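+	// Watch the cluster-scoped APIServer resource and compare its TLS security
+	// profile against the snapshot taken above; any change triggers a graceful
+	// shutdown so the operator restarts with the new TLS configuration.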
apiServerInformer := ctx.ConfigInformerFactory.Config().V1().APIServers().Informer() + _, err = apiServerInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + handleTLSProfileEvent(obj, initialProfile, shutdown) + }, + UpdateFunc: func(_, newObj interface{}) { + handleTLSProfileEvent(newObj, initialProfile, shutdown) + }, + }) + if err != nil { + return fmt.Errorf("failed to add APIServer event handler: %w", err) + } + + return nil +} + +func fetchAPIServerTLSProfileSpec(ctx context.Context, configClient osclientset.Interface) (osconfigv1.TLSProfileSpec, error) { + apiServer, err := configClient.ConfigV1().APIServers().Get(ctx, utiltls.APIServerName, metav1.GetOptions{}) + if err != nil { + return osconfigv1.TLSProfileSpec{}, fmt.Errorf("failed to get APIServer %q: %w", utiltls.APIServerName, err) + } + + profile, err := utiltls.GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return osconfigv1.TLSProfileSpec{}, fmt.Errorf("failed to get TLS profile from APIServer %q: %w", utiltls.APIServerName, err) + } + + return profile, nil } -func startHTTPMetricServer(metricsPort string) { - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.Handler()) +func handleTLSProfileEvent(obj interface{}, initialProfile osconfigv1.TLSProfileSpec, shutdown func()) { + apiServer, ok := obj.(*osconfigv1.APIServer) + if !ok { + return + } + if apiServer.Name != utiltls.APIServerName { + return + } + + currentProfile, err := utiltls.GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + klog.Errorf("Failed to get TLS profile from APIServer %q: %v", apiServer.Name, err) + return + } - server := &http.Server{ - Addr: metricsPort, - Handler: mux, + if reflect.DeepEqual(initialProfile, currentProfile) { + klog.V(2).Info("TLS security profile unchanged") + return } - klog.Fatal(server.ListenAndServe()) + + klog.Infof("TLS security profile has changed, initiating a shutdown to pick up the new configuration: initialMinTLSVersion=%s currentMinTLSVersion=%s initialCiphers=%v currentCiphers=%v", + initialProfile.MinTLSVersion, + currentProfile.MinTLSVersion, + initialProfile.Ciphers, + currentProfile.Ciphers, + ) + shutdown() } diff --git a/go.mod b/go.mod index bec2870d8..1af6ea8e2 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,13 @@ go 1.24.0 // These are needed for the OTE tests. Due to how we get the kubeconfig from the command line, there doesn't seem to be // an API yet we can leverage so that I do not have to copy what openshift/kubernetes/openshift-hack/cmd/k8s-tests-ext did to initialize. replace ( + // TEMPORARY: selinux v1.13.0 requires filepath-securejoin v0.6.0+ (pathrs-lite), but runc v1.2.5 + // requires MkdirAllHandle which was removed in v0.6.0. runc can't be bumped to v1.3+ because + // openshift/kubernetes still imports runc/libcontainer/cgroups (moved to a separate module in v1.3). + // v0.5.2 is the bridge version that has both APIs. Remove this replace once + // https://github.com/openshift/kubernetes/pull/2593 lands and the k8s.io/kubernetes replace below is updated. 
+ github.com/cyphar/filepath-securejoin => github.com/cyphar/filepath-securejoin v0.5.2 + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 // openshift kubernetes has very old copy of k8s.io/kubernetes/pkg/kubelet/server/server.go k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292 // openshift kubernetes has very old copy of k8s.io/kubernetes/cmd/kubelet/app/options/options.go @@ -16,28 +23,30 @@ require ( github.com/go-logr/logr v1.4.3 github.com/golangci/golangci-lint v1.64.8 github.com/google/uuid v1.6.0 - github.com/onsi/ginkgo/v2 v2.27.2 - github.com/onsi/gomega v1.38.2 + github.com/onsi/ginkgo/v2 v2.28.1 + github.com/onsi/gomega v1.39.1 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7 - github.com/openshift/api v0.0.0-20260114133223-6ab113cb7368 - github.com/openshift/client-go v0.0.0-20251202151200-fb4471581cf8 + github.com/openshift/api v0.0.0-20260213155647-8fe9fe363807 + github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 - github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5 + github.com/openshift/controller-runtime-common v0.0.0-20260213175913-767fef058eca + github.com/openshift/library-go v0.0.0-20260213153706-03f1709971c5 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.23.2 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 github.com/vmware/govmomi v0.52.0 - golang.org/x/net v0.46.0 + golang.org/x/net v0.49.0 // indirect golang.org/x/time v0.14.0 gopkg.in/gcfg.v1 v1.2.3 // indirect - k8s.io/api v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/apiserver v0.34.1 - k8s.io/client-go v0.34.1 + k8s.io/api v0.34.3 + k8s.io/apimachinery v0.34.3 + k8s.io/apiserver v0.34.3 + k8s.io/client-go v0.34.3 k8s.io/cloud-provider-vsphere v1.32.2 - k8s.io/component-base v0.34.1 + k8s.io/component-base v0.34.3 k8s.io/cri-client v0.34.1 // indirect k8s.io/csi-translation-lib v0.34.1 // indirect k8s.io/dynamic-resource-allocation v0.34.1 // indirect @@ -47,16 +56,14 @@ require ( k8s.io/kubernetes v1.34.1 k8s.io/mount-utils v0.34.1 // indirect k8s.io/sample-apiserver v0.34.1 // indirect - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/utils v0.0.0-20260108192941-914a6e750570 sigs.k8s.io/cluster-api v1.11.3 - sigs.k8s.io/controller-runtime v0.22.3 + sigs.k8s.io/controller-runtime v0.22.5 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240923090159-236e448db12c sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 sigs.k8s.io/yaml v1.6.0 ) -require github.com/pkg/errors v0.9.1 - require ( 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 4d63.com/gochecknoglobals v0.2.2 // indirect @@ -67,7 +74,7 @@ require ( github.com/Antonboom/nilnil v1.0.1 // indirect github.com/Antonboom/testifylint v1.5.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Crocmagnon/fatcontext v0.7.1 // indirect github.com/Djarvur/go-err113 
v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect @@ -112,9 +119,9 @@ require ( github.com/containerd/ttrpc v1.2.6 // indirect github.com/containerd/typeurl/v2 v2.2.2 // indirect github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/go-systemd/v22 v22.6.0 // indirect github.com/curioswitch/go-reassign v0.3.0 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect @@ -171,7 +178,7 @@ require ( github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect @@ -241,8 +248,8 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runc v1.2.5 // indirect - github.com/opencontainers/runtime-spec v1.2.0 // indirect - github.com/opencontainers/selinux v1.11.1 // indirect + github.com/opencontainers/runtime-spec v1.3.0 // indirect + github.com/opencontainers/selinux v1.13.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -319,26 +326,25 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.43.0 // indirect + golang.org/x/crypto v0.47.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - golang.org/x/mod v0.28.0 // indirect + golang.org/x/mod v0.32.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect - golang.org/x/tools v0.37.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/tools v0.41.0 // indirect golang.org/x/tools/go/expect v0.1.1-deprecated // indirect - golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/grpc v1.72.1 // indirect google.golang.org/protobuf v1.36.8 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect 
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect @@ -346,24 +352,24 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.6.1 // indirect - k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apiextensions-apiserver v0.34.3 // indirect k8s.io/cli-runtime v0.34.1 // indirect k8s.io/cloud-provider v0.32.0 // indirect k8s.io/cluster-bootstrap v0.33.3 // indirect k8s.io/component-helpers v0.34.1 // indirect k8s.io/controller-manager v0.32.1 // indirect k8s.io/cri-api v0.34.1 // indirect - k8s.io/kms v0.34.1 // indirect + k8s.io/kms v0.34.3 // indirect k8s.io/kube-aggregator v0.34.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/kubelet v0.34.1 // indirect k8s.io/pod-security-admission v0.32.2 // indirect mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect ) diff --git a/go.sum b/go.sum index daef2f2bf..04a9f937a 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5l github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVUt54PjM= github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= @@ -114,15 +114,15 @@ github.com/containerd/typeurl/v2 v2.2.2 h1:3jN/k2ysKuPCsln5Qv8bzR9cxal8XjkxPogJf github.com/containerd/typeurl/v2 v2.2.2/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty 
v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.5.2 h1:w/T2bhKr4pgwG0SUGjU4S/Is9+zUknLh5ROTJLzWX8E= +github.com/cyphar/filepath-securejoin v0.5.2/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -223,7 +223,6 @@ github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUW github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= @@ -268,8 +267,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= @@ -433,8 +432,8 @@ github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/opencontainers/cgroups v0.0.3 h1:Jc9dWh/0YLGjdy6J/9Ln8NM5BfTA4W2BY0GMozy3aDU= 
github.com/opencontainers/cgroups v0.0.3/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -443,28 +442,30 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.2.5 h1:8KAkq3Wrem8bApgOHyhRI/8IeLXIfmZ6Qaw6DNSLnA4= github.com/opencontainers/runc v1.2.5/go.mod h1:dOQeFo29xZKBNeRBI0B19mJtfHv68YgCTh1X+YphA+4= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= -github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= +github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.13.0 h1:Zza88GWezyT7RLql12URvoxsbLfjFx988+LGaWfbL84= +github.com/opencontainers/selinux v1.13.0/go.mod h1:XxWTed+A/s5NNq4GmYScVy+9jzXhGBVEOAyucdRUY8s= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7 h1:Z1swlS6b3Adm6RPhjqefs3DWnNFLDxRX+WC8GMXhja4= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20260114133223-6ab113cb7368 h1:kSr3DOlq0NCrHd65HB2o/pBsks7AfRm+fkpf9RLUPoc= -github.com/openshift/api v0.0.0-20260114133223-6ab113cb7368/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= -github.com/openshift/client-go v0.0.0-20251202151200-fb4471581cf8 h1:97rgISdT4IOmXlmEUV5Wr6d8BzzjPclzAjCARLbSlT0= -github.com/openshift/client-go v0.0.0-20251202151200-fb4471581cf8/go.mod h1:WVJnsrbSO1J8x8KceOmv1d5CpoN34Uzsaz1O4MIOKJI= +github.com/openshift/api v0.0.0-20260213155647-8fe9fe363807 h1:coR/haF16EW8KS1E/PwJfDzMSy4mU9K0H1rcHejqYDY= +github.com/openshift/api v0.0.0-20260213155647-8fe9fe363807/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 h1:6rd4zSo2UaWQcAPZfHK9yzKVqH0BnMv1hqMzqXZyTds= +github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13/go.mod h1:YvOmPmV7wcJxpfhTDuFqqs2Xpb3M3ovsM6Qs/i2ptq4= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d h1:+sqUThLi/lmgT5/scmmjnS6+RZFtbdxRAscNfCPyLPI= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d/go.mod h1:9+FWWWLkVrnBo1eYhA/0Ehlq5JMgIAHtcB0IF+qV1AA= github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 h1:eVnkMTFnirnoUOlAUT3Hy8WriIi1JoSrilWym3Dl8Q4= github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957/go.mod h1:TBlORAAtNZ/Tl86pO7GjNXKsH/g0QAW5GnvYstdOhYI= +github.com/openshift/controller-runtime-common v0.0.0-20260213175913-767fef058eca h1:EOc/lbyZxtn1b/BvRwnLpbgBg0F+2RUQd3xMiA4/JsQ= +github.com/openshift/controller-runtime-common v0.0.0-20260213175913-767fef058eca/go.mod h1:59nLF3/IfhAtoQZfUzlCyidTAdlVT6KiVeTicUi2wuA= github.com/openshift/kubernetes v1.30.1-0.20251027205255-4e0347881cbd h1:WCCP41uY1QoqrDeykXFF/Dmf8NV3fnnAXUnh1oVFxW8= github.com/openshift/kubernetes 
v1.30.1-0.20251027205255-4e0347881cbd/go.mod h1:w3+IfrXNp5RosdDXg3LB55yijJqR/FwouvVntYHQf0o= github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 h1:Uq4CTGAl32NYNK7KD35oRg7pdZOFkUF+/2wGSyR0VYA= github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292/go.mod h1:TRSSqgXggJaDK5vtVtlQ9wEYOk32Pl+9tf0ROf3ljiM= github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292 h1:ITqtj/xBNlSPChpvV4a+VQ93Sk5RFkwRx+CnIWBrZ98= github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292/go.mod h1:+bTwPbT5dZB8j6eKQHBZRMfNmY6MEryba1wQljr9VWw= -github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5 h1:Gq8jCFgSrilZ2ZHjQleFZWlblikc1aaRZ0hqs+yvrP4= -github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5/go.mod h1:OlFFws1AO51uzfc48MsStGE4SFMWlMZD0+f5a/zCtKI= +github.com/openshift/library-go v0.0.0-20260213153706-03f1709971c5 h1:9Pe6iVOMjt9CdA/vaKBNUSoEIjIe1po5Ha3ABRYXLJI= +github.com/openshift/library-go v0.0.0-20260213153706-03f1709971c5/go.mod h1:K3FoNLgNBFYbFuG+Kr8usAnQxj1w84XogyUp2M8rK8k= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 h1:PMTgifBcBRLJJiM+LgSzPDTk9/Rx4qS09OUrfpY6GBQ= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= @@ -695,8 +696,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -705,8 +706,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= @@ -724,8 +725,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod 
v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -741,8 +742,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -754,8 +755,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -778,8 +779,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -788,8 +789,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -800,8 +801,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -824,8 +825,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -847,8 +848,8 @@ google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXn gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod 
h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= @@ -867,24 +868,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= +k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= k8s.io/cloud-provider v0.32.0 h1:QXYJGmwME2q2rprymbmw2GroMChQYc/MWN6l/I4Kgp8= k8s.io/cloud-provider v0.32.0/go.mod h1:cz3gVodkhgwi2ugj/JUPglIruLSdDaThxawuDyCHfr8= k8s.io/cloud-provider-vsphere v1.32.2 h1:/OWUMXhRIDACM2j9Loj/Jh3/Z7q6o7kFbE78iCs92Zg= k8s.io/cloud-provider-vsphere v1.32.2/go.mod h1:v+shTeZ4WM232SEePcD+svnV+atFeEAc07Y0EIWn36M= k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= -k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= -k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk= +k8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c= k8s.io/component-helpers v0.34.1 h1:gWhH3CCdwAx5P3oJqZKb4Lg5FYZTWVbdWtOI8n9U4XY= k8s.io/component-helpers v0.34.1/go.mod h1:4VgnUH7UA/shuBur+OWoQC0xfb69sy/93ss0ybZqm3c= k8s.io/controller-manager v0.32.1 h1:z3oQp1O5l0cSzM/MKf8V4olhJ9TmnELoJRPcV/v1s+Y= @@ -899,12 +900,12 @@ k8s.io/dynamic-resource-allocation v0.34.1 h1:pd9qhOeAFkn8eOO4BthAiGHQc8pu+N6TK/ k8s.io/dynamic-resource-allocation v0.34.1/go.mod h1:Zlpqyh6EKhTVoQDe5BS31/8oMXGfG6c12ydj3ChXyuw= k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.34.1 h1:iCFOvewDPzWM9fMTfyIPO+4MeuZ0tcZbugxLNSHFG4w= -k8s.io/kms v0.34.1/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM= +k8s.io/kms v0.34.3 h1:QzBOD0sk1bGQVMcZQAHGjtbP1iKZJUyhC6D0I+BTxIE= +k8s.io/kms v0.34.3/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM= k8s.io/kube-aggregator v0.34.1 h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU= k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kube-scheduler v0.34.1 h1:S5td6VZwC3lCqERXclerDXhJ26zYc6JroY0s03+PqJ8= k8s.io/kube-scheduler v0.34.1/go.mod h1:UiOkod/w+HKoGut9mz9ie4s4KcI82vmLFdq1iIgsmRs= k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= @@ -915,8 +916,8 @@ k8s.io/pod-security-admission v0.32.2 h1:zDfAb/t0LbNU3z0ZMHtCb1zp8x05gWCGhmBYpUp k8s.io/pod-security-admission v0.32.2/go.mod h1:yxMPB3i1pGMLfxbe4BiWMuowMD7cdHR32y4nCj4wH+s= k8s.io/sample-apiserver v0.34.1 h1:6zqa6I1O8+Wsq2ru7nj+C+HyGXWHXfUOwGg2hucMQ1M= k8s.io/sample-apiserver v0.34.1/go.mod h1:xPfTeaMTkuyCo8eSpbeuLenm2eFH+Kc784ukUe96zhY= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY= +k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= @@ -925,12 +926,12 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/cluster-api v1.11.3 h1:apxfugbP1X8AG7THCM74CTarCOW4H2oOc6hlbm1hY80= sigs.k8s.io/cluster-api v1.11.3/go.mod h1:CA471SACi81M8DzRKTlWpHV33G0cfWEj7sC4fALFVok= -sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= -sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.22.5 h1:v3nfSUMowX/2WMp27J9slwGFyAt7IV0YwBxAkrUr0GE= +sigs.k8s.io/controller-runtime v0.22.5/go.mod h1:pc5SoYWnWI6I+cBHYYdZ7B6YHZVY5xNfll88JB+vniI= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240923090159-236e448db12c h1:w1vANkdIpYwbEZH0y1C7iJItgdEGvF9A3eCdRmLhg8I= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240923090159-236e448db12c/go.mod h1:IaDsO8xSPRxRG1/rm9CP7+jPmj0nMNAuNi/yiHnLX8k= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 
h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= @@ -939,7 +940,7 @@ sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/go-build.sh b/hack/go-build.sh index 728e01c0e..86e8a88bf 100755 --- a/hack/go-build.sh +++ b/hack/go-build.sh @@ -12,7 +12,10 @@ eval $(go env | grep -e "GOHOSTOS" -e "GOHOSTARCH") : "${GOARCH:=${GOHOSTARCH}}" # Go to the root of the repo -cd "$(git rev-parse --show-cdup)" +cdup="$(git rev-parse --show-cdup)" +if [ -n "$cdup" ]; then + cd "$cdup" +fi if [ -z ${VERSION_OVERRIDE+a} ]; then if [ -n "${BUILD_VERSION+a}" ] && [ -n "${BUILD_RELEASE+a}" ]; then diff --git a/install/0000_30_machine-api-operator_02_machine.CustomNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_02_machine.CustomNoUpgrade.crd.yaml index 3e68b107e..775b9ff34 100644 --- a/install/0000_30_machine-api-operator_02_machine.CustomNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_02_machine.CustomNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_02_machine.DevPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_02_machine.DevPreviewNoUpgrade.crd.yaml index 9ece1ebed..e14ed6f87 100644 --- a/install/0000_30_machine-api-operator_02_machine.DevPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_02_machine.DevPreviewNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. 
type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_02_machine.TechPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_02_machine.TechPreviewNoUpgrade.crd.yaml index b24670fd3..a91cadf51 100644 --- a/install/0000_30_machine-api-operator_02_machine.TechPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_02_machine.TechPreviewNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml index db01cca4f..7c68b7b18 100644 --- a/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml index 1556758e6..37fb42ca9 100644 --- a/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". 
+ When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml index f9801ce02..d93d2c15f 100644 --- a/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/install/0000_30_machine-api-operator_09_rbac.yaml b/install/0000_30_machine-api-operator_09_rbac.yaml index ae24aa41f..ee64f254e 100644 --- a/install/0000_30_machine-api-operator_09_rbac.yaml +++ b/install/0000_30_machine-api-operator_09_rbac.yaml @@ -257,6 +257,7 @@ rules: - apiGroups: - config.openshift.io resources: + - apiservers - infrastructures - dnses - clusterversions @@ -426,6 +427,7 @@ rules: - apiGroups: - config.openshift.io resources: + - apiservers - featuregates - featuregates/status - proxies diff --git a/install/0000_30_machine-api-operator_11_deployment.yaml b/install/0000_30_machine-api-operator_11_deployment.yaml index 893b17f89..add4645cd 100644 --- a/install/0000_30_machine-api-operator_11_deployment.yaml +++ b/install/0000_30_machine-api-operator_11_deployment.yaml @@ -28,31 +28,6 @@ spec: priorityClassName: system-node-critical serviceAccountName: machine-api-operator containers: - - name: kube-rbac-proxy - image: quay.io/openshift/origin-kube-rbac-proxy - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://localhost:8080/" - - "--tls-cert-file=/etc/tls/private/tls.crt" - - "--tls-private-key-file=/etc/tls/private/tls.key" - - "--config-file=/etc/kube-rbac-proxy/config-file.yaml" - - "--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305" - - "--logtostderr=true" - - "--v=3" - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - requests: - memory: 20Mi - cpu: 10m - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - name: config - mountPath: /etc/kube-rbac-proxy - - mountPath: /etc/tls/private - name: machine-api-operator-tls - name: machine-api-operator image: quay.io/openshift/origin-machine-api-operator command: @@ -75,7 +50,11 @@ spec: fieldRef: fieldPath: metadata.name - name: METRICS_PORT - value: "8080" + value: "8443" + ports: + - containerPort: 8443 + name: https + protocol: TCP resources: requests: cpu: 10m @@ -84,6 +63,8 @@ 
spec: volumeMounts: - name: images mountPath: /etc/machine-api-operator-config/images + - mountPath: /etc/tls/private + name: machine-api-operator-tls nodeSelector: node-role.kubernetes.io/master: "" restartPolicy: Always @@ -100,10 +81,6 @@ spec: effect: "NoExecute" tolerationSeconds: 120 volumes: - - name: config - configMap: - name: kube-rbac-proxy - defaultMode: 420 - name: images configMap: defaultMode: 420 diff --git a/pkg/controller/machine/machine_controller_test.go b/pkg/controller/machine/machine_controller_test.go index 35bbd6bda..d77c5c4e3 100644 --- a/pkg/controller/machine/machine_controller_test.go +++ b/pkg/controller/machine/machine_controller_test.go @@ -22,8 +22,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "context" + machinev1 "github.com/openshift/api/machine/v1beta1" - "golang.org/x/net/context" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" diff --git a/pkg/operator/config.go b/pkg/operator/config.go index 953eccd33..b8faa2506 100644 --- a/pkg/operator/config.go +++ b/pkg/operator/config.go @@ -25,6 +25,7 @@ type OperatorConfig struct { Proxy *configv1.Proxy PlatformType configv1.PlatformType Features map[string]bool + TLSProfile configv1.TLSProfileSpec } type Controllers struct { diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 8672165d9..b6bbc90d8 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -14,6 +14,7 @@ import ( configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" machineclientset "github.com/openshift/client-go/machine/clientset/versioned" + utiltls "github.com/openshift/controller-runtime-common/pkg/tls" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" @@ -482,6 +483,16 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { klog.V(2).Info("Enabling MachineAPIMigration for provider controller and machinesets") } + // Fetch TLS security profile from APIServer + apiServer, err := optr.osClient.ConfigV1().APIServers().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch APIServer for TLS profile: %w", err) + } + tlsProfile, err := utiltls.GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return nil, fmt.Errorf("failed to get TLS profile spec: %w", err) + } + return &OperatorConfig{ TargetNamespace: optr.namespace, Proxy: clusterWideProxy, @@ -495,5 +506,6 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { }, PlatformType: provider, Features: features, + TLSProfile: tlsProfile, }, nil } diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go index 8188a8999..7e371f23e 100644 --- a/pkg/operator/operator_test.go +++ b/pkg/operator/operator_test.go @@ -217,9 +217,15 @@ func TestOperatorSync_NoOp(t *testing.T) { }, } + apiServer := &openshiftv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } + stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator(nil, []runtime.Object{infra, proxy}, nil, imagesJSONFile, nil, stopCh) + optr, err := newFakeOperator(nil, []runtime.Object{infra, proxy, apiServer}, nil, imagesJSONFile, nil, stopCh) if err != nil { t.Fatal(err) } @@ -354,12 +360,20 
@@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, } + // Default APIServer with Intermediate TLS profile + defaultAPIServer := &openshiftv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } + testCases := []struct { name string platform openshiftv1.PlatformType infra *openshiftv1.Infrastructure featureGate *openshiftv1.FeatureGate proxy *openshiftv1.Proxy + apiServer *openshiftv1.APIServer imagesFile string expectedConfig *OperatorConfig expectedError error @@ -382,7 +396,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -396,6 +411,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.AWSPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -416,7 +432,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -430,6 +447,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.LibvirtPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -450,7 +468,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -464,6 +483,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.OpenStackPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -484,7 +504,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -498,6 +519,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.AzurePlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -518,7 +540,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -532,6 +555,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.BareMetalPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -552,7 +576,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -566,6 +591,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.GCPPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -586,7 +612,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -600,6 +627,7 @@ func 
TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: kubemarkPlatform, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -620,7 +648,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -634,6 +663,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.VSpherePlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -654,7 +684,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -668,6 +699,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.NonePlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -690,7 +722,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -704,6 +737,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.BareMetalPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -724,7 +758,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -738,6 +773,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: openshiftv1.BareMetalPlatformType, Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -758,7 +794,8 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, }, - proxy: proxy, + proxy: proxy, + apiServer: defaultAPIServer, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -772,6 +809,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, PlatformType: "bad-platform", Features: enabledFeatureMap, + TLSProfile: *openshiftv1.TLSProfiles[openshiftv1.TLSProfileIntermediateType], }, }, { @@ -832,6 +870,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { proxy := tc.proxy.DeepCopy() objects = append(objects, proxy) } + if tc.apiServer != nil { + apiServer := tc.apiServer.DeepCopy() + objects = append(objects, apiServer) + } stopCh := make(chan struct{}) defer close(stopCh) diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 4fbf9ac42..4e164a12e 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -2,6 +2,7 @@ package operator import ( "context" + "crypto/tls" "fmt" "os" "slices" @@ -20,8 +21,10 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/reconcile" - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" + utiltls "github.com/openshift/controller-runtime-common/pkg/tls" + libgocrypto "github.com/openshift/library-go/pkg/crypto" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" 
"github.com/openshift/library-go/pkg/operator/resource/resourcehash" @@ -237,7 +240,7 @@ func (optr *Operator) syncWebhookConfiguration(config *OperatorConfig) error { if err := optr.syncMachineMutatingWebhook(); err != nil { return err } - if config.PlatformType == v1.BareMetalPlatformType { + if config.PlatformType == configv1.BareMetalPlatformType { if err := optr.syncMetal3RemediationValidatingWebhook(); err != nil { return err } @@ -510,7 +513,7 @@ func newRBACConfigVolumes() []corev1.Volume { func newPodTemplateSpec(config *OperatorConfig, features map[string]bool) *corev1.PodTemplateSpec { containers := newContainers(config, features) withMHCProxy := config.Controllers.MachineHealthCheck != "" - proxyContainers := newKubeProxyContainers(config.Controllers.KubeRBACProxy, withMHCProxy) + proxyContainers := newKubeProxyContainers(config.Controllers.KubeRBACProxy, withMHCProxy, config.TLSProfile) tolerations := []corev1.Toleration{ { Key: "node-role.kubernetes.io/master", @@ -673,7 +676,7 @@ func newContainers(config *OperatorConfig, features map[string]bool) []corev1.Co machineControllerArgs := append([]string{}, featureGateArgs...) switch config.PlatformType { - case v1.AzurePlatformType, v1.GCPPlatformType: + case configv1.AzurePlatformType, configv1.GCPPlatformType: machineControllerArgs = append(machineControllerArgs, "--max-concurrent-reconciles=10") } @@ -853,20 +856,20 @@ func newContainers(config *OperatorConfig, features map[string]bool) []corev1.Co return containers } -func newKubeProxyContainers(image string, withMHCProxy bool) []corev1.Container { +func newKubeProxyContainers(image string, withMHCProxy bool, tlsProfile configv1.TLSProfileSpec) []corev1.Container { proxyContainers := []corev1.Container{ - newKubeProxyContainer(image, "machineset-mtrc", metrics.DefaultMachineSetMetricsAddress, machineSetExposeMetricsPort), - newKubeProxyContainer(image, "machine-mtrc", metrics.DefaultMachineMetricsAddress, machineExposeMetricsPort), + newKubeProxyContainer(image, "machineset-mtrc", metrics.DefaultMachineSetMetricsAddress, machineSetExposeMetricsPort, tlsProfile), + newKubeProxyContainer(image, "machine-mtrc", metrics.DefaultMachineMetricsAddress, machineExposeMetricsPort, tlsProfile), } if withMHCProxy { proxyContainers = append(proxyContainers, - newKubeProxyContainer(image, "mhc-mtrc", metrics.DefaultHealthCheckMetricsAddress, machineHealthCheckExposeMetricsPort), + newKubeProxyContainer(image, "mhc-mtrc", metrics.DefaultHealthCheckMetricsAddress, machineHealthCheckExposeMetricsPort, tlsProfile), ) } return proxyContainers } -func newKubeProxyContainer(image, portName, upstreamPort string, exposePort int32) corev1.Container { +func newKubeProxyContainer(image, portName, upstreamPort string, exposePort int32, tlsProfile configv1.TLSProfileSpec) corev1.Container { configMountPath := "/etc/kube-rbac-proxy" tlsCertMountPath := "/etc/tls/private" resources := corev1.ResourceRequirements{ @@ -875,16 +878,32 @@ func newKubeProxyContainer(image, portName, upstreamPort string, exposePort int3 corev1.ResourceCPU: resource.MustParse("10m"), }, } + + tlsConfigFn, _ := utiltls.NewTLSConfigFromProfile(tlsProfile) + + // Apply the config function to get the validated cipher codes. 
+ tlsConf := &tls.Config{} + tlsConfigFn(tlsConf) + args := []string{ fmt.Sprintf("--secure-listen-address=0.0.0.0:%d", exposePort), fmt.Sprintf("--upstream=http://localhost%s", upstreamPort), fmt.Sprintf("--config-file=%s/config-file.yaml", configMountPath), fmt.Sprintf("--tls-cert-file=%s/tls.crt", tlsCertMountPath), fmt.Sprintf("--tls-private-key-file=%s/tls.key", tlsCertMountPath), - "--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + } + + // Ciphers are empty when using TLS 1.3, so we don't need to set them. + if len(tlsConf.CipherSuites) > 0 { + ianaCiphers := libgocrypto.CipherSuitesToNamesOrDie(tlsConf.CipherSuites) + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(ianaCiphers, ","))) + } + + args = append(args, + fmt.Sprintf("--tls-min-version=%s", tlsProfile.MinTLSVersion), "--logtostderr=true", "--v=3", - } + ) ports := []corev1.ContainerPort{{ Name: portName, ContainerPort: exposePort, diff --git a/pkg/operator/sync_test.go b/pkg/operator/sync_test.go index 7c28c04cb..7030b743f 100644 --- a/pkg/operator/sync_test.go +++ b/pkg/operator/sync_test.go @@ -7,7 +7,7 @@ import ( "time" . "github.com/onsi/gomega" - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -456,20 +456,20 @@ func TestSyncWebhookConfiguration(t *testing.T) { testCases := []struct { name string - platformType v1.PlatformType + platformType configv1.PlatformType expectedNrMutatingWebhooks int expectedNrValidatingWebhooks int }{ { name: "webhooks on non baremetal", // using AWS as random non baremetal platform - platformType: v1.AWSPlatformType, + platformType: configv1.AWSPlatformType, expectedNrMutatingWebhooks: 1, expectedNrValidatingWebhooks: 1, }, { name: "webhooks on baremetal", - platformType: v1.BareMetalPlatformType, + platformType: configv1.BareMetalPlatformType, expectedNrMutatingWebhooks: 2, expectedNrValidatingWebhooks: 2, }, @@ -504,3 +504,103 @@ func TestSyncWebhookConfiguration(t *testing.T) { }) } } + +func TestNewKubeProxyContainer(t *testing.T) { + testCases := []struct { + name string + image string + portName string + upstreamPort string + exposePort int32 + tlsProfile configv1.TLSProfileSpec + expectedCipherSuitesInArgs bool + }{ + { + name: "TLS 1.2 Intermediate profile with cipher suites", + image: "test-image:latest", + portName: "test-mtrc", + upstreamPort: ":8080", + exposePort: 8443, + tlsProfile: configv1.TLSProfileSpec{ + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + }, + MinTLSVersion: configv1.VersionTLS12, + }, + expectedCipherSuitesInArgs: true, + }, + { + name: "TLS 1.3 Modern profile without cipher suites", + image: "test-image:latest", + portName: "test-mtrc", + upstreamPort: ":8080", + exposePort: 8443, + tlsProfile: configv1.TLSProfileSpec{ + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: configv1.VersionTLS13, + }, + expectedCipherSuitesInArgs: false, + }, + { + name: "Empty cipher list", + image: "test-image:latest", + portName: "test-mtrc", + upstreamPort: ":8080", + exposePort: 8443, 
+ tlsProfile: configv1.TLSProfileSpec{ + Ciphers: []string{}, + MinTLSVersion: configv1.VersionTLS13, + }, + expectedCipherSuitesInArgs: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + container := newKubeProxyContainer(tc.image, tc.portName, tc.upstreamPort, tc.exposePort, tc.tlsProfile) + + // Verify basic container properties + g.Expect(container.Name).To(Equal("kube-rbac-proxy-" + tc.portName)) + g.Expect(container.Image).To(Equal(tc.image)) + + // Verify ports + g.Expect(container.Ports).To(HaveLen(1)) + g.Expect(container.Ports[0].Name).To(Equal(tc.portName)) + g.Expect(container.Ports[0].ContainerPort).To(Equal(tc.exposePort)) + + // Verify resource requests + g.Expect(container.Resources.Requests).To(HaveKey(corev1.ResourceMemory)) + g.Expect(container.Resources.Requests).To(HaveKey(corev1.ResourceCPU)) + + // Verify volume mounts + g.Expect(container.VolumeMounts).To(HaveLen(2)) + + // Verify args + hasCipherSuitesArg := false + hasTLSMinVersionArg := false + for _, arg := range container.Args { + if len(arg) >= len("--tls-cipher-suites=") && arg[:len("--tls-cipher-suites=")] == "--tls-cipher-suites=" { + hasCipherSuitesArg = true + } + if len(arg) >= len("--tls-min-version=") && arg[:len("--tls-min-version=")] == "--tls-min-version=" { + hasTLSMinVersionArg = true + g.Expect(arg).To(ContainSubstring(string(tc.tlsProfile.MinTLSVersion))) + } + } + + g.Expect(hasCipherSuitesArg).To(Equal(tc.expectedCipherSuitesInArgs), + "cipher suites arg presence mismatch") + g.Expect(hasTLSMinVersionArg).To(BeTrue(), "TLS min version arg should be present") + }) + } +} diff --git a/pkg/webhooks/machine_webhook.go b/pkg/webhooks/machine_webhook.go index 38c4fc867..54d20e9a1 100644 --- a/pkg/webhooks/machine_webhook.go +++ b/pkg/webhooks/machine_webhook.go @@ -13,6 +13,8 @@ import ( "k8s.io/component-base/featuregate" + "slices" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -24,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/klog/v2" - "k8s.io/utils/strings/slices" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 639e6c399..235496eeb 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. 
`git show diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index c05a0b7e5..3fa516caa 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9..ac196e7df 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. 
See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. @@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index 1dd523211..b7077d3ae 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -69,7 +69,7 @@ type Position struct { Line int // Line number, starting at 1. Col int // Error column, starting at 1. Start int // Start of error, as byte offset starting at 0. - Len int // Lenght of the error in bytes. + Len int // Length of the error in bytes. 
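Stepping back to the `eStruct` slice copy above: the copy is needed because of `append` aliasing. Two field-index paths built from the same `start` prefix can share a backing array, so appending to one silently overwrites the other. A minimal, self-contained sketch of the failure mode (hypothetical values, unrelated to toml's real index data):

```go
package main

import "fmt"

func main() {
	// A prefix slice with spare capacity, like the shared `start` index path.
	start := make([]int, 1, 2)

	a := append(start, 1) // fits in start's capacity: reuses its backing array
	b := append(start, 2) // reuses the SAME backing array, overwriting a[1]

	fmt.Println(a, b) // prints "[0 2] [0 2]" -- a was silently corrupted
}
```

Copying `start` before deriving each child path gives every branch its own backing array, which is exactly what the replacement code now does unconditionally.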
} func (p Position) withCol(tomlFile string) Position { diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 6878d9d69..1c3b47702 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -1117,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1265,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index e61453730..0d337026c 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 3f2c090c8..e3ea8a9a2 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -529,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. 
accept = isHex(r) } diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index 147f756fe..22ce8f1df 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +// Package dbus provides integration with the systemd D-Bus API. +// See http://www.freedesktop.org/wiki/Software/systemd/dbus/ package dbus import ( diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index 074148cb4..a64f0b3ea 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -24,15 +24,15 @@ import ( "github.com/godbus/dbus/v5" ) -// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API +// Who specifies which process to send a signal to via the [KillUnitWithTarget]. type Who string const ( - // All sends the signal to all processes in the unit + // All sends the signal to all processes in the unit. All Who = "all" - // Main sends the signal to the main process of the unit + // Main sends the signal to the main process of the unit. Main Who = "main" - // Control sends the signal to the control process of the unit + // Control sends the signal to the control process of the unit. Control Who = "control" ) @@ -41,7 +41,8 @@ func (c *Conn) jobComplete(signal *dbus.Signal) { var job dbus.ObjectPath var unit string var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) + + _ = dbus.Store(signal.Body, &id, &job, &unit, &result) c.jobListener.Lock() out, ok := c.jobListener.jobs[job] if ok { @@ -51,7 +52,7 @@ func (c *Conn) jobComplete(signal *dbus.Signal) { c.jobListener.Unlock() } -func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...any) (int, error) { if ch != nil { c.jobListener.Lock() defer c.jobListener.Unlock() @@ -102,6 +103,10 @@ func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error // has been removed too. skipped indicates that a job was skipped because it // didn't apply to the units current state. // +// Important: It is the caller's responsibility to unblock the provided channel write, +// either by reading from the channel or by using a buffered channel. Until the write +// is unblocked, the Conn object cannot handle other jobs. +// // If no error occurs, the ID of the underlying systemd job will be returned. There // does exist the possibility for no error to be returned, but for the returned job // ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint @@ -192,19 +197,21 @@ func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } -// Deprecated: use KillUnitContext instead. +// Deprecated: use [KillUnitWithTarget] instead. func (c *Conn) KillUnit(name string, signal int32) { c.KillUnitContext(context.Background(), name, signal) } // KillUnitContext takes the unit name and a UNIX signal number to send. 
// All of the unit's processes are killed. +// +// Deprecated: use [KillUnitWithTarget] instead, with target argument set to [All]. func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { - c.KillUnitWithTarget(ctx, name, All, signal) + _ = c.KillUnitWithTarget(ctx, name, All, signal) } -// KillUnitWithTarget is like KillUnitContext, but allows you to specify which -// process in the unit to send the signal to. +// KillUnitWithTarget sends a signal to the specified unit. +// The target argument can be one of [All], [Main], or [Control]. func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() } @@ -240,7 +247,7 @@ func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { } // getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. -func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { +func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]any, error) { var err error var props map[string]dbus.Variant @@ -254,7 +261,7 @@ func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInte return nil, err } - out := make(map[string]interface{}, len(props)) + out := make(map[string]any, len(props)) for k, v := range props { out[k] = v.Value() } @@ -263,36 +270,36 @@ func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInte } // Deprecated: use GetUnitPropertiesContext instead. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { +func (c *Conn) GetUnitProperties(unit string) (map[string]any, error) { return c.GetUnitPropertiesContext(context.Background(), unit) } // GetUnitPropertiesContext takes the (unescaped) unit name and returns all of // its dbus object properties. -func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { +func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]any, error) { path := unitPath(unit) return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") } // Deprecated: use GetUnitPathPropertiesContext instead. -func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]any, error) { return c.GetUnitPathPropertiesContext(context.Background(), path) } // GetUnitPathPropertiesContext takes the (escaped) unit path and returns all // of its dbus object properties. -func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { +func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]any, error) { return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") } // Deprecated: use GetAllPropertiesContext instead. -func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { +func (c *Conn) GetAllProperties(unit string) (map[string]any, error) { return c.GetAllPropertiesContext(context.Background(), unit) } // GetAllPropertiesContext takes the (unescaped) unit name and returns all of // its dbus object properties. 
-func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { +func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]any, error) { path := unitPath(unit) return c.getProperties(ctx, path, "") } @@ -331,20 +338,20 @@ func (c *Conn) GetServiceProperty(service string, propertyName string) (*Propert return c.GetServicePropertyContext(context.Background(), service, propertyName) } -// GetServiceProperty returns property for given service name and property name. +// GetServicePropertyContext returns property for given service name and property name. func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) } // Deprecated: use GetUnitTypePropertiesContext instead. -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]any, error) { return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) } // GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. // Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. // Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. -func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]any, error) { path := unitPath(unit) return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) } @@ -389,22 +396,22 @@ type UnitStatus struct { JobPath dbus.ObjectPath // The job object path } -type storeFunc func(retvalues ...interface{}) error +type storeFunc func(retvalues ...any) error func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := f(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) + statusInterface := make([]any, len(status)) for i := range status { statusInterface[i] = &status[i] } @@ -499,19 +506,19 @@ type UnitFile struct { } func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := f(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) + fileInterface := make([]any, len(files)) for i := range files { fileInterface[i] = &files[i] } @@ -529,7 +536,7 @@ func (c *Conn) ListUnitFiles() ([]UnitFile, error) { return c.ListUnitFilesContext(context.Background()) } -// ListUnitFiles returns an array of all available units on disk. +// ListUnitFilesContext returns an array of all available units on disk. 
func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) } @@ -569,19 +576,19 @@ func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUn // or unlink), the file name of the symlink and the destination of the // symlink. func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) + changesInterface := make([]any, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } @@ -618,19 +625,19 @@ func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { var carries_install_info bool - result := make([][]interface{}, 0) + result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) if err != nil { return false, nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) + changesInterface := make([]any, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } @@ -667,19 +674,19 @@ func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFile // symlink or unlink), the file name of the symlink and the destination of the // symlink. func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) + changesInterface := make([]any, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } @@ -713,19 +720,19 @@ func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUn // runtime only (true, /run/systemd/..), or persistently (false, // /etc/systemd/..). 
func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) + changesInterface := make([]any, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } @@ -757,19 +764,19 @@ func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileCh // for runtime only (true, /run/systemd/..), or persistently (false, // /etc/systemd/..). func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) + changesInterface := make([]any, len(changes)) for i := range changes { changesInterface[i] = &changes[i] } @@ -829,18 +836,18 @@ func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { } func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { - result := make([][]interface{}, 0) + result := make([][]any, 0) if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { return nil, err } - resultInterface := make([]interface{}, len(result)) + resultInterface := make([]any, len(result)) for i := range result { resultInterface[i] = result[i] } status := make([]JobStatus, len(result)) - statusInterface := make([]interface{}, len(status)) + statusInterface := make([]any, len(status)) for i := range status { statusInterface[i] = &status[i] } @@ -852,13 +859,18 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { return status, nil } -// Freeze the cgroup associated with the unit. -// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +// FreezeUnit freezes the cgroup associated with the unit. +// Note that FreezeUnit and [ThawUnit] are only supported on systems running with cgroup v2. func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() } -// Unfreeze the cgroup associated with the unit. +// ThawUnit unfreezes the cgroup associated with the unit. func (c *Conn) ThawUnit(ctx context.Context, unit string) error { return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() } + +// AttachProcessesToUnit moves existing processes, identified by pids, into an existing systemd unit. 
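+// The three arguments are passed through unchanged to the
+// org.freedesktop.systemd1.Manager.AttachProcessesToUnit D-Bus method;
+// subcgroup names a control group below the unit's own cgroup.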
+func (c *Conn) AttachProcessesToUnit(ctx context.Context, unit, subcgroup string, pids []uint32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.AttachProcessesToUnit", 0, unit, subcgroup, pids).Store() +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go index 7e370fea2..f0f6aad9d 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -70,7 +70,7 @@ func (c *Conn) dispatch() { switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + _ = c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) case "org.freedesktop.systemd1.Manager.UnitNew": unitPath = signal.Body[1].(dbus.ObjectPath) case "org.freedesktop.DBus.Properties.PropertiesChanged": @@ -262,7 +262,7 @@ func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { return ok && t >= time.Now().UnixNano() } -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]any) { loadState, ok := info["LoadState"].(string) if !ok { return diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go index 5b408d584..dbe4aa887 100644 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -40,8 +40,8 @@ func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan err } // NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} +func (c *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), c} } // mismatchUnitStatus returns true if the provided UnitStatus objects diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go index ac24c7767..16c4e4775 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -41,6 +41,6 @@ const ( ) // Print prints a message to the local systemd journal using Send(). -func Print(priority Priority, format string, a ...interface{}) error { +func Print(priority Priority, format string, a ...any) error { return Send(fmt.Sprintf(format, a...), priority, nil) } diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index c5b23a819..6266e16e5 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows // Package journal provides write bindings to the local systemd journal. 
// It is implemented in pure Go and connects to the journal directly over its @@ -31,7 +30,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -194,7 +192,7 @@ func appendVariable(w io.Writer, name, value string) { * - the data, followed by a newline */ fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) + _ = binary.Write(w, binary.LittleEndian, uint64(len(value))) fmt.Fprintln(w, value) } else { /* just write the variable and value all on one line */ @@ -214,7 +212,7 @@ func validVarName(name string) error { } for _, c := range name { - if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + if ('A' > c || c > 'Z') && ('0' > c || c > '9') && c != '_' { return errors.New("Variable name contains invalid characters") } } @@ -239,7 +237,7 @@ func isSocketSpaceError(err error) bool { // tempFd creates a temporary, unlinked file under `/dev/shm`. func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + file, err := os.CreateTemp("/dev/shm/", "journal.XXXXX") if err != nil { return nil, err } diff --git a/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml new file mode 100644 index 000000000..e965034ed --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +version: "2" + +linters: + enable: + - asasalint + - asciicheck + - containedctx + - contextcheck + - errcheck + - errorlint + - exhaustive + - forcetypeassert + - godot + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - musttag + - nilerr + - nilnesserr + - nilnil + - noctx + - prealloc + - revive + - staticcheck + - testifylint + - unconvert + - unparam + - unused + - usetesting + settings: + govet: + enable: + - nilness + testifylint: + enable-all: true + +formatters: + enable: + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cyphar/filepath-securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index ca0e3c62c..1fc7eeb06 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -4,7 +4,167 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [Unreleased] ## +## [Unreleased 0.5.z] ## + +## [0.5.2] - 2025-11-19 ## + +> "Will you walk into my parlour?" said a spider to a fly. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. 
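A rough sketch of the caching policy this entry describes; every identifier here (`probeOpenat2`, `openat2Unsupported`, `hasOpenat2`) is hypothetical rather than pathrs-lite's actual code:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// probeOpenat2 stands in for a one-off test call of openat2(2) (hypothetical).
func probeOpenat2() error { return nil }

// openat2Unsupported remembers only *failed* probes; zero value means unknown.
var openat2Unsupported atomic.Bool

func hasOpenat2() bool {
	if openat2Unsupported.Load() {
		return false // a previous probe failed: use the O_PATH fallback
	}
	if err := probeOpenat2(); err != nil {
		openat2Unsupported.Store(true) // cache the failure...
		return false
	}
	return true // ...but deliberately never cache success
}

func main() { fmt.Println(hasOpenat2()) }
```

Because success is re-probed on every call, a seccomp-bpf filter installed later that denies `openat2(2)` makes the next probe fail, flipping subsequent calls onto the fallback instead of surfacing the seccomp error.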
+- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. + +## [0.5.1] - 2025-10-31 ## + +> Spooky scary skeletons send shivers down your spine! + +### Changed ### +- `openat2` can return `-EAGAIN` if it detects a possible attack in certain + scenarios (namely if there was a rename or mount while walking a path with a + `..` component). While this is necessary to avoid a denial-of-service in the + kernel, it does require retry loops in userspace. + + In previous versions, `pathrs-lite` would retry `openat2` 32 times before + returning an error, but we've received user reports that this limit can be + hit on systems with very heavy load. In some synthetic benchmarks (testing + the worst-case of an attacker doing renames in a tight loop on every core of + a 16-core machine) we managed to get a ~3% failure rate in runc. We have + improved this situation in two ways: + + * We have now increased this limit to 128, which should be good enough for + most use-cases without becoming a denial-of-service vector (the number of + syscalls called by the `O_PATH` resolver in a typical case is within the + same ballpark). The same benchmarks show a failure rate of ~0.12% which + (while not zero) is probably sufficient for most users. + + * In addition, we now return a `unix.EAGAIN` error that is bubbled up and can + be detected by callers. This means that callers with stricter requirements + to avoid spurious errors can choose to do their own infinite `EAGAIN` retry + loop (though we would strongly recommend users use time-based deadlines in + such retry loops to avoid potentially unbounded denials-of-service). + +## [0.5.0] - 2025-09-26 ## + +> Let the past die. Kill it if you have to. + +> **NOTE**: With this release, some parts of +> `github.com/cyphar/filepath-securejoin` are now licensed under the Mozilla +> Public License (version 2). Please see [COPYING.md][] as well as the the +> license header in each file for more details. + +[COPYING.md]: ./COPYING.md + +### Breaking ### +- The new API introduced in the [0.3.0][] release has been moved to a new + subpackage called `pathrs-lite`. This was primarily done to better indicate + the split between the new and old APIs, as well as indicate to users the + purpose of this subpackage (it is a less complete version of [libpathrs][]). + + We have added some wrappers to the top-level package to ease the transition, + but those are deprecated and will be removed in the next minor release of + filepath-securejoin. Users should update their import paths. + + This new subpackage has also been relicensed under the Mozilla Public License + (version 2), please see [COPYING.md][] for more details. + +### Added ### +- Most of the key bits the safe `procfs` API have now been exported and are + available in `github.com/cyphar/filepath-securejoin/pathrs-lite/procfs`. At + the moment this primarily consists of a new `procfs.Handle` API: + + * `OpenProcRoot` returns a new handle to `/proc`, endeavouring to make it + safe if possible (`subset=pid` to protect against mistaken write attacks + and leaks, as well as using `fsopen(2)` to avoid racing mount attacks). + + `OpenUnsafeProcRoot` returns a handle without attempting to create one + with `subset=pid`, which makes it more dangerous to leak. Most users + should use `OpenProcRoot` (even if you need to use `ProcRoot` as the base + of an operation, as filepath-securejoin will internally open a handle when + necessary). 
+ + * The `(*procfs.Handle).Open*` family of methods lets you get a safe + `O_PATH` handle to subpaths within `/proc` for certain subpaths. + + For `OpenThreadSelf`, the returned `ProcThreadSelfCloser` needs to be + called after you completely finish using the handle (this is necessary + because Go is multi-threaded and `ProcThreadSelf` references + `/proc/thread-self` which may disappear if we do not + `runtime.LockOSThread` -- `ProcThreadSelfCloser` is currently equivalent + to `runtime.UnlockOSThread`). + + Note that you cannot open any `procfs` symlinks (most notably magic-links) + using this API. At the moment, filepath-securejoin does not support this + feature (but [libpathrs][] does). + + * `ProcSelfFdReadlink` lets you get the in-kernel path representation of a + file descriptor (think `readlink("/proc/self/fd/...")`), except that we + verify that there aren't any tricky overmounts that could fool the + process. + + Please be aware that the returned string is simply a snapshot at that + particular moment, and an attacker could move the file being pointed to. + In addition, complex namespace configurations could result in non-sensical + or confusing paths to be returned. The value received from this function + should only be used as secondary verification of some security property, + not as proof that a particular handle has a particular path. + + The procfs handle used internally by the API is the same as the rest of + `filepath-securejoin` (for privileged programs this is usually a private + in-process `procfs` instance created with `fsopen(2)`). + + As before, this is intended as a stop-gap before users migrate to + [libpathrs][], which provides a far more extensive safe `procfs` API and is + generally more robust. + +- Previously, the hardened procfs implementation (used internally within + `Reopen` and `Open(at)InRoot`) only protected against overmount attacks on + systems with `openat2(2)` (Linux 5.6) or systems with `fsopen(2)` or + `open_tree(2)` (Linux 5.2) and programs with privileges to use them (with + some caveats about locked mounts that probably affect very few users). For + other users, an attacker with the ability to create malicious mounts (on most + systems, a sysadmin) could trick you into operating on files you didn't + expect. This attack only really makes sense in the context of container + runtime implementations. + + This was considered a reasonable trade-off, as the long-term intention was to + get all users to just switch to [libpathrs][] if they wanted to use the safe + `procfs` API (which had more extensive protections, and is what these new + protections in `filepath-securejoin` are based on). However, as the API + is now being exported it seems unwise to advertise the API as "safe" if we do + not protect against known attacks. + + The procfs API is now more protected against attackers on systems lacking the + aforementioned protections. However, the most comprehensive of these + protections effectively rely on [`statx(STATX_MNT_ID)`][statx.2] (Linux 5.8). + On older kernel versions, there is no effective protection (there is some + minimal protection against non-`procfs` filesystem components but a + sufficiently clever attacker can work around those). In addition, + `STATX_MNT_ID` is vulnerable to mount ID reuse attacks by sufficiently + motivated and privileged attackers -- this problem is mitigated with + `STATX_MNT_ID_UNIQUE` (Linux 6.8) but that raises the minimum kernel version + for more protection. 
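As a concrete illustration of the snapshot caveat for `ProcSelfFdReadlink` described earlier in this section, the underlying mechanism can be shown with plain standard-library calls (this sketch performs none of the overmount verification pathrs-lite adds):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The kernel's current path for the descriptor -- the raw data that
	// ProcSelfFdReadlink verifies before returning.
	p, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", f.Fd()))
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // a snapshot only: the file may be renamed a moment later
}
```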
+ + The fact that these protections are quite limited despite needing a fair bit + of extra code to handle was one of the primary reasons we did not initially + implement this in `filepath-securejoin` ([libpathrs][] supports all of this, + of course). + +### Fixed ### +- RHEL 8 kernels have backports of `fsopen(2)` but in some testing we've found + that it has very bad (and very difficult to debug) performance issues, and so + we will explicitly refuse to use `fsopen(2)` if the running kernel version is + pre-5.2 and will instead fallback to `open("/proc")`. + +[CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv +[libpathrs]: https://github.com/cyphar/libpathrs +[statx.2]: https://www.man7.org/linux/man-pages/man2/statx.2.html ## [0.4.1] - 2025-01-28 ## @@ -173,7 +333,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). safe to start migrating to as we have extensive tests ensuring they behave correctly and are safe against various races and other attacks. -[libpathrs]: https://github.com/openSUSE/libpathrs +[libpathrs]: https://github.com/cyphar/libpathrs [open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html ## [0.2.5] - 2024-05-03 ## @@ -238,7 +398,9 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD +[Unreleased 0.5.z]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...release-0.5 +[0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1 +[0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 [0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 diff --git a/vendor/github.com/cyphar/filepath-securejoin/COPYING.md b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md new file mode 100644 index 000000000..520e822b1 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md @@ -0,0 +1,447 @@ +## COPYING ## + +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +This project is made up of code licensed under different licenses. Which code +you use will have an impact on whether only one or both licenses apply to your +usage of this library. + +Note that **each file** in this project individually has a code comment at the +start describing the license of that particular file -- this is the most +accurate license information of this project; in case there is any conflict +between this document and the comment at the start of a file, the comment shall +take precedence. The only purpose of this document is to work around [a known +technical limitation of pkg.go.dev's license checking tool when dealing with +non-trivial project licenses][go75067]. 
+ +[go75067]: https://go.dev/issue/75067 + +### `BSD-3-Clause` ### + +At time of writing, the following files and directories are licensed under the +BSD-3-Clause license: + + * `doc.go` + * `join*.go` + * `vfs.go` + * `internal/consts/*.go` + * `pathrs-lite/internal/gocompat/*.go` + * `pathrs-lite/internal/kernelversion/*.go` + +The text of the BSD-3-Clause license used by this project is the following (the +text is also available from the [`LICENSE.BSD`](./LICENSE.BSD) file): + +``` +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` + +### `MPL-2.0` ### + +All other files (unless otherwise marked) are licensed under the Mozilla Public +License (version 2.0). + +The text of the Mozilla Public License (version 2.0) is the following (the text +is also available from the [`LICENSE.MPL-2.0`](./LICENSE.MPL-2.0) file): + +``` +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. +``` diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD similarity index 100% rename from vendor/github.com/cyphar/filepath-securejoin/LICENSE rename to vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 new file mode 100644 index 000000000..d0a1fa148 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index eaeb53fcd..6673abfc8 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -67,7 +67,8 @@ func SecureJoin(root, unsafePath string) (string, error) { [libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 -### New API ### +### New API ### +[#new-api]: #new-api While we recommend users switch to [libpathrs][libpathrs] as soon as it has a stable release, some methods implemented by libpathrs have been ported to this @@ -165,5 +166,19 @@ after `MkdirAll`). ### License ### -The license of this project is the same as Go, which is a BSD 3-clause license -available in the `LICENSE` file. +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +Some of the code in this project is derived from Go, and is licensed under a +BSD 3-clause license (available in `LICENSE.BSD`). Other files (many of which +are derived from [libpathrs][libpathrs]) are licensed under the Mozilla Public +License version 2.0 (available in `LICENSE.MPL-2.0`). If you are using the +["New API" described above][#new-api], you are probably using code from files +released under this license. + +Every source file in this project has a copyright header describing its +license. Please check the license headers of each file to see what license +applies to it. + +See [COPYING.md](./COPYING.md) for some more details. + +[umoci]: https://github.com/opencontainers/umoci diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 267577d47..cb0c939a9 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.4.1 +0.5.2 diff --git a/vendor/github.com/cyphar/filepath-securejoin/codecov.yml b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml new file mode 100644 index 000000000..ff284dbfa --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +comment: + layout: "condensed_header, reach, diff, components, condensed_files, condensed_footer" + require_changes: true + branches: + - main + +coverage: + range: 60..100 + status: + project: + default: + target: 85% + threshold: 0% + patch: + default: + target: auto + informational: true + +github_checks: + annotations: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go b/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go new file mode 100644 index 000000000..3e427b164 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/deprecated_linux.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package securejoin + +import ( + "github.com/cyphar/filepath-securejoin/pathrs-lite" +) + +var ( + // MkdirAll is a wrapper around [pathrs.MkdirAll]. + // + // Deprecated: You should use [pathrs.MkdirAll] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + MkdirAll = pathrs.MkdirAll + + // MkdirAllHandle is a wrapper around [pathrs.MkdirAllHandle]. + // + // Deprecated: You should use [pathrs.MkdirAllHandle] directly instead. + // This wrapper will be removed in filepath-securejoin v0.6. + MkdirAllHandle = pathrs.MkdirAllHandle + + // OpenInRoot is a wrapper around [pathrs.OpenInRoot]. + // + // Deprecated: You should use [pathrs.OpenInRoot] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + OpenInRoot = pathrs.OpenInRoot + + // OpenatInRoot is a wrapper around [pathrs.OpenatInRoot]. + // + // Deprecated: You should use [pathrs.OpenatInRoot] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + OpenatInRoot = pathrs.OpenatInRoot + + // Reopen is a wrapper around [pathrs.Reopen]. + // + // Deprecated: You should use [pathrs.Reopen] directly instead. This + // wrapper will be removed in filepath-securejoin v0.6. + Reopen = pathrs.Reopen +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go index 1ec7d065e..1438fc9c0 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/doc.go +++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style @@ -14,14 +16,13 @@ // **not** safe against race conditions where an attacker changes the // filesystem after (or during) the [SecureJoin] operation. // -// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived -// functions). These are safe against racing attackers and have several other -// protections that are not provided by the legacy API. There are many more -// operations that most programs expect to be able to do safely, but we do not -// provide explicit support for them because we want to encourage users to -// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a -// cross-language next-generation library that is entirely designed around -// operating on paths safely. 
+// The new API is available in the [pathrs-lite] subpackage, and provides +// protections against racing attackers as well as several other key +// protections against attacks often seen by container runtimes. As the name +// suggests, [pathrs-lite] is a stripped-down (pure Go) reimplementation of +// [libpathrs]. The main APIs provided are [OpenInRoot], [MkdirAll], and +// [procfs.Handle] -- other APIs are not planned to be ported. The long-term +// goal is for users to migrate to [libpathrs], which is more fully featured. // // securejoin has been used by several container runtimes (Docker, runc, // Kubernetes, etc) for quite a few years as a de-facto standard for operating @@ -31,9 +32,16 @@ // API as soon as possible (or even better, switch to libpathrs). // // This project was initially intended to be included in the Go standard -// library, but [it was rejected](https://go.dev/issue/20126). There is now a -// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API -// that shares some of the goals of filepath-securejoin. However, that design -// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the -// usecase of container runtimes and most system tools. +// library, but it was rejected (see https://go.dev/issue/20126). Much later, +// [os.Root] was added to the Go stdlib; it shares some of the goals of +// filepath-securejoin. However, its design is intended to work like +// openat2(RESOLVE_BENEATH), which does not fit the use case of container +// runtimes and most system tools. +// +// [pathrs-lite]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite +// [libpathrs]: https://github.com/openSUSE/libpathrs +// [OpenInRoot]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#OpenInRoot +// [MkdirAll]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#MkdirAll +// [procfs.Handle]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs#Handle +// [os.Root]: https://pkg.go.dev/os#Root package securejoin
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go deleted file mode 100644 index ddd6fa9a4..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "slices" - "sync" -) - -func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { - return slices.DeleteFunc(slice, delFn) -} - -func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { - return slices.Contains(slice, val) -} - -func slices_Clone[S ~[]E, E any](slice S) S { - return slices.Clone(slice) -} - -func sync_OnceValue[T any](f func() T) func() T { - return sync.OnceValue(f) -} - -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - return sync.OnceValues(f) -}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go deleted file mode 100644 index f1e6fe7e7..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build linux && !go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "sync" -) - -// These are very minimal implementations of functions that appear in Go 1.21's -// stdlib, included so that we can build on older Go versions. Most are -// borrowed directly from the stdlib, and a few are modified to be "obviously -// correct" without needing to copy too many other helpers. - -// clearSlice is equivalent to the builtin clear from Go 1.21. -// Copied from the Go 1.24 stdlib implementation. -func clearSlice[S ~[]E, E any](slice S) { - var zero E - for i := range slice { - slice[i] = zero - } -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := slices_IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Similar to the stdlib slices.Contains, except that we don't have -// slices.Index so we need to use slices.IndexFunc for this non-Func helper. -func slices_Contains[S ~[]E, E comparable](s S, v E) bool { - return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValue[T any](f func() T) func() T { - var ( - once sync.Once - valid bool - p any - result T - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - result = f() - f = nil - valid = true - } - return func() T { - once.Do(g) - if !valid { - panic(p) - } - return result - } -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - var ( - once sync.Once - valid bool - p any - r1 T1 - r2 T2 - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - r1, r2 = f() - f = nil - valid = true - } - return func() (T1, T2) { - once.Do(g) - if !valid { - panic(p) - } - return r1, r2 - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go new file mode 100644 index 000000000..c69c4da91 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package consts contains the definitions of internal constants used +// throughout filepath-securejoin. +package consts + +// MaxSymlinkLimit is the maximum number of symlinks that can be encountered +// during a single lookup before returning -ELOOP. At time of writing, Linux +// has an internal limit of 40. 
+const MaxSymlinkLimit = 255 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index e6634d477..199c1d839 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2025 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style @@ -11,9 +13,9 @@ import ( "path/filepath" "strings" "syscall" -) -const maxSymlinkLimit = 255 + "github.com/cyphar/filepath-securejoin/internal/consts" +) // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is @@ -49,12 +51,13 @@ func hasDotDot(path string) bool { return strings.Contains("/"+path+"/", "/../") } -// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except -// that the returned path is guaranteed to be scoped inside the provided root -// path (when evaluated). Any symbolic links in the path are evaluated with the -// given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given [VFS] interface (if nil, the -// standard [os].* family of functions are used). +// SecureJoinVFS joins the two given path components (similar to +// [filepath.Join]) except that the returned path is guaranteed to be scoped +// inside the provided root path (when evaluated). Any symbolic links in the +// path are evaluated with the given root treated as the root of the +// filesystem, similar to a chroot. The filesystem state is evaluated through +// the given [VFS] interface (if nil, the standard [os].* family of functions +// are used). // // Note that the guarantees provided by this function only apply if the path // components in the returned string are not modified (in other words are not @@ -78,7 +81,7 @@ func hasDotDot(path string) bool { // fully resolved using [filepath.EvalSymlinks] or otherwise constructed to // avoid containing symlink components. Of course, the root also *must not* be // attacker-controlled. -func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { //nolint:revive // name is part of public API // The root path must not contain ".." components, otherwise when we join // the subpath we will end up with a weird path. We could work around this // in other ways but users shouldn't be giving us non-lexical root paths in @@ -138,7 +141,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { // It's a symlink, so get its contents and expand it by prepending it // to the yet-unparsed path. linksWalked++ - if linksWalked > maxSymlinkLimit { + if linksWalked > consts.MaxSymlinkLimit { return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go deleted file mode 100644 index f7a13e69c..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -var hasOpenat2 = sync_OnceValue(func() bool { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { - // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve - // ".." while a mount or rename occurs anywhere on the system. This could - // happen spuriously, or as the result of an attacker trying to mess with - // us during lookup. - // - // In addition, scoped lookups have a "safety check" at the end of - // complete_walk which will return -EXDEV if the final path is not in the - // root. - return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && - (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) -} - -const scopedLookupMaxRetries = 10 - -func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { - fullPath := dir.Name() + "/" + path - // Make sure we always set O_CLOEXEC. - how.Flags |= unix.O_CLOEXEC - var tries int - for tries < scopedLookupMaxRetries { - fd, err := unix.Openat2(int(dir.Fd()), path, how) - if err != nil { - if scopedLookupShouldRetry(how, err) { - // We retry a couple of times to avoid the spurious errors, and - // if we are being attacked then returning -EAGAIN is the best - // we can do. - tries++ - continue - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} - } - // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. - // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise - // you'll get infinite recursion here. - if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { - if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { - fullPath = actualPath - } - } - return os.NewFile(uintptr(fd), fullPath), nil - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} -} - -func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { - if !partial { - file, err := openat2File(root, unsafePath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - return file, "", err - } - return partialLookupOpenat2(root, unsafePath) -} - -// partialLookupOpenat2 is an alternative implementation of -// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a -// handle to the deepest existing child of the requested path within the root. -func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { - // TODO: Implement this as a git-bisect-like binary search. - - unsafePath = filepath.ToSlash(unsafePath) // noop - endIdx := len(unsafePath) - var lastError error - for endIdx > 0 { - subpath := unsafePath[:endIdx] - - handle, err := openat2File(root, subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - if err == nil { - // Jump over the slash if we have a non-"" remainingPath. - if endIdx < len(unsafePath) { - endIdx += 1 - } - // We found a subpath! 
- return handle, unsafePath[endIdx:], lastError - } - if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { - // That path doesn't exist, let's try the next directory up. - endIdx = strings.LastIndexByte(subpath, '/') - lastError = err - continue - } - return nil, "", fmt.Errorf("open subpath: %w", err) - } - // If we couldn't open anything, the whole subpath is missing. Return a - // copy of the root fd so that the caller doesn't close this one by - // accident. - rootClone, err := dupFile(root) - if err != nil { - return nil, "", err - } - return rootClone, unsafePath, lastError -}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go deleted file mode 100644 index 949fb5f2d..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -func dupFile(f *os.File) (*os.File, error) { - fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) - } - return os.NewFile(uintptr(fd), f.Name()), nil -} - -func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.O_CLOEXEC - fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) - if err != nil { - return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} - } - // All of the paths we use with openatFile(2) are guaranteed to be - // lexically safe, so we can use path.Join here. - fullPath := filepath.Join(dir.Name(), path) - return os.NewFile(uintptr(fd), fullPath), nil -} - -func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { - return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} - } - return stat, nil -} - -func readlinkatFile(dir *os.File, path string) (string, error) { - size := 4096 - for { - linkBuf := make([]byte, size) - n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) - if err != nil { - return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} - } - if n != size { - return string(linkBuf[:n]), nil - } - // Possible truncation, resize the buffer. - size *= 2 - } -}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md new file mode 100644 index 000000000..1be727e75 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/README.md @@ -0,0 +1,33 @@ +## `pathrs-lite` ## + +`github.com/cyphar/filepath-securejoin/pathrs-lite` provides a minimal **pure +Go** implementation of the core bits of [libpathrs][]. This is not intended to +be a complete replacement for libpathrs; instead, it is mainly meant to be +useful as a transition tool for existing Go projects.
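For orientation, here is a hedged usage sketch of the core flow this subpackage provides, built from the `OpenInRoot` and `Reopen` functions that this diff also re-exports (deprecated) from the root package; the root and subpath below are placeholder values:

```go
// Linux-only: pathrs-lite is gated behind a linux build tag.
package main

import (
	"fmt"
	"os"

	pathrs "github.com/cyphar/filepath-securejoin/pathrs-lite"
)

func main() {
	// Resolve an untrusted path inside a root. The returned handle is an
	// O_PATH file descriptor that is guaranteed to be inside the root.
	handle, err := pathrs.OpenInRoot("/srv/container-root", "etc/untrusted/config")
	if err != nil {
		fmt.Fprintln(os.Stderr, "open in root:", err)
		os.Exit(1)
	}
	defer handle.Close()

	// Upgrade the O_PATH handle to a readable file descriptor without
	// re-traversing the attacker-controllable path.
	f, err := pathrs.Reopen(handle, os.O_RDONLY)
	if err != nil {
		fmt.Fprintln(os.Stderr, "reopen:", err)
		os.Exit(1)
	}
	defer f.Close()
}
```

Splitting resolution (getting an `O_PATH` handle) from re-opening is the key design point: once resolved, the handle cannot be raced out from under the caller.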
+ +The long-term plan for `pathrs-lite` is to provide a build tag that will cause +all `pathrs-lite` operations to call into libpathrs directly, thus removing +code duplication for projects that wish to make use of libpathrs (and providing +the ability for software packagers to opt-in to libpathrs support without +needing to patch upstream). + +[libpathrs]: https://github.com/cyphar/libpathrs + +### License ### + +Most of this subpackage is licensed under the Mozilla Public License (version +2.0). For more information, see the top-level [COPYING.md][] and +[LICENSE.MPL-2.0][] files, as well as the individual license headers for each +file. + +``` +Copyright (C) 2024-2025 Aleksa Sarai +Copyright (C) 2024-2025 SUSE LLC + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, You can obtain one at https://mozilla.org/MPL/2.0/. +``` + +[COPYING.md]: ../COPYING.md +[LICENSE.MPL-2.0]: ../LICENSE.MPL-2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go new file mode 100644 index 000000000..d3d745175 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/doc.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package pathrs (pathrs-lite) is a less complete pure Go implementation of +// some of the APIs provided by [libpathrs]. +package pathrs diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go new file mode 100644 index 000000000..595dfbf1a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert/assert.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2025 Aleksa Sarai +// Copyright (C) 2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package assert provides some basic assertion helpers for Go. +package assert + +import ( + "fmt" +) + +// Assert panics if the predicate is false with the provided argument. +func Assert(predicate bool, msg any) { + if !predicate { + panic(msg) + } +} + +// Assertf panics if the predicate is false and formats the message using the +// same formatting as [fmt.Printf]. 
+//
+// [fmt.Printf]: https://pkg.go.dev/fmt#Printf
+func Assertf(predicate bool, fmtMsg string, args ...any) {
+	Assert(predicate, fmt.Sprintf(fmtMsg, args...))
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors_linux.go
new file mode 100644
index 000000000..d0b200f4f
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/errors_linux.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+// Package internal contains unexported common code for filepath-securejoin.
+package internal
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type xdevErrorish struct {
+	description string
+}
+
+func (err xdevErrorish) Error() string { return err.description }
+func (err xdevErrorish) Is(target error) bool { return target == unix.EXDEV }
+
+var (
+	// ErrPossibleAttack indicates that some attack was detected.
+	ErrPossibleAttack error = xdevErrorish{"possible attack detected"}
+
+	// ErrPossibleBreakout indicates that during an operation we ended up in a
+	// state that could be a breakout but we detected it.
+	ErrPossibleBreakout error = xdevErrorish{"possible breakout detected"}
+
+	// ErrInvalidDirectory indicates an unlinked directory.
+	ErrInvalidDirectory = errors.New("wandered into deleted directory")
+
+	// ErrDeletedInode indicates an unlinked file (non-directory).
+	ErrDeletedInode = errors.New("cannot verify path of deleted inode")
+)
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go
new file mode 100644
index 000000000..091054913
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/at_linux.go
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package fd
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+)
+
+// prepareAt returns -EBADF (an invalid fd) if dir is nil, otherwise it uses
+// dir.Fd(). We use -EBADF because in filepath-securejoin we generally
+// don't want to allow relative-to-cwd paths. The returned path is an
+// *informational* string that describes a reasonable pathname for the given
+// *at(2) arguments. You must not use the full path for any actual filesystem
+// operations.
+func prepareAt(dir Fd, path string) (dirFd int, unsafeUnmaskedPath string) {
+	dirFd, dirPath := -int(unix.EBADF), "."
+	if dir != nil {
+		dirFd, dirPath = int(dir.Fd()), dir.Name()
+	}
+	if !filepath.IsAbs(path) {
+		// only prepend the dirfd path for relative paths
+		path = dirPath + "/" + path
+	}
+	// NOTE: If path is "." 
or "", the returned path won't be filepath.Clean, + // but that's okay since this path is either used for errors (in which case + // a trailing "/" or "/." is important information) or will be + // filepath.Clean'd later (in the case of fd.Openat). + return dirFd, path +} + +// Openat is an [Fd]-based wrapper around unix.Openat. +func Openat(dir Fd, path string, flags int, mode int) (*os.File, error) { //nolint:unparam // wrapper func + dirFd, fullPath := prepareAt(dir, path) + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(dirFd, path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + // openat is only used with lexically-safe paths so we can use + // filepath.Clean here, and also the path itself is not going to be used + // for actual path operations. + fullPath = filepath.Clean(fullPath) + return os.NewFile(uintptr(fd), fullPath), nil +} + +// Fstatat is an [Fd]-based wrapper around unix.Fstatat. +func Fstatat(dir Fd, path string, flags int) (unix.Stat_t, error) { + dirFd, fullPath := prepareAt(dir, path) + var stat unix.Stat_t + if err := unix.Fstatat(dirFd, path, &stat, flags); err != nil { + return stat, &os.PathError{Op: "fstatat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return stat, nil +} + +// Faccessat is an [Fd]-based wrapper around unix.Faccessat. +func Faccessat(dir Fd, path string, mode uint32, flags int) error { + dirFd, fullPath := prepareAt(dir, path) + err := unix.Faccessat(dirFd, path, mode, flags) + if err != nil { + err = &os.PathError{Op: "faccessat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return err +} + +// Readlinkat is an [Fd]-based wrapper around unix.Readlinkat. +func Readlinkat(dir Fd, path string) (string, error) { + dirFd, fullPath := prepareAt(dir, path) + size := 4096 + for { + linkBuf := make([]byte, size) + n, err := unix.Readlinkat(dirFd, path, linkBuf) + if err != nil { + return "", &os.PathError{Op: "readlinkat", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + if n != size { + return string(linkBuf[:n]), nil + } + // Possible truncation, resize the buffer. + size *= 2 + } +} + +const ( + // STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to + // avoid bumping the requirement for a single constant we can just define it + // ourselves. + _STATX_MNT_ID_UNIQUE = 0x4000 //nolint:revive // unix.* name + + // We don't care which mount ID we get. The kernel will give us the unique + // one if it is supported. If the kernel doesn't support + // STATX_MNT_ID_UNIQUE, the bit is ignored and the returned request mask + // will only contain STATX_MNT_ID (if supported). + wantStatxMntMask = _STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID +) + +var hasStatxMountID = gocompat.SyncOnceValue(func() bool { + var stx unix.Statx_t + err := unix.Statx(-int(unix.EBADF), "/", 0, wantStatxMntMask, &stx) + return err == nil && stx.Mask&wantStatxMntMask != 0 +}) + +// GetMountID gets the mount identifier associated with the fd and path +// combination. It is effectively a wrapper around fetching +// STATX_MNT_ID{,_UNIQUE} with unix.Statx, but with a fallback to 0 if the +// kernel doesn't support the feature. +func GetMountID(dir Fd, path string) (uint64, error) { + // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. 
+ if !hasStatxMountID() { + return 0, nil + } + + dirFd, fullPath := prepareAt(dir, path) + + var stx unix.Statx_t + err := unix.Statx(dirFd, path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, wantStatxMntMask, &stx) + if stx.Mask&wantStatxMntMask == 0 { + // It's not a kernel limitation, for some reason we couldn't get a + // mount ID. Assume it's some kind of attack. + err = fmt.Errorf("could not get mount id: %w", err) + } + if err != nil { + return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: fullPath, Err: err} + } + runtime.KeepAlive(dir) + return stx.Mnt_id, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go new file mode 100644 index 000000000..d2206a386 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MPL-2.0 + +// Copyright (C) 2025 Aleksa Sarai +// Copyright (C) 2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package fd provides a drop-in interface-based replacement of [*os.File] that +// allows for things like noop-Close wrappers to be used. +// +// [*os.File]: https://pkg.go.dev/os#File +package fd + +import ( + "io" + "os" +) + +// Fd is an interface that mirrors most of the API of [*os.File], allowing you +// to create wrappers that can be used in place of [*os.File]. +// +// [*os.File]: https://pkg.go.dev/os#File +type Fd interface { + io.Closer + Name() string + Fd() uintptr +} + +// Compile-time interface checks. +var ( + _ Fd = (*os.File)(nil) + _ Fd = noClose{} +) + +type noClose struct{ inner Fd } + +func (f noClose) Name() string { return f.inner.Name() } +func (f noClose) Fd() uintptr { return f.inner.Fd() } + +func (f noClose) Close() error { return nil } + +// NopCloser returns an [*os.File]-like object where the [Close] method is now +// a no-op. +// +// Note that for [*os.File] and similar objects, the Go garbage collector will +// still call [Close] on the underlying file unless you use +// [runtime.SetFinalizer] to disable this behaviour. This is up to the caller +// to do (if necessary). +// +// [*os.File]: https://pkg.go.dev/os#File +// [Close]: https://pkg.go.dev/io#Closer +// [runtime.SetFinalizer]: https://pkg.go.dev/runtime#SetFinalizer +func NopCloser(f Fd) Fd { return noClose{inner: f} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go new file mode 100644 index 000000000..e1ec3c0b8 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/fd_linux.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package fd + +import ( + "fmt" + "os" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal" +) + +// DupWithName creates a new file descriptor referencing the same underlying +// file, but with the provided name instead of fd.Name(). 
+func DupWithName(fd Fd, name string) (*os.File, error) {
+	fd2, err := unix.FcntlInt(fd.Fd(), unix.F_DUPFD_CLOEXEC, 0)
+	if err != nil {
+		return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
+	}
+	runtime.KeepAlive(fd)
+	return os.NewFile(uintptr(fd2), name), nil
+}
+
+// Dup creates a new file descriptor referencing the same underlying file.
+func Dup(fd Fd) (*os.File, error) {
+	return DupWithName(fd, fd.Name())
+}
+
+// Fstat is an [Fd]-based wrapper around unix.Fstat.
+func Fstat(fd Fd) (unix.Stat_t, error) {
+	var stat unix.Stat_t
+	if err := unix.Fstat(int(fd.Fd()), &stat); err != nil {
+		return stat, &os.PathError{Op: "fstat", Path: fd.Name(), Err: err}
+	}
+	runtime.KeepAlive(fd)
+	return stat, nil
+}
+
+// Fstatfs is an [Fd]-based wrapper around unix.Fstatfs.
+func Fstatfs(fd Fd) (unix.Statfs_t, error) {
+	var statfs unix.Statfs_t
+	if err := unix.Fstatfs(int(fd.Fd()), &statfs); err != nil {
+		return statfs, &os.PathError{Op: "fstatfs", Path: fd.Name(), Err: err}
+	}
+	runtime.KeepAlive(fd)
+	return statfs, nil
+}
+
+// IsDeadInode detects whether the file has been unlinked from a filesystem and
+// is thus a "dead inode" from the kernel's perspective.
+func IsDeadInode(file Fd) error {
+	// If the nlink of a file drops to 0, there is an attacker deleting
+	// directories during our walk, which could result in weird /proc values.
+	// It's better to error out in this case.
+	stat, err := Fstat(file)
+	if err != nil {
+		return fmt.Errorf("check for dead inode: %w", err)
+	}
+	if stat.Nlink == 0 {
+		err := internal.ErrDeletedInode
+		if stat.Mode&unix.S_IFMT == unix.S_IFDIR {
+			err = internal.ErrInvalidDirectory
+		}
+		return fmt.Errorf("%w %q", err, file.Name())
+	}
+	return nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go
new file mode 100644
index 000000000..77549c7a9
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/mount_linux.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package fd
+
+import (
+	"os"
+	"runtime"
+
+	"golang.org/x/sys/unix"
+)
+
+// Fsopen is an [Fd]-based wrapper around unix.Fsopen.
+func Fsopen(fsName string, flags int) (*os.File, error) {
+	// Make sure we always set FSOPEN_CLOEXEC.
+	flags |= unix.FSOPEN_CLOEXEC
+	fd, err := unix.Fsopen(fsName, flags)
+	if err != nil {
+		return nil, os.NewSyscallError("fsopen "+fsName, err)
+	}
+	return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil
+}
+
+// Fsmount is an [Fd]-based wrapper around unix.Fsmount.
+func Fsmount(ctx Fd, flags, mountAttrs int) (*os.File, error) {
+	// Make sure we always set FSMOUNT_CLOEXEC.
+	flags |= unix.FSMOUNT_CLOEXEC
+	fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs)
+	if err != nil {
+		return nil, os.NewSyscallError("fsmount "+ctx.Name(), err)
+	}
+	return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil
+}
+
+// OpenTree is an [Fd]-based wrapper around unix.OpenTree.
+func OpenTree(dir Fd, path string, flags uint) (*os.File, error) {
+	dirFd, fullPath := prepareAt(dir, path)
+	// Make sure we always set OPEN_TREE_CLOEXEC.
+	flags |= unix.OPEN_TREE_CLOEXEC
+	fd, err := unix.OpenTree(dirFd, path, flags)
+	if err != nil {
+		return nil, &os.PathError{Op: "open_tree", Path: fullPath, Err: err}
+	}
+	runtime.KeepAlive(dir)
+	return os.NewFile(uintptr(fd), fullPath), nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go
new file mode 100644
index 000000000..63863647d
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd/openat2_linux.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package fd
+
+import (
+	"errors"
+	"os"
+	"runtime"
+
+	"golang.org/x/sys/unix"
+)
+
+func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool {
+	// RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve
+	// ".." while a mount or rename occurs anywhere on the system. This could
+	// happen spuriously, or as the result of an attacker trying to mess with
+	// us during lookup.
+	//
+	// In addition, scoped lookups have a "safety check" at the end of
+	// complete_walk which will return -EXDEV if the final path is not in the
+	// root.
+	return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 &&
+		(errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV))
+}
+
+// This is a fairly arbitrary limit we have just to avoid an attacker being
+// able to make us spin in an infinite retry loop -- callers can choose to
+// retry on EAGAIN if they prefer.
+const scopedLookupMaxRetries = 128
+
+// Openat2 is an [Fd]-based wrapper around unix.Openat2, but with some retry
+// logic in case of EAGAIN errors.
+//
+// NOTE: This is a variable so that the lookup tests can force openat2 to fail.
+var Openat2 = func(dir Fd, path string, how *unix.OpenHow) (*os.File, error) {
+	dirFd, fullPath := prepareAt(dir, path)
+	// Make sure we always set O_CLOEXEC.
+	how.Flags |= unix.O_CLOEXEC
+	var tries int
+	for {
+		fd, err := unix.Openat2(dirFd, path, how)
+		if err != nil {
+			if scopedLookupShouldRetry(how, err) && tries < scopedLookupMaxRetries {
+				// We retry a bounded number of times to avoid the spurious
+				// errors, and if we are being attacked then returning -EAGAIN
+				// is the best we can do.
+				tries++
+				continue
+			}
+			return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err}
+		}
+		runtime.KeepAlive(dir)
+		return os.NewFile(uintptr(fd), fullPath), nil
+	}
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md
new file mode 100644
index 000000000..5dcb6ae00
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/README.md
@@ -0,0 +1,10 @@
+## gocompat ##
+
+This directory contains backports of stdlib functions from later Go versions,
+so that filepath-securejoin can continue to be used by projects that are stuck
+supporting Go 1.18. Note that filepath-securejoin is often added in security
+patches for old releases, so avoiding the need to bump Go compiler requirements
+is a huge plus to downstreams.
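+
+As a sketch of how these shims are consumed inside the module (illustrative
+only -- this is an `internal/` package, so it cannot be imported by outside
+projects):
+
+```go
+package example
+
+import (
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+)
+
+// hasFeature runs its probe exactly once and caches the result: on Go 1.21
+// and later SyncOnceValue is sync.OnceValue, while on older toolchains it is
+// the local backport, so callers never need build tags themselves.
+var hasFeature = gocompat.SyncOnceValue(func() bool {
+	return false // placeholder for a real feature probe
+})
+```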
+ +The source code is licensed under the same license as the Go stdlib. See the +source files for the precise license information. diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go new file mode 100644 index 000000000..4b1803f58 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/doc.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux && go1.20 + +// Copyright (C) 2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocompat includes compatibility shims (backported from future Go +// stdlib versions) to permit filepath-securejoin to be used with older Go +// versions (often filepath-securejoin is added in security patches for old +// releases, so avoiding the need to bump Go compiler requirements is a huge +// plus to downstreams). +package gocompat diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_go119.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_go119.go new file mode 100644 index 000000000..ac93cb045 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_go119.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && go1.19 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocompat + +import ( + "sync/atomic" +) + +// A Bool is an atomic boolean value. +// The zero value is false. +// +// Bool must not be copied after first use. +type Bool = atomic.Bool diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_unsupported.go new file mode 100644 index 000000000..21b5b29ad --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_atomic_unsupported.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !go1.19 + +// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocompat + +import ( + "sync/atomic" +) + +// noCopy may be added to structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +// +// Note that it must not be embedded, due to the Lock and Unlock methods. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} + +// b32 returns a uint32 0 or 1 representing b. +func b32(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// A Bool is an atomic boolean value. +// The zero value is false. +// +// Bool must not be copied after first use. +type Bool struct { + _ noCopy + v uint32 +} + +// Load atomically loads and returns the value stored in x. +func (x *Bool) Load() bool { return atomic.LoadUint32(&x.v) != 0 } + +// Store atomically stores val into x. 
+func (x *Bool) Store(val bool) { atomic.StoreUint32(&x.v, b32(val)) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go similarity index 69% rename from vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go index 42452bbf9..4a114bd3d 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_go120.go @@ -1,18 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux && go1.20 // Copyright (C) 2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package securejoin +package gocompat import ( "fmt" ) -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except // that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) // is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { +func WrapBaseError(baseErr, extraErr error) error { return fmt.Errorf("%w: %w", extraErr, baseErr) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go similarity index 80% rename from vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go index e7adca3fd..3061016a6 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_errors_unsupported.go @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause + //go:build linux && !go1.20 // Copyright (C) 2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package securejoin +package gocompat import ( "fmt" @@ -27,10 +29,10 @@ func (err wrappedError) Error() string { return fmt.Sprintf("%v: %v", err.isError, err.inner) } -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except +// WrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except // that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) // is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { +func WrapBaseError(baseErr, extraErr error) error { return wrappedError{ inner: baseErr, isError: extraErr, diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go new file mode 100644 index 000000000..d4a938186 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_go121.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && go1.21 + +// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocompat + +import ( + "cmp" + "slices" + "sync" +) + +// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. +func SlicesDeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { + return slices.DeleteFunc(slice, delFn) +} + +// SlicesContains is equivalent to Go 1.21's slices.Contains. +func SlicesContains[S ~[]E, E comparable](slice S, val E) bool { + return slices.Contains(slice, val) +} + +// SlicesClone is equivalent to Go 1.21's slices.Clone. +func SlicesClone[S ~[]E, E any](slice S) S { + return slices.Clone(slice) +} + +// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. +func SyncOnceValue[T any](f func() T) func() T { + return sync.OnceValue(f) +} + +// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. +func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + return sync.OnceValues(f) +} + +// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. +type CmpOrdered = cmp.Ordered + +// CmpCompare is equivalent to Go 1.21's cmp.Compare. +func CmpCompare[T CmpOrdered](x, y T) int { + return cmp.Compare(x, y) +} + +// Max2 is equivalent to Go 1.21's max builtin (but only for two parameters). +func Max2[T CmpOrdered](x, y T) T { + return max(x, y) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go new file mode 100644 index 000000000..0ea6218aa --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat/gocompat_generics_unsupported.go @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !go1.21 + +// Copyright (C) 2021, 2022 The Go Authors. All rights reserved. +// Copyright (C) 2024-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +package gocompat + +import ( + "sync" +) + +// These are very minimal implementations of functions that appear in Go 1.21's +// stdlib, included so that we can build on older Go versions. Most are +// borrowed directly from the stdlib, and a few are modified to be "obviously +// correct" without needing to copy too many other helpers. + +// clearSlice is equivalent to Go 1.21's builtin clear. +// Copied from the Go 1.24 stdlib implementation. +func clearSlice[S ~[]E, E any](slice S) { + var zero E + for i := range slice { + slice[i] = zero + } +} + +// slicesIndexFunc is equivalent to Go 1.21's slices.IndexFunc. +// Copied from the Go 1.24 stdlib implementation. +func slicesIndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// SlicesDeleteFunc is equivalent to Go 1.21's slices.DeleteFunc. +// Copied from the Go 1.24 stdlib implementation. +func SlicesDeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := slicesIndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// SlicesContains is equivalent to Go 1.21's slices.Contains. 
+// Similar to the stdlib slices.Contains, except that we don't have +// slices.Index so we need to use slices.IndexFunc for this non-Func helper. +func SlicesContains[S ~[]E, E comparable](s S, v E) bool { + return slicesIndexFunc(s, func(e E) bool { return e == v }) >= 0 +} + +// SlicesClone is equivalent to Go 1.21's slices.Clone. +// Copied from the Go 1.24 stdlib implementation. +func SlicesClone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// SyncOnceValue is equivalent to Go 1.21's sync.OnceValue. +// Copied from the Go 1.25 stdlib implementation. +func SyncOnceValue[T any](f func() T) func() T { + // Use a struct so that there's a single heap allocation. + d := struct { + f func() T + once sync.Once + valid bool + p any + result T + }{ + f: f, + } + return func() T { + d.once.Do(func() { + defer func() { + d.f = nil + d.p = recover() + if !d.valid { + panic(d.p) + } + }() + d.result = d.f() + d.valid = true + }) + if !d.valid { + panic(d.p) + } + return d.result + } +} + +// SyncOnceValues is equivalent to Go 1.21's sync.OnceValues. +// Copied from the Go 1.25 stdlib implementation. +func SyncOnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { + // Use a struct so that there's a single heap allocation. + d := struct { + f func() (T1, T2) + once sync.Once + valid bool + p any + r1 T1 + r2 T2 + }{ + f: f, + } + return func() (T1, T2) { + d.once.Do(func() { + defer func() { + d.f = nil + d.p = recover() + if !d.valid { + panic(d.p) + } + }() + d.r1, d.r2 = d.f() + d.valid = true + }) + if !d.valid { + panic(d.p) + } + return d.r1, d.r2 + } +} + +// CmpOrdered is equivalent to Go 1.21's cmp.Ordered generic type definition. +// Copied from the Go 1.25 stdlib implementation. +type CmpOrdered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | + ~float32 | ~float64 | + ~string +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +// Copied from the Go 1.25 stdlib implementation. +func isNaN[T CmpOrdered](x T) bool { + return x != x +} + +// CmpCompare is equivalent to Go 1.21's cmp.Compare. +// Copied from the Go 1.25 stdlib implementation. +func CmpCompare[T CmpOrdered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN { + if yNaN { + return 0 + } + return -1 + } + if yNaN { + return +1 + } + if x < y { + return -1 + } + if x > y { + return +1 + } + return 0 +} + +// Max2 is equivalent to Go 1.21's max builtin for two parameters. +func Max2[T CmpOrdered](x, y T) T { + m := x + if y > m { + m = y + } + return m +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go new file mode 100644 index 000000000..cb6de4186 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion/kernel_linux.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright (C) 2022 The Go Authors. All rights reserved. +// Copyright (C) 2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. 
+
+// The parsing logic is very loosely based on the Go stdlib's
+// src/internal/syscall/unix/kernel_version_linux.go but with an API that looks
+// a bit like runc's libcontainer/system/kernelversion.
+//
+// TODO(cyphar): This API has been copied around to a lot of different projects
+// (Docker, containerd, runc, and now filepath-securejoin) -- maybe we should
+// put it in a separate project?
+
+// Package kernelversion provides a simple mechanism for checking whether the
+// running kernel is at least as new as some baseline kernel version. This is
+// often useful when checking for features that would be too complicated to
+// test support for (or in cases where we know that some kernel features in
+// backport-heavy kernels are broken and need to be avoided).
+package kernelversion
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+)
+
+// KernelVersion is a numeric representation of the key numerical elements of a
+// kernel version (for instance, "4.1.2-default-1" would be represented as
+// KernelVersion{4, 1, 2}).
+type KernelVersion []uint64
+
+func (kver KernelVersion) String() string {
+	var str strings.Builder
+	for idx, elem := range kver {
+		if idx != 0 {
+			_, _ = str.WriteRune('.')
+		}
+		_, _ = str.WriteString(strconv.FormatUint(elem, 10))
+	}
+	return str.String()
+}
+
+var errInvalidKernelVersion = errors.New("invalid kernel version")
+
+// parseKernelVersion parses a string and creates a KernelVersion based on it.
+func parseKernelVersion(kverStr string) (KernelVersion, error) {
+	kver := make(KernelVersion, 1, 3)
+	for idx, ch := range kverStr {
+		if '0' <= ch && ch <= '9' {
+			v := &kver[len(kver)-1]
+			*v = (*v * 10) + uint64(ch-'0')
+		} else {
+			if idx == 0 || kverStr[idx-1] < '0' || '9' < kverStr[idx-1] {
+				// "." must be preceded by a digit while in version section
+				return nil, fmt.Errorf("%w %q: kernel version has dot(s) followed by non-digit in version section", errInvalidKernelVersion, kverStr)
+			}
+			if ch != '.' {
+				break
+			}
+			kver = append(kver, 0)
+		}
+	}
+	if len(kver) < 2 {
+		return nil, fmt.Errorf("%w %q: kernel versions must contain at least two components", errInvalidKernelVersion, kverStr)
+	}
+	return kver, nil
+}
+
+// getKernelVersion gets the current kernel version.
+var getKernelVersion = gocompat.SyncOnceValues(func() (KernelVersion, error) {
+	var uts unix.Utsname
+	if err := unix.Uname(&uts); err != nil {
+		return nil, err
+	}
+	// Remove the \x00 from the release.
+	release := uts.Release[:]
+	return parseKernelVersion(string(release[:bytes.IndexByte(release, 0)]))
+})
+
+// GreaterEqualThan returns true if the host kernel version is greater than
+// or equal to the provided [KernelVersion]. When doing this comparison, any
+// non-numerical suffixes of the host kernel version are ignored.
+//
+// If the number of components provided is not equal to the number of numerical
+// components of the host kernel version, any missing components are treated as
+// 0. This means that GreaterEqualThan(KernelVersion{4}) will be treated the
+// same as GreaterEqualThan(KernelVersion{4, 0, 0, ..., 0, 0}), and that if the
+// host kernel version is "4" then GreaterEqualThan(KernelVersion{4, 1}) will
+// return false (because the host version will be treated as "4.0").
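+//
+// For example, on a host running kernel 5.10.0:
+//
+//	ok, _ := GreaterEqualThan(KernelVersion{5, 2})    // ok == true
+//	ok, _ = GreaterEqualThan(KernelVersion{5, 10, 1}) // ok == false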
+func GreaterEqualThan(wantKver KernelVersion) (bool, error) {
+	hostKver, err := getKernelVersion()
+	if err != nil {
+		return false, err
+	}
+
+	// Pad out the kernel version lengths to match one another.
+	cmpLen := gocompat.Max2(len(hostKver), len(wantKver))
+	hostKver = append(hostKver, make(KernelVersion, cmpLen-len(hostKver))...)
+	wantKver = append(wantKver, make(KernelVersion, cmpLen-len(wantKver))...)
+
+	for i := 0; i < cmpLen; i++ {
+		switch gocompat.CmpCompare(hostKver[i], wantKver[i]) {
+		case -1:
+			// host < want
+			return false, nil
+		case +1:
+			// host > want
+			return true, nil
+		case 0:
+			continue
+		}
+	}
+	// equal version values
+	return true, nil
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go
new file mode 100644
index 000000000..4635714f6
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/doc.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: MPL-2.0
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+// Package linux returns information about what features are supported on the
+// running kernel.
+package linux
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go
new file mode 100644
index 000000000..b29905bff
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/mount_linux.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package linux
+
+import (
+	"golang.org/x/sys/unix"
+
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion"
+)
+
+// HasNewMountAPI returns whether the new fsopen(2) mount API is supported on
+// the running kernel.
+var HasNewMountAPI = gocompat.SyncOnceValue(func() bool {
+	// All of the pieces of the new mount API we use (fsopen, fsconfig,
+	// fsmount, open_tree) were added together in Linux 5.2[1,2], so we can
+	// just check for one of the syscalls and the others should also be
+	// available.
+	//
+	// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
+	// This is equivalent to openat(2), but tells us if open_tree is
+	// available (and thus all of the other basic new mount API syscalls).
+	// open_tree(2) is the most lightweight syscall to test here.
+	//
+	// [1]: merge commit 400913252d09
+	// [2]:
+	fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
+	if err != nil {
+		return false
+	}
+	_ = unix.Close(fd)
+
+	// RHEL 8 has a backport of fsopen(2) that appears to have some very
+	// difficult to debug performance pathology. As such, it seems prudent to
+	// simply reject pre-5.2 kernels.
+	isNotBackport, _ := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{5, 2})
+	return isNotBackport
+})
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go
new file mode 100644
index 000000000..dc5f65cef
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux/openat2_linux.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package linux
+
+import (
+	"golang.org/x/sys/unix"
+
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+)
+
+// sawOpenat2Error stores whether we have seen an error from HasOpenat2. This
+// is a one-way toggle, so as soon as we see an error we "lock" into that mode.
+// We cannot use sync.OnceValue to store the success/fail state once because it
+// is possible for the program we are running in to apply a seccomp-bpf filter
+// and thus disable openat2 during execution.
+var sawOpenat2Error gocompat.Bool
+
+// HasOpenat2 returns whether openat2(2) is supported on the running kernel.
+var HasOpenat2 = func() bool {
+	if sawOpenat2Error.Load() {
+		return false
+	}
+
+	fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
+		Flags:   unix.O_PATH | unix.O_CLOEXEC,
+		Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
+	})
+	if err != nil {
+		sawOpenat2Error.Store(true) // doesn't matter if we race here
+		return false
+	}
+	_ = unix.Close(fd)
+	return true
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go
new file mode 100644
index 000000000..21e0a62e8
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_linux.go
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+// Package procfs provides a safe API for operating on /proc on Linux. Note
+// that this is the *internal* procfs API, mainly needed because Go's
+// restrictions on cyclic dependencies and its minimal visibility system leave
+// no way to share this code without a separate internal/ package.
+package procfs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal"
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert"
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+	"github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
+)
+
+// The kernel guarantees that the root inode of a procfs mount has an
+// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. 
+const ( + procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC + procRootIno = 1 // PROC_ROOT_INO +) + +// verifyProcHandle checks that the handle is from a procfs filesystem. +// Contrast this to [verifyProcRoot], which also verifies that the handle is +// the root of a procfs mount. +func verifyProcHandle(procHandle fd.Fd) error { + if statfs, err := fd.Fstatfs(procHandle); err != nil { + return err + } else if statfs.Type != procSuperMagic { + return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) + } + return nil +} + +// verifyProcRoot verifies that the handle is the root of a procfs filesystem. +// Contrast this to [verifyProcHandle], which only verifies if the handle is +// some file on procfs (regardless of what file it is). +func verifyProcRoot(procRoot fd.Fd) error { + if err := verifyProcHandle(procRoot); err != nil { + return err + } + if stat, err := fd.Fstat(procRoot); err != nil { + return err + } else if stat.Ino != procRootIno { + return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) + } + return nil +} + +type procfsFeatures struct { + // hasSubsetPid was added in Linux 5.8, along with hidepid=ptraceable (and + // string-based hidepid= values). Before this patchset, it was not really + // safe to try to modify procfs superblock flags because the superblock was + // shared -- so if this feature is not available, **you should not set any + // superblock flags**. + // + // 6814ef2d992a ("proc: add option to mount only a pids subset") + // fa10fed30f25 ("proc: allow to mount many instances of proc in one pid namespace") + // 24a71ce5c47f ("proc: instantiate only pids that we can ptrace on 'hidepid=4' mount option") + // 1c6c4d112e81 ("proc: use human-readable values for hidepid") + // 9ff7258575d5 ("Merge branch 'proc-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace") + hasSubsetPid bool +} + +var getProcfsFeatures = gocompat.SyncOnceValue(func() procfsFeatures { + if !linux.HasNewMountAPI() { + return procfsFeatures{} + } + procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return procfsFeatures{} + } + defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here + + return procfsFeatures{ + hasSubsetPid: unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") == nil, + } +}) + +func newPrivateProcMount(subset bool) (_ *Handle, Err error) { + procfsCtx, err := fd.Fsopen("proc", unix.FSOPEN_CLOEXEC) + if err != nil { + return nil, err + } + defer procfsCtx.Close() //nolint:errcheck // close failures aren't critical here + + if subset && getProcfsFeatures().hasSubsetPid { + // Try to configure hidepid=ptraceable,subset=pid if possible, but + // ignore errors. + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") + _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") + } + + // Get an actual handle. + if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { + return nil, os.NewSyscallError("fsconfig create procfs", err) + } + // TODO: Output any information from the fscontext log to debug logs. + procRoot, err := fd.Fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) + if err != nil { + return nil, err + } + defer func() { + if Err != nil { + _ = procRoot.Close() + } + }() + return newHandle(procRoot) +} + +func clonePrivateProcMount() (_ *Handle, Err error) { + // Try to make a clone without using AT_RECURSIVE if we can. 
If this works,
+	// we can be sure there are no over-mounts and so if the root is valid then
+	// we're golden. Otherwise, we have to deal with over-mounts.
+	procRoot, err := fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE)
+	if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procRoot) {
+		procRoot, err = fd.OpenTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("creating a detached procfs clone: %w", err)
+	}
+	defer func() {
+		if Err != nil {
+			_ = procRoot.Close()
+		}
+	}()
+	return newHandle(procRoot)
+}
+
+func privateProcRoot(subset bool) (*Handle, error) {
+	if !linux.HasNewMountAPI() || hookForceGetProcRootUnsafe() {
+		return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
+	}
+	// Try to create a new procfs mount from scratch if we can. This ensures we
+	// can get a procfs mount even if /proc is fake (for whatever reason).
+	procRoot, err := newPrivateProcMount(subset)
+	if err != nil || hookForcePrivateProcRootOpenTree(procRoot) {
+		// Try to clone /proc then...
+		procRoot, err = clonePrivateProcMount()
+	}
+	return procRoot, err
+}
+
+func unsafeHostProcRoot() (_ *Handle, Err error) {
+	procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if Err != nil {
+			_ = procRoot.Close()
+		}
+	}()
+	return newHandle(procRoot)
+}
+
+// Handle is a wrapper around an *os.File handle to "/proc", which can be used
+// to do further procfs-related operations in a safe way.
+type Handle struct {
+	Inner fd.Fd
+	// Does this handle have subset=pid set?
+	isSubset bool
+}
+
+func newHandle(procRoot fd.Fd) (*Handle, error) {
+	if err := verifyProcRoot(procRoot); err != nil {
+		// We take ownership of procRoot, so close it if verification fails.
+		_ = procRoot.Close()
+		return nil, err
+	}
+	proc := &Handle{Inner: procRoot}
+	// With subset=pid we can be sure that /proc/uptime will not exist.
+	if err := fd.Faccessat(proc.Inner, "uptime", unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		proc.isSubset = errors.Is(err, os.ErrNotExist)
+	}
+	return proc, nil
+}
+
+// Close closes the underlying file for the Handle.
+func (proc *Handle) Close() error { return proc.Inner.Close() }
+
+var getCachedProcRoot = gocompat.SyncOnceValue(func() *Handle {
+	procRoot, err := getProcRoot(true)
+	if err != nil {
+		return nil // just don't cache if we see an error
+	}
+	if !procRoot.isSubset {
+		return nil // we only cache verified subset=pid handles
+	}
+
+	// Disarm (*Handle).Close() to stop someone from accidentally closing
+	// the global handle.
+	procRoot.Inner = fd.NopCloser(procRoot.Inner)
+	return procRoot
+})
+
+// OpenProcRoot tries to open a "safer" handle to "/proc".
+func OpenProcRoot() (*Handle, error) {
+	if proc := getCachedProcRoot(); proc != nil {
+		return proc, nil
+	}
+	return getProcRoot(true)
+}
+
+// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or
+// masked paths (but also without "subset=pid").
+func OpenUnsafeProcRoot() (*Handle, error) { return getProcRoot(false) }
+
+func getProcRoot(subset bool) (*Handle, error) {
+	proc, err := privateProcRoot(subset)
+	if err != nil {
+		// Fall back to using a /proc handle if making a private mount failed.
+		// If we have openat2, at least we can avoid some kinds of over-mount
+		// attacks, but without openat2 there's not much we can do. 
+		proc, err = unsafeHostProcRoot()
+	}
+	return proc, err
+}
+
+var hasProcThreadSelf = gocompat.SyncOnceValue(func() bool {
+	return unix.Access("/proc/thread-self/", unix.F_OK) == nil
+})
+
+var errUnsafeProcfs = errors.New("unsafe procfs detected")
+
+// lookup is a very minimal wrapper around [procfsLookupInRoot] which is
+// intended to be called from the external API.
+func (proc *Handle) lookup(subpath string) (*os.File, error) {
+	handle, err := procfsLookupInRoot(proc.Inner, subpath)
+	if err != nil {
+		return nil, err
+	}
+	return handle, nil
+}
+
+// procfsBase is an enum indicating the prefix of a subpath in operations
+// involving [Handle]s.
+type procfsBase string
+
+const (
+	// ProcRoot refers to the root of the procfs (i.e., "/proc/<subpath>").
+	ProcRoot procfsBase = "/proc"
+	// ProcSelf refers to the current process' subdirectory (i.e.,
+	// "/proc/self/<subpath>").
+	ProcSelf procfsBase = "/proc/self"
+	// ProcThreadSelf refers to the current thread's subdirectory (i.e.,
+	// "/proc/thread-self/<subpath>"). In multi-threaded programs (i.e., all Go
+	// programs) where one thread has a different CLONE_FS, it is possible for
+	// "/proc/self" to point to the wrong thread and so "/proc/thread-self"
+	// may be necessary. Note that on pre-3.17 kernels, "/proc/thread-self"
+	// doesn't exist and so a fallback will be used in that case.
+	ProcThreadSelf procfsBase = "/proc/thread-self"
+	// TODO: Switch to an interface setup so we can have a more type-safe
+	// version of ProcPid and remove the need to worry about invalid string
+	// values.
+)
+
+// prefix returns a prefix that can be used with the given [Handle].
+func (base procfsBase) prefix(proc *Handle) (string, error) {
+	switch base {
+	case ProcRoot:
+		return ".", nil
+	case ProcSelf:
+		return "self", nil
+	case ProcThreadSelf:
+		threadSelf := "thread-self"
+		if !hasProcThreadSelf() || hookForceProcSelfTask() {
+			// Pre-3.17 kernels don't have /proc/thread-self, so do it
+			// manually.
+			threadSelf = "self/task/" + strconv.Itoa(unix.Gettid())
+			if err := fd.Faccessat(proc.Inner, threadSelf, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
+				// In this case, we are running in a pid namespace that
+				// doesn't match the /proc mount we have. This can happen
+				// inside runc.
+				//
+				// Unfortunately, there is no nice way to get the correct TID
+				// to use here because of the age of the kernel, so we have to
+				// just use /proc/self and hope that it works.
+				threadSelf = "self"
+			}
+		}
+		return threadSelf, nil
+	}
+	return "", fmt.Errorf("invalid procfs base %q", base)
+}
+
+// ProcThreadSelfCloser is a callback that needs to be called when you are done
+// operating on an [os.File] fetched using [ProcThreadSelf].
+//
+// [os.File]: https://pkg.go.dev/os#File
+type ProcThreadSelfCloser func()
+
+// open is the core lookup operation for [Handle]. It returns a handle to
+// "/proc/<base>/<subpath>". If the returned [ProcThreadSelfCloser] is
+// non-nil, you should call it after you are done interacting with the
+// returned handle.
+//
+// In general you should prefer to use the other helpers, as they remove the
+// need to interact with [procfsBase] and only return a non-nil
+// [ProcThreadSelfCloser] for [ProcThreadSelf], where it is necessary.
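+//
+// A sketch of the calling convention, using only names defined in this file:
+//
+//	f, closer, err := proc.open(ProcThreadSelf, "status")
+//	if err != nil {
+//		return err
+//	}
+//	defer closer()  // unlock the OS thread once we are done with f
+//	defer f.Close() // runs before closer, while the thread is still locked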
+func (proc *Handle) open(base procfsBase, subpath string) (_ *os.File, closer ProcThreadSelfCloser, Err error) {
+	prefix, err := base.prefix(proc)
+	if err != nil {
+		return nil, nil, err
+	}
+	subpath = prefix + "/" + subpath
+
+	switch base {
+	case ProcRoot:
+		file, err := proc.lookup(subpath)
+		if errors.Is(err, os.ErrNotExist) {
+			// The procfs handle in use might be a subset=pid one, which will
+			// result in spurious errors. In this case, just open a temporary
+			// unmasked procfs handle for this operation.
+			proc, err2 := OpenUnsafeProcRoot() // !subset=pid
+			if err2 != nil {
+				return nil, nil, err
+			}
+			defer proc.Close() //nolint:errcheck // close failures aren't critical here
+
+			file, err = proc.lookup(subpath)
+		}
+		return file, nil, err
+
+	case ProcSelf:
+		file, err := proc.lookup(subpath)
+		return file, nil, err
+
+	case ProcThreadSelf:
+		// We need to lock our thread until the caller is done with the handle
+		// because between getting the handle and using it we could get
+		// interrupted by the Go runtime and hit the case where the underlying
+		// thread is swapped out and the original thread is killed, resulting
+		// in pull-your-hair-out-hard-to-debug issues in the caller.
+		runtime.LockOSThread()
+		defer func() {
+			if Err != nil {
+				runtime.UnlockOSThread()
+				closer = nil
+			}
+		}()
+
+		file, err := proc.lookup(subpath)
+		return file, runtime.UnlockOSThread, err
+	}
+	// should never be reached
+	return nil, nil, fmt.Errorf("[internal error] invalid procfs base %q", base)
+}
+
+// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an
+// equivalent handle on older kernels where "/proc/thread-self" doesn't exist).
+// Once finished with the handle, you must call the returned closer function
+// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
+// Go threads or use the handle after calling the closer.
+func (proc *Handle) OpenThreadSelf(subpath string) (_ *os.File, _ ProcThreadSelfCloser, Err error) {
+	return proc.open(ProcThreadSelf, subpath)
+}
+
+// OpenSelf returns a handle to /proc/self/<subpath>.
+func (proc *Handle) OpenSelf(subpath string) (*os.File, error) {
+	file, closer, err := proc.open(ProcSelf, subpath)
+	assert.Assert(closer == nil, "closer for ProcSelf must be nil")
+	return file, err
+}
+
+// OpenRoot returns a handle to /proc/<subpath>.
+func (proc *Handle) OpenRoot(subpath string) (*os.File, error) {
+	file, closer, err := proc.open(ProcRoot, subpath)
+	assert.Assert(closer == nil, "closer for ProcRoot must be nil")
+	return file, err
+}
+
+// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid).
+// This is mainly intended for usage when operating on other processes.
+func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) {
+	return proc.OpenRoot(strconv.Itoa(pid) + "/" + subpath)
+}
+
+// checkSubpathOvermount checks if the dirfd and path combination is on the
+// same mount as the given root.
+func checkSubpathOvermount(root, dir fd.Fd, path string) error {
+	// Get the mntID of our procfs handle.
+	expectedMountID, err := fd.GetMountID(root, "")
+	if err != nil {
+		return fmt.Errorf("get root mount id: %w", err)
+	}
+	// Get the mntID of the target magic-link.
+	gotMountID, err := fd.GetMountID(dir, path)
+	if err != nil {
+		return fmt.Errorf("get subpath mount id: %w", err)
+	}
+	// As long as the directory mount is alive, even with wrapping mount IDs,
+	// we would expect to see a different mount ID here. 
(Of course, if we're
+ // using unsafeHostProcRoot() then an attacker could change this after we
+ // did this check.)
+ if expectedMountID != gotMountID {
+ return fmt.Errorf("%w: subpath %s/%s has an overmount obscuring the real path (mount ids do not match %d != %d)",
+ errUnsafeProcfs, dir.Name(), path, expectedMountID, gotMountID)
+ }
+ return nil
+}
+
+// Readlink performs a readlink operation on "/proc/<base>/<subpath>" in a way
+// that should be free from race attacks. This is most commonly used to get the
+// real path of a file by looking at "/proc/self/fd/$n", with the same safety
+// protections as [Open] (as well as some additional checks against
+// overmounts).
+func (proc *Handle) Readlink(base procfsBase, subpath string) (string, error) {
+ link, closer, err := proc.open(base, subpath)
+ if closer != nil {
+ defer closer()
+ }
+ if err != nil {
+ return "", fmt.Errorf("get safe %s/%s handle: %w", base, subpath, err)
+ }
+ defer link.Close() //nolint:errcheck // close failures aren't critical here
+
+ // Try to detect if there is a mount on top of the magic-link. This should
+ // be safe in general (a mount on top of the path afterwards would not
+ // affect the handle itself) and will definitely be safe if we are using
+ // privateProcRoot() (at least since Linux 5.12[1], when anonymous mount
+ // namespaces were completely isolated from external mounts including mount
+ // propagation events).
+ //
+ // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+ // onto targets that reside on shared mounts").
+ if err := checkSubpathOvermount(proc.Inner, link, ""); err != nil {
+ return "", fmt.Errorf("check safety of %s/%s magiclink: %w", base, subpath, err)
+ }
+
+ // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
+ // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
+ // relative pathnames").
+ return fd.Readlinkat(link, "")
+}
+
+// ProcSelfFdReadlink gets the real path of the given file by looking at
+// readlink(/proc/thread-self/fd/$n).
+//
+// This is just a wrapper around [Handle.Readlink].
+func ProcSelfFdReadlink(fd fd.Fd) (string, error) {
+ procRoot, err := OpenProcRoot() // subset=pid
+ if err != nil {
+ return "", err
+ }
+ defer procRoot.Close() //nolint:errcheck // close failures aren't critical here
+
+ fdPath := "fd/" + strconv.Itoa(int(fd.Fd()))
+ return procRoot.Readlink(ProcThreadSelf, fdPath)
+}
+
+// CheckProcSelfFdPath checks whether the given file handle matches the
+// expected path. (This is inherently racy.)
+func CheckProcSelfFdPath(path string, file fd.Fd) error {
+ if err := fd.IsDeadInode(file); err != nil {
+ return err
+ }
+ actualPath, err := ProcSelfFdReadlink(file)
+ if err != nil {
+ return fmt.Errorf("get path of handle: %w", err)
+ }
+ if actualPath != path {
+ return fmt.Errorf("%w: handle path %q doesn't match expected path %q", internal.ErrPossibleBreakout, actualPath, path)
+ }
+ return nil
+}
+
+// ReopenFd takes an existing file descriptor and "re-opens" it through
+// /proc/thread-self/fd/<fd>. This allows for O_PATH file descriptors to be
+// upgraded to regular file descriptors, as well as changing the open mode of a
+// regular file descriptor. Some filesystems have unique handling of open(2)
+// which makes this incredibly useful (such as /dev/ptmx).
+func ReopenFd(handle fd.Fd, flags int) (*os.File, error) {
+ procRoot, err := OpenProcRoot() // subset=pid
+ if err != nil {
+ return nil, err
+ }
+ defer procRoot.Close() //nolint:errcheck // close failures aren't critical here
+
+ // We can't operate on /proc/thread-self/fd/$n directly when doing a
+ // re-open, so we need to open /proc/thread-self/fd and then open a single
+ // final component.
+ procFdDir, closer, err := procRoot.OpenThreadSelf("fd/")
+ if err != nil {
+ return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err)
+ }
+ defer procFdDir.Close() //nolint:errcheck // close failures aren't critical here
+ defer closer()
+
+ // Try to detect if there is a mount on top of the magic-link we are about
+ // to open. If we are using unsafeHostProcRoot(), this could change after
+ // we check it (and there's nothing we can do about that) but for
+ // privateProcRoot() this should be guaranteed to be safe (at least since
+ // Linux 5.12[1], when anonymous mount namespaces were completely isolated
+ // from external mounts including mount propagation events).
+ //
+ // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+ // onto targets that reside on shared mounts").
+ fdStr := strconv.Itoa(int(handle.Fd()))
+ if err := checkSubpathOvermount(procRoot.Inner, procFdDir, fdStr); err != nil {
+ return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err)
+ }
+
+ flags |= unix.O_CLOEXEC
+ // Rather than just wrapping fd.Openat, open-code it so we can copy
+ // handle.Name().
+ reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0)
+ if err != nil {
+ return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err)
+ }
+ return os.NewFile(uintptr(reopenFd), handle.Name()), nil
+}
+
+// Test hooks used in the procfs tests to verify that the fallback logic works.
+// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
+var (
+ hookForcePrivateProcRootOpenTree = hookDummyFile
+ hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile
+ hookForceGetProcRootUnsafe = hookDummy
+
+ hookForceProcSelfTask = hookDummy
+ hookForceProcSelf = hookDummy
+)
+
+func hookDummy() bool { return false }
+func hookDummyFile(_ io.Closer) bool { return false }
diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go
new file mode 100644
index 000000000..1ad1f18ee
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs/procfs_lookup_linux.go
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build linux
+
+// Copyright (C) 2024-2025 Aleksa Sarai
+// Copyright (C) 2024-2025 SUSE LLC
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+// This code is adapted to be a minimal version of the libpathrs proc resolver.
+// As we only need O_PATH|O_NOFOLLOW support, this is not too much to port.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/cyphar/filepath-securejoin/internal/consts"
+ "github.com/cyphar/filepath-securejoin/pathrs-lite/internal"
+ "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd"
+ "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat"
+ "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux"
+)
+
+// procfsLookupInRoot is a stripped down version of completeLookupInRoot,
+// entirely designed to support the very small set of features necessary to
+// make procfs handling work. Unlike completeLookupInRoot, we always have
+// O_PATH|O_NOFOLLOW behaviour for trailing symlinks.
+//
+// The main restrictions are:
+//
+// - ".." is not supported (as it requires either os.Root-style replays,
+// which is more bug-prone; or procfs verification, which is not possible
+// due to re-entrancy issues).
+// - Absolute symlinks are not supported for the same reason (and all
+// absolute symlinks in procfs are magic-links, which we want to skip
+// anyway).
+// - If statx is supported (checkSubpathOvermount), mount-point crossings are
+// not allowed (these are the main attack of concern against /proc).
+// - Partial lookups are not supported, so the symlink stack is not needed.
+// - Trailing slash special handling is not necessary in most cases (if we
+// are operating on procfs, it's usually with programmer-controlled strings
+// that will then be re-opened), so we skip it since whatever re-opens it
+// can deal with it. It's a creature comfort anyway.
+//
+// If the system supports openat2(), this is implemented using equivalent flags
+// (RESOLVE_BENEATH | RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS).
+func procfsLookupInRoot(procRoot fd.Fd, unsafePath string) (Handle *os.File, _ error) {
+ unsafePath = filepath.ToSlash(unsafePath) // noop
+
+ // Make sure that an empty unsafe path still returns something sane, even
+ // with openat2 (which doesn't have AT_EMPTY_PATH semantics yet).
+ if unsafePath == "" {
+ unsafePath = "."
+ }
+
+ // This is already checked by getProcRoot, but make sure here since the
+ // core security of this lookup is based on this assumption.
+ if err := verifyProcRoot(procRoot); err != nil {
+ return nil, err
+ }
+
+ if linux.HasOpenat2() {
+ // We prefer being able to use RESOLVE_NO_XDEV if we can, to be
+ // absolutely sure we are operating on a clean /proc handle that
+ // doesn't have any cheeky overmounts that could trick us (including
+ // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
+ // strictly needed, but just use it since we have it.
+ //
+ // NOTE: /proc/self is technically a magic-link (the contents of the
+ // symlink are generated dynamically), but it doesn't use
+ // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
+ //
+ // TODO: It would be nice to have RESOLVE_NO_DOTDOT, purely for
+ // self-consistency with the backup O_PATH resolver.
+ handle, err := fd.Openat2(procRoot, unsafePath, &unix.OpenHow{
+ Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
+ Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
+ })
+ if err != nil {
+ // TODO: Once we bump the minimum Go version to 1.20, we can use
+ // multiple %w verbs for this wrapping. For now we need to use a
+ // compatibility shim for older Go versions.
+ // err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+ return nil, gocompat.WrapBaseError(err, errUnsafeProcfs)
+ }
+ return handle, nil
+ }
+
+ // To mirror openat2(RESOLVE_BENEATH), we need to return an error if the
+ // path is absolute.
+ if path.IsAbs(unsafePath) {
+ return nil, fmt.Errorf("%w: cannot resolve absolute paths in procfs resolver", internal.ErrPossibleBreakout)
+ }
+
+ currentDir, err := fd.Dup(procRoot)
+ if err != nil {
+ return nil, fmt.Errorf("clone root fd: %w", err)
+ }
+ defer func() {
+ // If a handle is not returned, close the internal handle.
+ if Handle == nil {
+ _ = currentDir.Close()
+ }
+ }()
+
+ var (
+ linksWalked int
+ currentPath string
+ remainingPath = unsafePath
+ )
+ for remainingPath != "" {
+ // Get the next path component.
+ var part string
+ if i := strings.IndexByte(remainingPath, '/'); i == -1 {
+ part, remainingPath = remainingPath, ""
+ } else {
+ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
+ }
+ if part == "" {
+ // no-op component, but treat it the same as "."
+ part = "."
+ }
+ if part == ".." {
+ // not permitted
+ return nil, fmt.Errorf("%w: cannot walk into '..' in procfs resolver", internal.ErrPossibleBreakout)
+ }
+
+ // Apply the component lexically to the path we are building.
+ // currentPath does not contain any symlinks, and we are lexically
+ // dealing with a single component, so it's okay to do a path.Clean
+ // here. (Not to mention that ".." isn't allowed.)
+ nextPath := path.Join("/", currentPath, part)
+ // If we logically hit the root, just clone the root rather than
+ // opening the part and doing all of the other checks.
+ if nextPath == "/" {
+ // Jump to root.
+ rootClone, err := fd.Dup(procRoot)
+ if err != nil {
+ return nil, fmt.Errorf("clone root fd: %w", err)
+ }
+ _ = currentDir.Close()
+ currentDir = rootClone
+ currentPath = nextPath
+ continue
+ }
+
+ // Try to open the next component.
+ nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Make sure we are still on procfs and haven't crossed mounts.
+ if err := verifyProcHandle(nextDir); err != nil {
+ _ = nextDir.Close()
+ return nil, fmt.Errorf("check %q component is on procfs: %w", part, err)
+ }
+ if err := checkSubpathOvermount(procRoot, nextDir, ""); err != nil {
+ _ = nextDir.Close()
+ return nil, fmt.Errorf("check %q component is not overmounted: %w", part, err)
+ }
+
+ // We are emulating O_PATH|O_NOFOLLOW, so we only need to traverse into
+ // a symlink if it is not the final component. Otherwise we
+ // can just return the currentDir.
+ if remainingPath != "" {
+ st, err := nextDir.Stat()
+ if err != nil {
+ _ = nextDir.Close()
+ return nil, fmt.Errorf("stat component %q: %w", part, err)
+ }
+
+ if st.Mode()&os.ModeType == os.ModeSymlink {
+ // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
+ // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
+ // fstatat() with empty relative pathnames").
+ linkDest, err := fd.Readlinkat(nextDir, "")
+ // We don't need the handle anymore.
+ _ = nextDir.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ linksWalked++
+ if linksWalked > consts.MaxSymlinkLimit {
+ return nil, &os.PathError{Op: "securejoin.procfsLookupInRoot", Path: "/proc/" + unsafePath, Err: unix.ELOOP}
+ }
+
+ // Update our logical remaining path.
+ remainingPath = linkDest + "/" + remainingPath
+ // Absolute symlinks are probably magiclinks, we reject them.
+ if path.IsAbs(linkDest) { + return nil, fmt.Errorf("%w: cannot jump to / in procfs resolver -- possible magiclink", internal.ErrPossibleBreakout) + } + continue + } + } + + // Walk into the next component. + _ = currentDir.Close() + currentDir = nextDir + currentPath = nextPath + } + + // One final sanity-check. + if err := verifyProcHandle(currentDir); err != nil { + return nil, fmt.Errorf("check final handle is on procfs: %w", err) + } + if err := checkSubpathOvermount(procRoot, currentDir, ""); err != nil { + return nil, fmt.Errorf("check final handle is not overmounted: %w", err) + } + return currentDir, nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go similarity index 83% rename from vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go index be81e498d..05d7dbcc1 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/lookup_linux.go @@ -1,10 +1,15 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. -package securejoin +package pathrs import ( "errors" @@ -15,6 +20,12 @@ import ( "strings" "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/internal/consts" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" ) type symlinkStackEntry struct { @@ -112,12 +123,12 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro return nil } // Split the link target and clean up any "" parts. - linkTargetParts := slices_DeleteFunc( + linkTargetParts := gocompat.SlicesDeleteFunc( strings.Split(linkTarget, "/"), func(part string) bool { return part == "" || part == "." }) // Copy the directory so the caller doesn't close our copy. - dirCopy, err := dupFile(dir) + dirCopy, err := fd.Dup(dir) if err != nil { return err } @@ -159,11 +170,11 @@ func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { // within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing // component of the requested path, returning a file handle to the final // existing component and a string containing the remaining path components. 
-func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) {
+func partialLookupInRoot(root fd.Fd, unsafePath string) (*os.File, string, error) {
 return lookupInRoot(root, unsafePath, true)
 }
 
-func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
+func completeLookupInRoot(root fd.Fd, unsafePath string) (*os.File, error) {
 handle, remainingPath, err := lookupInRoot(root, unsafePath, false)
 if remainingPath != "" && err == nil {
 // should never happen
@@ -174,7 +185,7 @@ func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
 return handle, err
 }
 
-func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
+func lookupInRoot(root fd.Fd, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
 unsafePath = filepath.ToSlash(unsafePath) // noop
 
 // This is very similar to SecureJoin, except that we operate on the
@@ -182,20 +193,25 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
 // managed open, along with the remaining path components not opened.
 
 // Try to use openat2 if possible.
-	if hasOpenat2() {
-		return lookupOpenat2(root, unsafePath, partial)
+	//
+	// NOTE: If openat2(2) works normally but fails for this lookup, it is
+	// probably not a good idea to fall back to the O_PATH resolver. An
+	// attacker could find a bug in the O_PATH resolver, and unconditionally
+	// falling back to the O_PATH resolver would form a downgrade attack.
+	if handle, remainingPath, err := lookupOpenat2(root, unsafePath, partial); err == nil || linux.HasOpenat2() {
+		return handle, remainingPath, err
 }
 
 // Get the "actual" root path from /proc/self/fd. This is necessary if the
 // root is some magic-link like /proc/$pid/root, in which case we want to
-	// make sure when we do checkProcSelfFdPath that we are using the correct
-	// root path.
-	logicalRootPath, err := procSelfFdReadlink(root)
+	// make sure when we do procfs.CheckProcSelfFdPath that we are using the
+	// correct root path.
+	logicalRootPath, err := procfs.ProcSelfFdReadlink(root)
 if err != nil {
 return nil, "", fmt.Errorf("get real root path: %w", err)
 }
 
-	currentDir, err := dupFile(root)
+	currentDir, err := fd.Dup(root)
 if err != nil {
 return nil, "", fmt.Errorf("clone root fd: %w", err)
 }
@@ -260,7 +276,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
 return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err)
 }
 // Jump to root.
-	rootClone, err := dupFile(root)
+	rootClone, err := fd.Dup(root)
 if err != nil {
 return nil, "", fmt.Errorf("clone root fd: %w", err)
 }
@@ -271,21 +287,21 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi
 }
 
 // Try to open the next component.
-	nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
-	switch {
-	case err == nil:
+	nextDir, err := fd.Openat(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+	switch err {
+	case nil:
 st, err := nextDir.Stat()
 if err != nil {
 _ = nextDir.Close()
 return nil, "", fmt.Errorf("stat component %q: %w", part, err)
 }
 
-	switch st.Mode() & os.ModeType {
+	switch st.Mode() & os.ModeType { //nolint:exhaustive // just a glorified if statement
 case os.ModeSymlink:
 // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
 // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
 // fstatat() with empty relative pathnames").
- linkDest, err := readlinkatFile(nextDir, "") + linkDest, err := fd.Readlinkat(nextDir, "") // We don't need the handle anymore. _ = nextDir.Close() if err != nil { @@ -293,7 +309,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi } linksWalked++ - if linksWalked > maxSymlinkLimit { + if linksWalked > consts.MaxSymlinkLimit { return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} } @@ -307,7 +323,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // Absolute symlinks reset any work we've already done. if path.IsAbs(linkDest) { // Jump to root. - rootClone, err := dupFile(root) + rootClone, err := fd.Dup(root) if err != nil { return nil, "", fmt.Errorf("clone root fd: %w", err) } @@ -335,12 +351,12 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // rename or mount on the system. if part == ".." { // Make sure the root hasn't moved. - if err := checkProcSelfFdPath(logicalRootPath, root); err != nil { + if err := procfs.CheckProcSelfFdPath(logicalRootPath, root); err != nil { return nil, "", fmt.Errorf("root path moved during lookup: %w", err) } // Make sure the path is what we expect. fullPath := logicalRootPath + nextPath - if err := checkProcSelfFdPath(fullPath, currentDir); err != nil { + if err := procfs.CheckProcSelfFdPath(fullPath, currentDir); err != nil { return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) } } @@ -371,7 +387,7 @@ func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.Fi // context of openat2, a trailing slash and a trailing "/." are completely // equivalent. if strings.HasSuffix(unsafePath, "/") { - nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + nextDir, err := fd.Openat(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) if err != nil { if !partial { _ = currentDir.Close() diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go similarity index 86% rename from vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go index a17ae3b03..f3c62b0da 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/mkdir_linux.go @@ -1,10 +1,15 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-package securejoin +package pathrs import ( "errors" @@ -14,13 +19,14 @@ import ( "strings" "golang.org/x/sys/unix" -) -var ( - errInvalidMode = errors.New("invalid permission mode") - errPossibleAttack = errors.New("possible attack detected") + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat" + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux" ) +var errInvalidMode = errors.New("invalid permission mode") + // modePermExt is like os.ModePerm except that it also includes the set[ug]id // and sticky bits. const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky @@ -66,6 +72,8 @@ func toUnixMode(mode os.FileMode) (uint32, error) { // a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after // doing [MkdirAll]. If you intend to open the directory after creating it, you // should use MkdirAllHandle. +// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { unixMode, err := toUnixMode(mode) if err != nil { @@ -102,7 +110,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // // This is mostly a quality-of-life check, because mkdir will simply fail // later if the attacker deletes the tree after this check. - if err := isDeadInode(currentDir); err != nil { + if err := fd.IsDeadInode(currentDir); err != nil { return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) } @@ -113,13 +121,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) } else if err != nil { return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) - } else { + } else { //nolint:revive // indent-error-flow lint doesn't make sense here _ = currentDir.Close() currentDir = reopenDir } remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if slices_Contains(remainingParts, "..") { + if gocompat.SlicesContains(remainingParts, "..") { // The path contained ".." components after the end of the "real" // components. We could try to safely resolve ".." here but that would // add a bunch of extra logic for something that it's not clear even @@ -150,12 +158,12 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} // Make the error a bit nicer if the directory is dead. - if deadErr := isDeadInode(currentDir); deadErr != nil { + if deadErr := fd.IsDeadInode(currentDir); deadErr != nil { // TODO: Once we bump the minimum Go version to 1.20, we can use // multiple %w verbs for this wrapping. For now we need to use a // compatibility shim for older Go versions. - //err = fmt.Errorf("%w (%w)", err, deadErr) - err = wrapBaseError(err, deadErr) + // err = fmt.Errorf("%w (%w)", err, deadErr) + err = gocompat.WrapBaseError(err, deadErr) } return nil, err } @@ -163,13 +171,13 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // Get a handle to the next component. O_DIRECTORY means we don't need // to use O_PATH. 
var nextDir *os.File - if hasOpenat2() { - nextDir, err = openat2File(currentDir, part, &unix.OpenHow{ + if linux.HasOpenat2() { + nextDir, err = openat2(currentDir, part, &unix.OpenHow{ Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, }) } else { - nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + nextDir, err = fd.Openat(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) } if err != nil { return nil, err @@ -220,12 +228,14 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.F // If you plan to open the directory after you have created it or want to use // an open directory handle as the root, you should use [MkdirAllHandle] instead. // This function is a wrapper around [MkdirAllHandle]. +// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func MkdirAll(root, unsafePath string, mode os.FileMode) error { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return err } - defer rootDir.Close() + defer rootDir.Close() //nolint:errcheck // close failures aren't critical here f, err := MkdirAllHandle(rootDir, unsafePath, mode) if err != nil { diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go similarity index 56% rename from vendor/github.com/cyphar/filepath-securejoin/open_linux.go rename to vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go index 230be73f0..7492d8cfa 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/open_linux.go @@ -1,17 +1,22 @@ +// SPDX-License-Identifier: MPL-2.0 + //go:build linux -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. -package securejoin +package pathrs import ( - "fmt" "os" - "strconv" "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" ) // OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided @@ -40,12 +45,14 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { // disconnected TTY that could cause a DoS, or some other issue). In order to // use the returned handle, you can "upgrade" it to a proper handle using // [Reopen]. 
+// +// [SecureJoin]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin#SecureJoin func OpenInRoot(root, unsafePath string) (*os.File, error) { rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) if err != nil { return nil, err } - defer rootDir.Close() + defer rootDir.Close() //nolint:errcheck // close failures aren't critical here return OpenatInRoot(rootDir, unsafePath) } @@ -63,41 +70,5 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) { // // [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw func Reopen(handle *os.File, flags int) (*os.File, error) { - procRoot, err := getProcRoot() - if err != nil { - return nil, err - } - - // We can't operate on /proc/thread-self/fd/$n directly when doing a - // re-open, so we need to open /proc/thread-self/fd and then open a single - // final component. - procFdDir, closer, err := procThreadSelf(procRoot, "fd/") - if err != nil { - return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) - } - defer procFdDir.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link we are about - // to open. If we are using unsafeHostProcRoot(), this could change after - // we check it (and there's nothing we can do about that) but for - // privateProcRoot() this should be guaranteed to be safe (at least since - // Linux 5.12[1], when anonymous mount namespaces were completely isolated - // from external mounts including mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - fdStr := strconv.Itoa(int(handle.Fd())) - if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { - return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) - } - - flags |= unix.O_CLOEXEC - // Rather than just wrapping openatFile, open-code it so we can copy - // handle.Name(). - reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) - if err != nil { - return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) - } - return os.NewFile(uintptr(reopenFd), handle.Name()), nil + return procfs.ReopenFd(handle, flags) } diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go new file mode 100644 index 000000000..dbbb88c23 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/openat2_linux.go @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +package pathrs + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd" + "github.com/cyphar/filepath-securejoin/pathrs-lite/procfs" +) + +func openat2(dir fd.Fd, path string, how *unix.OpenHow) (*os.File, error) { + file, err := fd.Openat2(dir, path, how) + if err != nil { + return nil, err + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. 
+ if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := procfs.ProcSelfFdReadlink(file); err == nil { + // TODO: Ideally we would not need to dup the fd, but you cannot + // easily just swap an *os.File with one from the same fd + // (the GC will close the old one, and you cannot clear the + // finaliser easily because it is associated with an internal + // field of *os.File not *os.File itself). + newFile, err := fd.DupWithName(file, actualPath) + if err != nil { + return nil, err + } + _ = file.Close() + file = newFile + } + } + return file, nil +} + +func lookupOpenat2(root fd.Fd, unsafePath string, partial bool) (*os.File, string, error) { + if !partial { + file, err := openat2(root, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + return file, "", err + } + return partialLookupOpenat2(root, unsafePath) +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root fd.Fd, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + var lastError error + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx++ + } + // We found a subpath! + return handle, unsafePath[endIdx:], lastError + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + lastError = err + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close this one by + // accident. + rootClone, err := fd.Dup(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, lastError +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go new file mode 100644 index 000000000..ec187a414 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs/procfs_linux.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MPL-2.0 + +//go:build linux + +// Copyright (C) 2024-2025 Aleksa Sarai +// Copyright (C) 2024-2025 SUSE LLC +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Package procfs provides a safe API for operating on /proc on Linux. +package procfs + +import ( + "os" + + "github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs" +) + +// This package mostly just wraps internal/procfs APIs. This is necessary +// because we are forced to export some things from internal/procfs in order to +// avoid some dependency cycle issues, but we don't want users to see or use +// them. 
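A minimal usage sketch of the wrapper package above, using only the names this diff introduces (OpenProcRoot, Handle.OpenThreadSelf, and the returned ProcThreadSelfCloser); the "status" subpath is just an illustrative choice, and any other /proc/thread-self file would work the same way:

```go
// Sketch: typical use of the pathrs-lite procfs wrapper. The handle returned
// by OpenThreadSelf is an O_PATH-style handle; reading from it would require
// re-opening it (e.g. via pathrs.Reopen), which is omitted here.
package main

import (
	"fmt"

	"github.com/cyphar/filepath-securejoin/pathrs-lite/procfs"
)

func main() {
	// Prefer the masked (subset=pid) handle; it transparently falls back
	// to a plain /proc handle on kernels without subset=pid support.
	proc, err := procfs.OpenProcRoot()
	if err != nil {
		panic(err)
	}
	defer proc.Close()

	// OpenThreadSelf locks the goroutine to its OS thread; the returned
	// closer (runtime.UnlockOSThread) must be called once we are done with
	// the handle, and the handle must not be passed to other threads.
	status, closer, err := proc.OpenThreadSelf("status")
	if err != nil {
		panic(err)
	}
	defer closer()
	defer status.Close()

	fmt.Println("opened", status.Name())
}
```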
+
+// ProcThreadSelfCloser is a callback that needs to be called when you are done
+// operating on an [os.File] fetched using [Handle.OpenThreadSelf].
+//
+// [os.File]: https://pkg.go.dev/os#File
+type ProcThreadSelfCloser = procfs.ProcThreadSelfCloser
+
+// Handle is a wrapper around an *os.File handle to "/proc", which can be used
+// to do further procfs-related operations in a safe way.
+type Handle struct {
+ inner *procfs.Handle
+}
+
+// Close closes the resources associated with this [Handle]. Note that if this
+// [Handle] was created with [OpenProcRoot], on some kernels the underlying
+// procfs handle is cached and so this Close operation may be a no-op. However,
+// you should always call Close on [Handle]s once you are done with them.
+func (proc *Handle) Close() error { return proc.inner.Close() }
+
+// OpenProcRoot tries to open a "safer" handle to "/proc" (i.e., one with the
+// "subset=pid" mount option applied, available from Linux 5.8). Unless you
+// plan to do many [Handle.OpenRoot] operations, you should prefer this over
+// [OpenUnsafeProcRoot], which is far more dangerous to keep open.
+//
+// If a safe handle cannot be opened, OpenProcRoot will fall back to opening a
+// regular "/proc" handle.
+//
+// Note that using [Handle.OpenRoot] will still work with handles returned by
+// this function. If a subpath cannot be operated on with a safe "/proc"
+// handle, then [OpenUnsafeProcRoot] will be called internally and a temporary
+// unsafe handle will be used.
+func OpenProcRoot() (*Handle, error) {
+ proc, err := procfs.OpenProcRoot()
+ if err != nil {
+ return nil, err
+ }
+ return &Handle{inner: proc}, nil
+}
+
+// OpenUnsafeProcRoot opens a handle to "/proc" without any overmounts or
+// masked paths. You must be extremely careful to make sure this handle is
+// never leaked to a container and that your program cannot be tricked into
+// writing to arbitrary paths within it.
+//
+// This is not necessary if you just wish to use [Handle.OpenRoot], as handles
+// returned by [OpenProcRoot] will fall back to using a *temporary* unsafe
+// handle in that case. You should only really use this if you need to do many
+// operations with [Handle.OpenRoot] and the performance overhead of making
+// many procfs handles is an issue. If you do use OpenUnsafeProcRoot, you
+// should make sure to close the handle as soon as possible to avoid
+// known-fd-number attacks.
+func OpenUnsafeProcRoot() (*Handle, error) {
+ proc, err := procfs.OpenUnsafeProcRoot()
+ if err != nil {
+ return nil, err
+ }
+ return &Handle{inner: proc}, nil
+}
+
+// OpenThreadSelf returns a handle to "/proc/thread-self/<subpath>" (or an
+// equivalent handle on older kernels where "/proc/thread-self" doesn't exist).
+// Once finished with the handle, you must call the returned closer function
+// ([runtime.UnlockOSThread]). You must not pass the returned *os.File to other
+// Go threads or use the handle after calling the closer.
+//
+// [runtime.UnlockOSThread]: https://pkg.go.dev/runtime#UnlockOSThread
+func (proc *Handle) OpenThreadSelf(subpath string) (*os.File, ProcThreadSelfCloser, error) {
+ return proc.inner.OpenThreadSelf(subpath)
+}
+
+// OpenSelf returns a handle to /proc/self/<subpath>.
+//
+// Note that in Go programs with non-homogeneous threads, this may result in
+// spurious errors.
If you are monkeying around with APIs that are
+// thread-specific, you probably want to use [Handle.OpenThreadSelf] instead,
+// which will guarantee that the handle refers to the same thread as the caller
+// is executing on.
+func (proc *Handle) OpenSelf(subpath string) (*os.File, error) {
+ return proc.inner.OpenSelf(subpath)
+}
+
+// OpenRoot returns a handle to /proc/<subpath>.
+//
+// You should only use this when you need to operate on global procfs files
+// (such as sysctls in /proc/sys). Unlike [Handle.OpenThreadSelf],
+// [Handle.OpenSelf], and [Handle.OpenPid], the procfs handle used internally
+// for this operation will never use "subset=pid", which makes it a more juicy
+// target for [CVE-2024-21626]-style attacks (and doing something like opening
+// a directory with OpenRoot effectively leaks [OpenUnsafeProcRoot] as long as
+// the file descriptor is open).
+//
+// [CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv
+func (proc *Handle) OpenRoot(subpath string) (*os.File, error) {
+ return proc.inner.OpenRoot(subpath)
+}
+
+// OpenPid returns a handle to /proc/$pid/<subpath> (pid can be a pid or tid).
+// This is mainly intended for usage when operating on other processes.
+//
+// You should not use this for the current thread, as special handling is
+// needed for /proc/thread-self (or /proc/self/task/<tid>) when dealing with
+// goroutine scheduling -- use [Handle.OpenThreadSelf] instead.
+//
+// To refer to the current thread-group, you should prefer [Handle.OpenSelf]
+// over passing os.Getpid as the pid argument.
+func (proc *Handle) OpenPid(pid int, subpath string) (*os.File, error) {
+ return proc.inner.OpenPid(pid, subpath)
+}
+
+// ProcSelfFdReadlink gets the real path of the given file by looking at
+// /proc/self/fd/<fd> with [readlink]. It is effectively just shorthand for
+// something along the lines of:
+//
+// proc, err := procfs.OpenProcRoot()
+// if err != nil {
+// return err
+// }
+// link, err := proc.OpenThreadSelf(fmt.Sprintf("fd/%d", f.Fd()))
+// if err != nil {
+// return err
+// }
+// defer link.Close()
+// var buf [4096]byte
+// n, err := unix.Readlinkat(int(link.Fd()), "", buf[:])
+// if err != nil {
+// return err
+// }
+// pathname := buf[:n]
+//
+// [readlink]: https://pkg.go.dev/golang.org/x/sys/unix#Readlinkat
+func ProcSelfFdReadlink(f *os.File) (string, error) {
+ return procfs.ProcSelfFdReadlink(f)
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
deleted file mode 100644
index 809a579cb..000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
+++ /dev/null
@@ -1,452 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package securejoin - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -func fstat(f *os.File) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstat(int(f.Fd()), &stat); err != nil { - return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} - } - return stat, nil -} - -func fstatfs(f *os.File) (unix.Statfs_t, error) { - var statfs unix.Statfs_t - if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { - return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} - } - return statfs, nil -} - -// The kernel guarantees that the root inode of a procfs mount has an -// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. -const ( - procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC - procRootIno = 1 // PROC_ROOT_INO -) - -func verifyProcRoot(procRoot *os.File) error { - if statfs, err := fstatfs(procRoot); err != nil { - return err - } else if statfs.Type != procSuperMagic { - return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - if stat, err := fstat(procRoot); err != nil { - return err - } else if stat.Ino != procRootIno { - return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) - } - return nil -} - -var hasNewMountApi = sync_OnceValue(func() bool { - // All of the pieces of the new mount API we use (fsopen, fsconfig, - // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can - // just check for one of the syscalls and the others should also be - // available. - // - // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. - // This is equivalent to openat(2), but tells us if open_tree is - // available (and thus all of the other basic new mount API syscalls). - // open_tree(2) is most light-weight syscall to test here. - // - // [1]: merge commit 400913252d09 - // [2]: - fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func fsopen(fsName string, flags int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSOPEN_CLOEXEC - fd, err := unix.Fsopen(fsName, flags) - if err != nil { - return nil, os.NewSyscallError("fsopen "+fsName, err) - } - return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil -} - -func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSMOUNT_CLOEXEC - fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) - if err != nil { - return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) - } - return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil -} - -func newPrivateProcMount() (*os.File, error) { - procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return nil, err - } - defer procfsCtx.Close() - - // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") - - // Get an actual handle. - if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { - return nil, os.NewSyscallError("fsconfig create procfs", err) - } - return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) -} - -func openTree(dir *os.File, path string, flags uint) (*os.File, error) { - dirFd := -int(unix.EBADF) - dirName := "." 
- if dir != nil { - dirFd = int(dir.Fd()) - dirName = dir.Name() - } - // Make sure we always set O_CLOEXEC. - flags |= unix.OPEN_TREE_CLOEXEC - fd, err := unix.OpenTree(dirFd, path, flags) - if err != nil { - return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} - } - return os.NewFile(uintptr(fd), dirName+"/"+path), nil -} - -func clonePrivateProcMount() (_ *os.File, Err error) { - // Try to make a clone without using AT_RECURSIVE if we can. If this works, - // we can be sure there are no over-mounts and so if the root is valid then - // we're golden. Otherwise, we have to deal with over-mounts. - procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { - procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) - } - if err != nil { - return nil, fmt.Errorf("creating a detached procfs clone: %w", err) - } - defer func() { - if Err != nil { - _ = procfsHandle.Close() - } - }() - if err := verifyProcRoot(procfsHandle); err != nil { - return nil, err - } - return procfsHandle, nil -} - -func privateProcRoot() (*os.File, error) { - if !hasNewMountApi() || hookForceGetProcRootUnsafe() { - return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) - } - // Try to create a new procfs mount from scratch if we can. This ensures we - // can get a procfs mount even if /proc is fake (for whatever reason). - procRoot, err := newPrivateProcMount() - if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { - // Try to clone /proc then... - procRoot, err = clonePrivateProcMount() - } - return procRoot, err -} - -func unsafeHostProcRoot() (_ *os.File, Err error) { - procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - if err := verifyProcRoot(procRoot); err != nil { - return nil, err - } - return procRoot, nil -} - -func doGetProcRoot() (*os.File, error) { - procRoot, err := privateProcRoot() - if err != nil { - // Fall back to using a /proc handle if making a private mount failed. - // If we have openat2, at least we can avoid some kinds of over-mount - // attacks, but without openat2 there's not much we can do. - procRoot, err = unsafeHostProcRoot() - } - return procRoot, err -} - -var getProcRoot = sync_OnceValues(func() (*os.File, error) { - return doGetProcRoot() -}) - -var hasProcThreadSelf = sync_OnceValue(func() bool { - return unix.Access("/proc/thread-self/", unix.F_OK) == nil -}) - -var errUnsafeProcfs = errors.New("unsafe procfs detected") - -type procThreadSelfCloser func() - -// procThreadSelf returns a handle to /proc/thread-self/ (or an -// equivalent handle on older kernels where /proc/thread-self doesn't exist). -// Once finished with the handle, you must call the returned closer function -// (runtime.UnlockOSThread). You must not pass the returned *os.File to other -// Go threads or use the handle after calling the closer. -// -// This is similar to ProcThreadSelf from runc, but with extra hardening -// applied and using *os.File. 
-func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { - // We need to lock our thread until the caller is done with the handle - // because between getting the handle and using it we could get interrupted - // by the Go runtime and hit the case where the underlying thread is - // swapped out and the original thread is killed, resulting in - // pull-your-hair-out-hard-to-debug issues in the caller. - runtime.LockOSThread() - defer func() { - if Err != nil { - runtime.UnlockOSThread() - } - }() - - // Figure out what prefix we want to use. - threadSelf := "thread-self/" - if !hasProcThreadSelf() || hookForceProcSelfTask() { - /// Pre-3.17 kernels don't have /proc/thread-self, so do it manually. - threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" - if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { - // In this case, we running in a pid namespace that doesn't match - // the /proc mount we have. This can happen inside runc. - // - // Unfortunately, there is no nice way to get the correct TID to - // use here because of the age of the kernel, so we have to just - // use /proc/self and hope that it works. - threadSelf = "self/" - } - } - - // Grab the handle. - var ( - handle *os.File - err error - ) - if hasOpenat2() { - // We prefer being able to use RESOLVE_NO_XDEV if we can, to be - // absolutely sure we are operating on a clean /proc handle that - // doesn't have any cheeky overmounts that could trick us (including - // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't - // strictly needed, but just use it since we have it. - // - // NOTE: /proc/self is technically a magic-link (the contents of the - // symlink are generated dynamically), but it doesn't use - // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. - // - // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses - // procSelfFdReadlink to clean up the returned f.Name() if we use - // RESOLVE_IN_ROOT (which would lead to an infinite recursion). - handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, - }) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - } else { - handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - defer func() { - if Err != nil { - _ = handle.Close() - } - }() - // We can't detect bind-mounts of different parts of procfs on top of - // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we - // aren't on the wrong filesystem here. 
- if statfs, err := fstatfs(handle); err != nil { - return nil, nil, err - } else if statfs.Type != procSuperMagic { - return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - } - return handle, runtime.UnlockOSThread, nil -} - -// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to -// avoid bumping the requirement for a single constant we can just define it -// ourselves. -const STATX_MNT_ID_UNIQUE = 0x4000 - -var hasStatxMountId = sync_OnceValue(func() bool { - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) - return err == nil && stx.Mask&wantStxMask != 0 -}) - -func getMountId(dir *os.File, path string) (uint64, error) { - // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. - if !hasStatxMountId() { - return 0, nil - } - - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - - err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) - if stx.Mask&wantStxMask == 0 { - // It's not a kernel limitation, for some reason we couldn't get a - // mount ID. Assume it's some kind of attack. - err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs) - } - if err != nil { - return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err} - } - return stx.Mnt_id, nil -} - -func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error { - // Get the mntId of our procfs handle. - expectedMountId, err := getMountId(procRoot, "") - if err != nil { - return err - } - // Get the mntId of the target magic-link. - gotMountId, err := getMountId(dir, path) - if err != nil { - return err - } - // As long as the directory mount is alive, even with wrapping mount IDs, - // we would expect to see a different mount ID here. (Of course, if we're - // using unsafeHostProcRoot() then an attaker could change this after we - // did this check.) - if expectedMountId != gotMountId { - return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId) - } - return nil -} - -func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) { - fdPath := fmt.Sprintf("fd/%d", fd) - procFdLink, closer, err := procThreadSelf(procRoot, fdPath) - if err != nil { - return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err) - } - defer procFdLink.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link. Since we use the handle directly - // provide to the closure. If the closure uses the handle directly, this - // should be safe in general (a mount on top of the path afterwards would - // not affect the handle itself) and will definitely be safe if we are - // using privateProcRoot() (at least since Linux 5.12[1], when anonymous - // mount namespaces were completely isolated from external mounts including - // mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). 
- if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil { - return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err) - } - - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit - // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty - // relative pathnames"). - return readlinkatFile(procFdLink, "") -} - -func rawProcSelfFdReadlink(fd int) (string, error) { - procRoot, err := getProcRoot() - if err != nil { - return "", err - } - return doRawProcSelfFdReadlink(procRoot, fd) -} - -func procSelfFdReadlink(f *os.File) (string, error) { - return rawProcSelfFdReadlink(int(f.Fd())) -} - -var ( - errPossibleBreakout = errors.New("possible breakout detected") - errInvalidDirectory = errors.New("wandered into deleted directory") - errDeletedInode = errors.New("cannot verify path of deleted inode") -) - -func isDeadInode(file *os.File) error { - // If the nlink of a file drops to 0, there is an attacker deleting - // directories during our walk, which could result in weird /proc values. - // It's better to error out in this case. - stat, err := fstat(file) - if err != nil { - return fmt.Errorf("check for dead inode: %w", err) - } - if stat.Nlink == 0 { - err := errDeletedInode - if stat.Mode&unix.S_IFMT == unix.S_IFDIR { - err = errInvalidDirectory - } - return fmt.Errorf("%w %q", err, file.Name()) - } - return nil -} - -func checkProcSelfFdPath(path string, file *os.File) error { - if err := isDeadInode(file); err != nil { - return err - } - actualPath, err := procSelfFdReadlink(file) - if err != nil { - return fmt.Errorf("get path of handle: %w", err) - } - if actualPath != path { - return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) - } - return nil -} - -// Test hooks used in the procfs tests to verify that the fallback logic works. -// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. -var ( - hookForcePrivateProcRootOpenTree = hookDummyFile - hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile - hookForceGetProcRootUnsafe = hookDummy - - hookForceProcSelfTask = hookDummy - hookForceProcSelf = hookDummy -) - -func hookDummy() bool { return false } -func hookDummyFile(_ *os.File) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index 36373f8c5..4d89a481c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index ba4d74640..8a51690be 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -17,6 +17,7 @@ package profile import ( "encoding/binary" "fmt" + "slices" "sort" "strconv" "strings" @@ -78,12 +79,10 @@ func Merge(srcs []*Profile) (*Profile, error) { } } - for _, s := range p.Sample { - if isZeroSample(s) { - // If there are any zero samples, re-merge the profile to GC - // them. - return Merge([]*Profile{p}) - } + if slices.ContainsFunc(p.Sample, isZeroSample) { + // If there are any zero samples, re-merge the profile to GC + // them. 
+ return Merge([]*Profile{p}) } return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index f47a24390..18df65a8d 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -24,6 +24,7 @@ import ( "math" "path/filepath" "regexp" + "slices" "sort" "strings" "sync" @@ -277,7 +278,7 @@ func (p *Profile) massageMappings() { // Use heuristics to identify main binary and move it to the top of the list of mappings for i, m := range p.Mapping { - file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + file := strings.TrimSpace(strings.ReplaceAll(m.File, "(deleted)", "")) if len(file) == 0 { continue } @@ -734,12 +735,7 @@ func (p *Profile) RemoveLabel(key string) { // HasLabel returns true if a sample has a label with indicated key and value. func (s *Sample) HasLabel(key, value string) bool { - for _, v := range s.Label[key] { - if v == value { - return true - } - } - return false + return slices.Contains(s.Label[key], value) } // SetNumLabel sets the specified key to the specified value for all samples in the @@ -852,7 +848,17 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" + switch { + case strings.HasPrefix(name, "["): + case strings.HasPrefix(name, "linux-vdso"): + case strings.HasPrefix(m.File, "/dev/dri/"): + case m.File == "//anon": + case m.File == "": + case strings.HasPrefix(m.File, "/memfd:"): + default: + return false + } + return true } // Copy makes a fully independent copy of a profile. 
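An editorial aside on the `Unsymbolizable()` rewrite just above: it relies on the fact that Go `switch` cases do not fall through, so a series of bodiless cases followed by a `return false` in `default` reads as a multi-line OR of "unsymbolizable" conditions. A minimal, self-contained sketch of the same idiom (the function below is an illustrative copy with a subset of the conditions, not the vendored one):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// unsymbolizable mirrors the bodiless-case switch from the pprof change:
// each case names a condition that makes the mapping unsymbolizable; a
// matching case simply falls out of the switch and reaches the final
// "return true", while only default returns false.
func unsymbolizable(file string) bool {
	name := filepath.Base(file)
	switch {
	case strings.HasPrefix(name, "["):
	case strings.HasPrefix(name, "linux-vdso"):
	case file == "":
	case strings.HasPrefix(file, "/memfd:"):
	default:
		return false
	}
	return true
}

func main() {
	fmt.Println(unsymbolizable("[vdso]"))       // true
	fmt.Println(unsymbolizable("/memfd:shm"))   // true
	fmt.Println(unsymbolizable("/usr/bin/app")) // false
}
```

Compared with one long boolean expression, each condition gets its own line, which keeps diffs like the one above (adding the `""` and `/memfd:` cases) to a single line per condition.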
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index a15696ba1..31bf6bca6 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -36,6 +36,7 @@ package profile import ( "errors" "fmt" + "slices" ) type buffer struct { @@ -187,6 +188,16 @@ func le32(p []byte) uint32 { return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 } +func peekNumVarints(data []byte) (numVarints int) { + for ; len(data) > 0; numVarints++ { + var err error + if _, data, err = decodeVarint(data); err != nil { + break + } + } + return numVarints +} + func decodeVarint(data []byte) (uint64, []byte, error) { var u uint64 for i := 0; ; i++ { @@ -286,6 +297,9 @@ func decodeInt64(b *buffer, x *int64) error { func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + data := b.data for len(data) > 0 { var u uint64 @@ -316,8 +330,11 @@ func decodeUint64(b *buffer, x *uint64) error { func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { - data := b.data // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + + data := b.data for len(data) > 0 { var u uint64 var err error diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index b2f9fd546..7bba31e8c 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -19,6 +19,7 @@ package profile import ( "fmt" "regexp" + "slices" "strings" ) @@ -40,13 +41,7 @@ func simplifyFunc(f string) string { // Account for unsimplified names -- try to remove the argument list by trimming // starting from the first '(', but skipping reserved names that have '('. for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { - foundReserved := false - for _, res := range reservedNames { - if funcName[ind[0]:ind[1]] == res { - foundReserved = true - break - } - } + foundReserved := slices.Contains(reservedNames, funcName[ind[0]:ind[1]]) if !foundReserved { funcName = funcName[:ind[0]] break diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index b7d7309f3..91e65521b 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,18 @@ +## 1.39.1 + +Update all dependencies. This auto-updated the required version of Go to 1.24, consistent with the fact that Go 1.23 has been out of support for almost six months. + +## 1.39.0 + +### Features + +Add `MatchErrorStrictly` which only passes if `errors.Is(actual, expected)` returns true. `MatchError`, by contrast, will fall back to string comparison.
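A hedged sketch of the behavioral difference the 1.39.0 entry describes, written as a self-contained test (the names `TestMatchErrorStrictly` and `errNotFound` are illustrative, not part of this diff):

```go
package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/onsi/gomega"
)

var errNotFound = errors.New("not found")

func TestMatchErrorStrictly(t *testing.T) {
	g := gomega.NewWithT(t)

	// errors.Is(wrapped, errNotFound) is true, so both matchers pass.
	wrapped := fmt.Errorf("lookup failed: %w", errNotFound)
	g.Expect(wrapped).To(gomega.MatchError(errNotFound))
	g.Expect(wrapped).To(gomega.MatchErrorStrictly(errNotFound))

	// Two distinct errors with identical text: MatchError falls back to
	// string comparison and passes, while MatchErrorStrictly fails because
	// errors.Is returns false.
	g.Expect(errors.New("not found")).To(gomega.MatchError(errNotFound))
	g.Expect(errors.New("not found")).ToNot(gomega.MatchErrorStrictly(errNotFound))
}
```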
+ +## 1.38.3 + +### Fixes +Make string formatting more consistent for users who use format.Object directly + ## 1.38.2 - roll back to go 1.23.0 [c404969] diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 96f04b210..6c23ba338 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -262,7 +262,7 @@ func Object(object any, indentation uint) string { if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent } - return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation)) + return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation, true)) } /* @@ -306,7 +306,7 @@ func formatType(v reflect.Value) string { } } -func formatValue(value reflect.Value, indentation uint) string { +func formatValue(value reflect.Value, indentation uint, isTopLevel bool) string { if indentation > MaxDepth { return "..." } @@ -367,11 +367,11 @@ func formatValue(value reflect.Value, indentation uint) string { case reflect.Func: return fmt.Sprintf("0x%x", value.Pointer()) case reflect.Ptr: - return formatValue(value.Elem(), indentation) + return formatValue(value.Elem(), indentation, isTopLevel) case reflect.Slice: return truncateLongStrings(formatSlice(value, indentation)) case reflect.String: - return truncateLongStrings(formatString(value.String(), indentation)) + return truncateLongStrings(formatString(value.String(), indentation, isTopLevel)) case reflect.Array: return truncateLongStrings(formatSlice(value, indentation)) case reflect.Map: @@ -392,8 +392,8 @@ func formatValue(value reflect.Value, indentation uint) string { } } -func formatString(object any, indentation uint) string { - if indentation == 1 { +func formatString(object any, indentation uint, isTopLevel bool) string { + if isTopLevel { s := fmt.Sprintf("%s", object) components := strings.Split(s, "\n") result := "" @@ -416,14 +416,14 @@ func formatString(object any, indentation uint) string { func formatSlice(v reflect.Value, indentation uint) string { if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { - return formatString(v.Bytes(), indentation) + return formatString(v.Bytes(), indentation, false) } l := v.Len() result := make([]string, l) longest := 0 - for i := 0; i < l; i++ { - result[i] = formatValue(v.Index(i), indentation+1) + for i := range l { + result[i] = formatValue(v.Index(i), indentation+1, false) if len(result[i]) > longest { longest = len(result[i]) } @@ -443,7 +443,7 @@ func formatMap(v reflect.Value, indentation uint) string { longest := 0 for i, key := range v.MapKeys() { value := v.MapIndex(key) - result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1, false), formatValue(value, indentation+1, false)) if len(result[i]) > longest { longest = len(result[i]) } @@ -462,10 +462,10 @@ func formatStruct(v reflect.Value, indentation uint) string { l := v.NumField() result := []string{} longest := 0 - for i := 0; i < l; i++ { + for i := range l { structField := t.Field(i) fieldEntry := v.Field(i) - representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry,
indentation+1)) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1, false)) result = append(result, representation) if len(representation) > longest { longest = len(representation) @@ -479,7 +479,7 @@ func formatStruct(v reflect.Value, indentation uint) string { } func formatInterface(v reflect.Value, indentation uint) string { - return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation)) + return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation, false)) } func isNilValue(a reflect.Value) bool { diff --git a/vendor/github.com/onsi/gomega/gmeasure/experiment.go b/vendor/github.com/onsi/gomega/gmeasure/experiment.go index 9d1b74a78..f4368738d 100644 --- a/vendor/github.com/onsi/gomega/gmeasure/experiment.go +++ b/vendor/github.com/onsi/gomega/gmeasure/experiment.go @@ -456,10 +456,7 @@ func (e *Experiment) Sample(callback func(idx int), samplingConfig SamplingConfi if samplingConfig.N > 0 { maxN = samplingConfig.N } - numParallel := 1 - if samplingConfig.NumParallel > numParallel { - numParallel = samplingConfig.NumParallel - } + numParallel := max(samplingConfig.NumParallel, 1) minSamplingInterval := samplingConfig.MinSamplingInterval work := make(chan int) diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index fdba34ee9..87c70692b 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.38.2" +const GOMEGA_VERSION = "1.39.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 10b6693fd..16ca8f46d 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -146,6 +146,24 @@ func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatch } } +// MatchErrorStrictly succeeds iff actual is a non-nil error that matches the passed in +// expected error according to errors.Is(actual, expected). +// +// This behavior differs from MatchError where +// +// Expect(errors.New("some error")).To(MatchError(errors.New("some error"))) +// +// succeeds, but errors.Is would return false so: +// +// Expect(errors.New("some error")).To(MatchErrorStrictly(errors.New("some error"))) +// +// fails. +func MatchErrorStrictly(expected error) types.GomegaMatcher { + return &matchers.MatchErrorStrictlyMatcher{ + Expected: expected, + } +} + // BeClosed succeeds if actual is a closed channel. // It is an error to pass a non-channel to BeClosed, it is also an error to pass nil // @@ -515,8 +533,8 @@ func HaveExistingField(field string) types.GomegaMatcher { // and even interface values. 
// // actual := 42 -// Expect(actual).To(HaveValue(42)) -// Expect(&actual).To(HaveValue(42)) +// Expect(actual).To(HaveValue(Equal(42))) +// Expect(&actual).To(HaveValue(Equal(42))) func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { return &matchers.HaveValueMatcher{ Matcher: matcher, diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index 9e16dcf5d..16630c18e 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -39,7 +39,7 @@ func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) { } keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { + for i := range keys { success, err := keyMatcher.Match(keys[i].Interface()) if err != nil { return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 1c53f1e56..0cd708153 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -52,7 +52,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err err } keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { + for i := range keys { success, err := keyMatcher.Match(keys[i].Interface()) if err != nil { return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_strictly_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_strictly_matcher.go new file mode 100644 index 000000000..63969b266 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_strictly_matcher.go @@ -0,0 +1,39 @@ +package matchers + +import ( + "errors" + "fmt" + + "github.com/onsi/gomega/format" +) + +type MatchErrorStrictlyMatcher struct { + Expected error +} + +func (matcher *MatchErrorStrictlyMatcher) Match(actual any) (success bool, err error) { + + if isNil(matcher.Expected) { + return false, fmt.Errorf("Expected error is nil, use \"ToNot(HaveOccurred())\" to explicitly check for nil errors") + } + + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + return errors.Is(actualErr, matcher.Expected), nil +} + +func (matcher *MatchErrorStrictlyMatcher) FailureMessage(actual any) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorStrictlyMatcher) NegatedFailureMessage(actual any) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go index 8c38411b2..72edba20f 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -1,6 +1,9 @@ package edge -import . "github.com/onsi/gomega/matchers/support/goraph/node" +import ( + . 
"github.com/onsi/gomega/matchers/support/goraph/node" + "slices" +) type Edge struct { Node1 int @@ -20,13 +23,7 @@ func (ec EdgeSet) Free(node Node) bool { } func (ec EdgeSet) Contains(edge Edge) bool { - for _, e := range ec { - if e == edge { - return true - } - } - - return false + return slices.Contains(ec, edge) } func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index d1236ba72..3ef333387 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -31,6 +31,8 @@ type Spec struct { VM *VM `json:"vm,omitempty" platform:"vm"` // ZOS is platform-specific configuration for z/OS based containers. ZOS *ZOS `json:"zos,omitempty" platform:"zos"` + // FreeBSD is platform-specific configuration for FreeBSD based containers. + FreeBSD *FreeBSD `json:"freebsd,omitempty" platform:"freebsd"` } // Scheduler represents the scheduling attributes for a process. It is based on @@ -83,7 +85,7 @@ type Process struct { // Rlimits specifies rlimit options to apply to the process. Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux,zos"` // ApparmorProfile specifies the apparmor profile for the container. ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. @@ -94,10 +96,12 @@ type Process struct { SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` // IOPriority contains the I/O priority settings for the cgroup. IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` + // ExecCPUAffinity specifies CPU affinity for exec processes. + ExecCPUAffinity *CPUAffinity `json:"execCPUAffinity,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// http://man7.org/linux/man-pages/man7/capabilities.7.html +// https://man7.org/linux/man-pages/man7/capabilities.7.html type LinuxCapabilities struct { // Bounding is the set of capabilities checked by the kernel. Bounding []string `json:"bounding,omitempty" platform:"linux"` @@ -127,6 +131,12 @@ const ( IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" ) +// CPUAffinity specifies process' CPU affinity. +type CPUAffinity struct { + Initial string `json:"initial,omitempty"` + Final string `json:"final,omitempty"` +} + // Box specifies dimensions of a rectangle. Used for specifying the size of a console. type Box struct { // Height is the vertical dimension of a box. @@ -162,7 +172,7 @@ type Mount struct { // Destination is the absolute path where the mount will be placed in the container. Destination string `json:"destination"` // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris,zos"` + Type string `json:"type,omitempty" platform:"linux,solaris,zos,freebsd"` // Source specifies the source path of the mount. Source string `json:"source,omitempty"` // Options are fstab style mount options. 
@@ -228,6 +238,8 @@ type Linux struct { Namespaces []LinuxNamespace `json:"namespaces,omitempty"` // Devices are a list of device nodes that are created for the container Devices []LinuxDevice `json:"devices,omitempty"` + // NetDevices are key-value pairs, keyed by network device name on the host, moved to the container's network namespace. + NetDevices map[string]LinuxNetDevice `json:"netDevices,omitempty"` // Seccomp specifies the seccomp security settings for the container. Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` // RootfsPropagation is the rootfs mount propagation mode for the container. @@ -241,6 +253,8 @@ type Linux struct { // IntelRdt contains Intel Resource Director Technology (RDT) information for // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` + // MemoryPolicy contains NUMA memory policy for the container. + MemoryPolicy *LinuxMemoryPolicy `json:"memoryPolicy,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` // TimeOffsets specifies the offset for supporting time namespaces. @@ -422,7 +436,7 @@ type LinuxCPU struct { // LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) type LinuxPids struct { // Maximum number of PIDs. Default is "no limit". - Limit int64 `json:"limit"` + Limit *int64 `json:"limit,omitempty"` } // LinuxNetwork identification and priority configuration @@ -483,6 +497,12 @@ type LinuxDevice struct { GID *uint32 `json:"gid,omitempty"` } +// LinuxNetDevice represents a single network device to be added to the container's network namespace +type LinuxNetDevice struct { + // Name of the device in the container namespace + Name string `json:"name,omitempty"` +} + // LinuxDeviceCgroup represents a device rule for the devices specified to // the device controller type LinuxDeviceCgroup struct { @@ -627,6 +647,17 @@ type WindowsCPUResources struct { // cycles per 10,000 cycles. Set processor `maximum` to a percentage times // 100. Maximum *uint16 `json:"maximum,omitempty"` + // Set of CPUs to affinitize for this container. + Affinity []WindowsCPUGroupAffinity `json:"affinity,omitempty"` +} + +// Similar to _GROUP_AFFINITY struct defined in +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/miniport/ns-miniport-_group_affinity +type WindowsCPUGroupAffinity struct { + // CPU mask relative to this CPU group. + Mask uint64 `json:"mask,omitempty"` + // Processor group the mask refers to, as returned by GetLogicalProcessorInformationEx. + Group uint32 `json:"group,omitempty"` } // WindowsStorageResources contains storage resource management settings. @@ -659,6 +690,32 @@ type WindowsHyperV struct { UtilityVMPath string `json:"utilityVMPath,omitempty"` } +// IOMems contains information about iomem addresses that should be passed to the VM. +type IOMems struct { + // Guest Frame Number to map the iomem range. If GFN is not specified, the mapping will be done to the same Frame Number as was provided in FirstMFN. + FirstGFN *uint64 `json:"firstGFN,omitempty"` + // Physical page number of iomem regions. + FirstMFN *uint64 `json:"firstMFN"` + // Number of pages to be mapped. + NrMFNs *uint64 `json:"nrMFNs"` +} + +// Hardware configuration for the VM image +type HWConfig struct { + // Path to the container device-tree file that should be passed to the VM configuration. 
+ DeviceTree string `json:"deviceTree,omitempty"` + // Number of virtual cpus for the VM. + VCPUs *uint32 `json:"vcpus,omitempty"` + // Maximum memory in bytes allocated to the VM. + Memory *uint64 `json:"memory,omitempty"` + // Host device tree nodes to passthrough to the VM. + DtDevs []string `json:"dtdevs,omitempty"` + // Allow auto-translated domains to access specific hardware I/O memory pages. + IOMems []IOMems `json:"iomems,omitempty"` + // Allows VM to access specific physical IRQs. + Irqs []uint32 `json:"irqs,omitempty"` +} + // VM contains information for virtual-machine-based containers. type VM struct { // Hypervisor specifies hypervisor-related configuration for virtual-machine-based containers. @@ -667,6 +724,8 @@ type VM struct { Kernel VMKernel `json:"kernel"` // Image specifies guest image related configuration for virtual-machine-based containers. Image VMImage `json:"image,omitempty"` + // Hardware configuration that should be passed to the VM. + HwConfig *HWConfig `json:"hwconfig,omitempty"` } // VMHypervisor contains information about the hypervisor to use for a virtual machine. @@ -751,6 +810,10 @@ const ( ArchPARISC Arch = "SCMP_ARCH_PARISC" ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" + ArchLOONGARCH64 Arch = "SCMP_ARCH_LOONGARCH64" + ArchM68K Arch = "SCMP_ARCH_M68K" + ArchSH Arch = "SCMP_ARCH_SH" + ArchSHEB Arch = "SCMP_ARCH_SHEB" ) // LinuxSeccompAction taken upon Seccomp rule match @@ -805,49 +868,92 @@ type LinuxSyscall struct { type LinuxIntelRdt struct { // The identity for RDT Class of Service ClosID string `json:"closID,omitempty"` + + // Schemata specifies the complete schemata to be written as is to the + // schemata file in resctrl fs. Each element represents a single line in the schemata file. + // NOTE: This will overwrite schemas specified in the L3CacheSchema and/or + // MemBwSchema fields. + Schemata []string `json:"schemata,omitempty"` + // The schema for L3 cache id and capacity bitmask (CBM) // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..." + // NOTE: Should not be specified if Schemata is non-empty. L3CacheSchema string `json:"l3CacheSchema,omitempty"` // The schema of memory bandwidth per L3 cache id // Format: "MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;..." // The unit of memory bandwidth is specified in "percentages" by // default, and in "MBps" if MBA Software Controller is enabled. + // NOTE: Should not be specified if Schemata is non-empty. MemBwSchema string `json:"memBwSchema,omitempty"` - // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of - // the last-level cache (LLC) occupancy for the container. - EnableCMT bool `json:"enableCMT,omitempty"` + // EnableMonitoring enables resctrl monitoring for the container. This will + // create a dedicated resctrl monitoring group for the container. + EnableMonitoring bool `json:"enableMonitoring,omitempty"` +} + +// LinuxMemoryPolicy represents input for the set_mempolicy syscall. +type LinuxMemoryPolicy struct { + // Mode for the set_mempolicy syscall. + Mode MemoryPolicyModeType `json:"mode"` - // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of - // total and local memory bandwidth for the container. - EnableMBM bool `json:"enableMBM,omitempty"` + // Nodes representing the nodemask for the set_mempolicy syscall in comma separated ranges format. + // Format: "<node0>-<node1>,<node2>,<node3>-<node4>,..." + Nodes string `json:"nodes"` + + // Flags for the set_mempolicy syscall.
+ Flags []MemoryPolicyFlagType `json:"flags,omitempty"` } // ZOS contains platform-specific configuration for z/OS based containers. type ZOS struct { - // Devices are a list of device nodes that are created for the container - Devices []ZOSDevice `json:"devices,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []ZOSNamespace `json:"namespaces,omitempty"` } -// ZOSDevice represents the mknod information for a z/OS special device file -type ZOSDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. - GID *uint32 `json:"gid,omitempty"` +// ZOSNamespace is the configuration for a z/OS namespace +type ZOSNamespace struct { + // Type is the type of namespace + Type ZOSNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` } +// ZOSNamespaceType is one of the z/OS namespaces +type ZOSNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + ZOSPIDNamespace ZOSNamespaceType = "pid" + // MountNamespace for isolating mount points + ZOSMountNamespace ZOSNamespaceType = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + ZOSIPCNamespace ZOSNamespaceType = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + ZOSUTSNamespace ZOSNamespaceType = "uts" +) + +type MemoryPolicyModeType string + +const ( + MpolDefault MemoryPolicyModeType = "MPOL_DEFAULT" + MpolBind MemoryPolicyModeType = "MPOL_BIND" + MpolInterleave MemoryPolicyModeType = "MPOL_INTERLEAVE" + MpolWeightedInterleave MemoryPolicyModeType = "MPOL_WEIGHTED_INTERLEAVE" + MpolPreferred MemoryPolicyModeType = "MPOL_PREFERRED" + MpolPreferredMany MemoryPolicyModeType = "MPOL_PREFERRED_MANY" + MpolLocal MemoryPolicyModeType = "MPOL_LOCAL" +) + +type MemoryPolicyFlagType string + +const ( + MpolFNumaBalancing MemoryPolicyFlagType = "MPOL_F_NUMA_BALANCING" + MpolFRelativeNodes MemoryPolicyFlagType = "MPOL_F_RELATIVE_NODES" + MpolFStaticNodes MemoryPolicyFlagType = "MPOL_F_STATIC_NODES" +) + // LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler type LinuxSchedulerPolicy string @@ -887,3 +993,75 @@ const ( // SchedFlagUtilClampMin represents the utilization clamp maximum scheduling flag SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX" ) + +// FreeBSD contains platform-specific configuration for FreeBSD based containers. +type FreeBSD struct { + // Devices which are accessible in the container + Devices []FreeBSDDevice `json:"devices,omitempty"` + // Jail definition for this container + Jail *FreeBSDJail `json:"jail,omitempty"` +} + +type FreeBSDDevice struct { + // Path to the device, relative to /dev. + Path string `json:"path"` + // FileMode permission bits for the device. 
+ Mode *os.FileMode `json:"mode,omitempty"` +} + +// FreeBSDJail describes how to configure the container's jail +type FreeBSDJail struct { + // Parent jail name - this can be used to share a single vnet + // across several containers + Parent string `json:"parent,omitempty"` + // Whether to use parent UTS names or override in the container + Host FreeBSDSharing `json:"host,omitempty"` + // IPv4 address sharing for the container + Ip4 FreeBSDSharing `json:"ip4,omitempty"` + // IPv4 addresses for the container + Ip4Addr []string `json:"ip4Addr,omitempty"` + // IPv6 address sharing for the container + Ip6 FreeBSDSharing `json:"ip6,omitempty"` + // IPv6 addresses for the container + Ip6Addr []string `json:"ip6Addr,omitempty"` + // Which network stack to use for the container + Vnet FreeBSDSharing `json:"vnet,omitempty"` + // If set, Ip4Addr and Ip6Addr addresses will be added to this interface + Interface string `json:"interface,omitempty"` + // List interfaces to be moved to the container's vnet + VnetInterfaces []string `json:"vnetInterfaces,omitempty"` + // SystemV IPC message sharing for the container + SysVMsg FreeBSDSharing `json:"sysvmsg,omitempty"` + // SystemV semaphore message sharing for the container + SysVSem FreeBSDSharing `json:"sysvsem,omitempty"` + // SystemV memory sharing for the container + SysVShm FreeBSDSharing `json:"sysvshm,omitempty"` + // Mount visibility (see jail(8) for details) + EnforceStatfs *int `json:"enforceStatfs,omitempty"` + // Jail capabilities + Allow *FreeBSDJailAllow `json:"allow,omitempty"` +} + +// These values are used to control access to features in the container, either +// disabling the feature, sharing state with the parent or creating new private +// state in the container. +type FreeBSDSharing string + +const ( + FreeBSDShareDisable FreeBSDSharing = "disable" + FreeBSDShareNew FreeBSDSharing = "new" + FreeBSDShareInherit FreeBSDSharing = "inherit" +) + +// FreeBSDJailAllow describes jail capabilities +type FreeBSDJailAllow struct { + SetHostname bool `json:"setHostname,omitempty"` + RawSockets bool `json:"rawSockets,omitempty"` + Chflags bool `json:"chflags,omitempty"` + Mount []string `json:"mount,omitempty"` + Quotas bool `json:"quotas,omitempty"` + SocketAf bool `json:"socketAf,omitempty"` + Mlock bool `json:"mlock,omitempty"` + ReservedPorts bool `json:"reservedPorts,omitempty"` + Suser bool `json:"suser,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 503971e05..0257dba3e 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 2 + VersionMinor = 3 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go index 07e0f77dc..884a8b805 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go @@ -6,78 +6,11 @@ import ( "github.com/opencontainers/selinux/go-selinux" ) -// Deprecated: use selinux.ROFileLabel -var ROMountLabel = selinux.ROFileLabel - -// SetProcessLabel takes a process label and tells the 
kernel to assign the -// label to the next program executed by the current process. -// Deprecated: use selinux.SetExecLabel -var SetProcessLabel = selinux.SetExecLabel - -// ProcessLabel returns the process label that the kernel will assign -// to the next program executed by the current process. If "" is returned -// this indicates that the default labeling will happen for the process. -// Deprecated: use selinux.ExecLabel -var ProcessLabel = selinux.ExecLabel - -// SetSocketLabel takes a process label and tells the kernel to assign the -// label to the next socket that gets created -// Deprecated: use selinux.SetSocketLabel -var SetSocketLabel = selinux.SetSocketLabel - -// SocketLabel retrieves the current default socket label setting -// Deprecated: use selinux.SocketLabel -var SocketLabel = selinux.SocketLabel - -// SetKeyLabel takes a process label and tells the kernel to assign the -// label to the next kernel keyring that gets created -// Deprecated: use selinux.SetKeyLabel -var SetKeyLabel = selinux.SetKeyLabel - -// KeyLabel retrieves the current default kernel keyring label setting -// Deprecated: use selinux.KeyLabel -var KeyLabel = selinux.KeyLabel - -// FileLabel returns the label for specified path -// Deprecated: use selinux.FileLabel -var FileLabel = selinux.FileLabel - -// PidLabel will return the label of the process running with the specified pid -// Deprecated: use selinux.PidLabel -var PidLabel = selinux.PidLabel - // Init initialises the labeling system func Init() { _ = selinux.GetEnabled() } -// ClearLabels will clear all reserved labels -// Deprecated: use selinux.ClearLabels -var ClearLabels = selinux.ClearLabels - -// ReserveLabel will record the fact that the MCS label has already been used. -// This will prevent InitLabels from using the MCS label in a newly created -// container -// Deprecated: use selinux.ReserveLabel -func ReserveLabel(label string) error { - selinux.ReserveLabel(label) - return nil -} - -// ReleaseLabel will remove the reservation of the MCS label. -// This will allow InitLabels to use the MCS label in a newly created -// containers -// Deprecated: use selinux.ReleaseLabel -func ReleaseLabel(label string) error { - selinux.ReleaseLabel(label) - return nil -} - -// DupSecOpt takes a process label and returns security options that -// can be used to set duplicate labels on future container processes -// Deprecated: use selinux.DupSecOpt -var DupSecOpt = selinux.DupSecOpt - // FormatMountLabel returns a string to be used by the mount command. Using // the SELinux `context` mount option. Changing labels of files on mount // points with this option can never be changed. diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go index e49e6d53f..95f29e21f 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go @@ -18,7 +18,7 @@ var validOptions = map[string]bool{ "level": true, } -var ErrIncompatibleLabel = errors.New("Bad SELinux option z and Z can not be used together") +var ErrIncompatibleLabel = errors.New("bad SELinux option: z and Z can not be used together") // InitLabels returns the process label and file labels to be used within // the container. 
A list of options can be passed into this function to alter @@ -52,11 +52,11 @@ func InitLabels(options []string) (plabel string, mlabel string, retErr error) { return "", selinux.PrivContainerMountLabel(), nil } if i := strings.Index(opt, ":"); i == -1 { - return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) + return "", "", fmt.Errorf("bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) } con := strings.SplitN(opt, ":", 2) if !validOptions[con[0]] { - return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) + return "", "", fmt.Errorf("bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0]) } if con[0] == "filetype" { mcon["type"] = con[1] @@ -79,12 +79,6 @@ func InitLabels(options []string) (plabel string, mlabel string, retErr error) { return processLabel, mountLabel, nil } -// Deprecated: The GenLabels function is only to be used during the transition -// to the official API. Use InitLabels(strings.Fields(options)) instead. -func GenLabels(options string) (string, string, error) { - return InitLabels(strings.Fields(options)) -} - // SetFileLabel modifies the "path" label to the specified file label func SetFileLabel(path string, fileLabel string) error { if !selinux.GetEnabled() || fileLabel == "" { @@ -123,11 +117,6 @@ func Relabel(path string, fileLabel string, shared bool) error { return selinux.Chcon(path, fileLabel, true) } -// DisableSecOpt returns a security opt that can disable labeling -// support for future container processes -// Deprecated: use selinux.DisableSecOpt -var DisableSecOpt = selinux.DisableSecOpt - // Validate checks that the label does not include unexpected options func Validate(label string) error { if strings.Contains(label, "z") && strings.Contains(label, "Z") { diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go index 1c260cb27..7a54afc5e 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -10,12 +10,6 @@ func InitLabels([]string) (string, string, error) { return "", "", nil } -// Deprecated: The GenLabels function is only to be used during the transition -// to the official API. Use InitLabels(strings.Fields(options)) instead. -func GenLabels(string) (string, string, error) { - return "", "", nil -} - func SetFileLabel(string, string) error { return nil } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index af058b84b..15150d475 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -41,6 +41,10 @@ var ( // ErrVerifierNil is returned when a context verifier function is nil. ErrVerifierNil = errors.New("verifier function is nil") + // ErrNotTGLeader is returned by [SetKeyLabel] if the calling thread + // is not the thread group leader. 
+ ErrNotTGLeader = errors.New("calling thread is not the thread group leader") + // CategoryRange allows the upper bound on the category range to be adjusted CategoryRange = DefaultCategoryRange @@ -149,7 +153,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) { // of the program is finished to guarantee another goroutine does not migrate to the current // thread before execution is complete. func SetExecLabel(label string) error { - return writeCon(attrPath("exec"), label) + return writeConThreadSelf("attr/exec", label) } // SetTaskLabel sets the SELinux label for the current thread, or an error. @@ -157,7 +161,7 @@ func SetExecLabel(label string) error { // be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee // the current thread does not run in a new mislabeled thread. func SetTaskLabel(label string) error { - return writeCon(attrPath("current"), label) + return writeConThreadSelf("attr/current", label) } // SetSocketLabel takes a process label and tells the kernel to assign the @@ -166,12 +170,12 @@ func SetTaskLabel(label string) error { // the socket is created to guarantee another goroutine does not migrate // to the current thread before execution is complete. func SetSocketLabel(label string) error { - return writeCon(attrPath("sockcreate"), label) + return writeConThreadSelf("attr/sockcreate", label) } // SocketLabel retrieves the current socket label setting func SocketLabel() (string, error) { - return readCon(attrPath("sockcreate")) + return readConThreadSelf("attr/sockcreate") } // PeerLabel retrieves the label of the client on the other side of a socket @@ -180,17 +184,21 @@ func PeerLabel(fd uintptr) (string, error) { } // SetKeyLabel takes a process label and tells the kernel to assign the -// label to the next kernel keyring that gets created. Calls to SetKeyLabel -// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until -// the kernel keyring is created to guarantee another goroutine does not migrate -// to the current thread before execution is complete. +// label to the next kernel keyring that gets created. +// +// Calls to SetKeyLabel should be wrapped in +// runtime.LockOSThread()/runtime.UnlockOSThread() until the kernel keyring is +// created to guarantee another goroutine does not migrate to the current +// thread before execution is complete. +// +// Only the thread group leader can set key label. 
func SetKeyLabel(label string) error { return setKeyLabel(label) } // KeyLabel retrieves the current kernel keyring label setting func KeyLabel() (string, error) { - return readCon("/proc/self/attr/keycreate") + return keyLabel() } // Get returns the Context as a string diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index c80c10971..6d7f8e270 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -17,8 +17,11 @@ import ( "strings" "sync" - "github.com/opencontainers/selinux/pkg/pwalkdir" + "github.com/cyphar/filepath-securejoin/pathrs-lite" + "github.com/cyphar/filepath-securejoin/pathrs-lite/procfs" "golang.org/x/sys/unix" + + "github.com/opencontainers/selinux/pkg/pwalkdir" ) const ( @@ -45,7 +48,7 @@ type selinuxState struct { type level struct { cats *big.Int - sens uint + sens int } type mlsRange struct { @@ -73,10 +76,6 @@ var ( mcsList: make(map[string]bool), } - // for attrPath() - attrPathOnce sync.Once - haveThreadSelf bool - // for policyRoot() policyRootOnce sync.Once policyRootVal string @@ -138,6 +137,7 @@ func verifySELinuxfsMount(mnt string) bool { return false } + //#nosec G115 -- there is no overflow here. if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) { return false } @@ -255,48 +255,183 @@ func readConfig(target string) string { return "" } -func isProcHandle(fh *os.File) error { - var buf unix.Statfs_t +func readConFd(in *os.File) (string, error) { + data, err := io.ReadAll(in) + if err != nil { + return "", err + } + return string(bytes.TrimSuffix(data, []byte{0})), nil +} - for { - err := unix.Fstatfs(int(fh.Fd()), &buf) - if err == nil { - break - } - if err != unix.EINTR { - return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err} - } +func writeConFd(out *os.File, val string) error { + var err error + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) } - if buf.Type != unix.PROC_SUPER_MAGIC { - return fmt.Errorf("file %q is not on procfs", fh.Name()) + return err +} + +// openProcThreadSelf is a small wrapper around [procfs.Handle.OpenThreadSelf] +// and [pathrs.Reopen] to make "one-shot opens" slightly more ergonomic. The +// provided mode must be os.O_* flags to indicate what mode the returned file +// should be opened with (flags like os.O_CREAT and os.O_EXCL are not +// supported). +// +// If no error occurred, the returned handle is guaranteed to be exactly +// /proc/thread-self/<subpath> with no tricky mounts or symlinks causing you to +// operate on an unexpected path (with some caveats on pre-openat2 or +// pre-fsopen kernels).
+func openProcThreadSelf(subpath string, mode int) (*os.File, procfs.ProcThreadSelfCloser, error) { + if subpath == "" { + return nil, nil, ErrEmptyPath } - return nil -} + proc, err := procfs.OpenProcRoot() + if err != nil { + return nil, nil, err + } + defer proc.Close() -func readCon(fpath string) (string, error) { - if fpath == "" { - return "", ErrEmptyPath + handle, closer, err := proc.OpenThreadSelf(subpath) + if err != nil { + return nil, nil, fmt.Errorf("open /proc/thread-self/%s handle: %w", subpath, err) + } + defer handle.Close() // we will return a re-opened handle + + file, err := pathrs.Reopen(handle, mode) + if err != nil { + closer() + return nil, nil, fmt.Errorf("reopen /proc/thread-self/%s handle (%#x): %w", subpath, mode, err) } + return file, closer, nil +} - in, err := os.Open(fpath) +// Read the contents of /proc/thread-self/<fpath>. +func readConThreadSelf(fpath string) (string, error) { + in, closer, err := openProcThreadSelf(fpath, os.O_RDONLY|unix.O_CLOEXEC) if err != nil { return "", err } + defer closer() defer in.Close() - if err := isProcHandle(in); err != nil { + return readConFd(in) +} + +// Write to /proc/thread-self/<fpath>. +func writeConThreadSelf(fpath, val string) error { + if val == "" { + if !getEnabled() { + return nil + } + } + + out, closer, err := openProcThreadSelf(fpath, os.O_WRONLY|unix.O_CLOEXEC) + if err != nil { + return err + } + defer closer() + defer out.Close() + + return writeConFd(out, val) +} + +// openProcSelf is a small wrapper around [procfs.Handle.OpenSelf] and +// [pathrs.Reopen] to make "one-shot opens" slightly more ergonomic. The +// provided mode must be os.O_* flags to indicate what mode the returned file +// should be opened with (flags like os.O_CREAT and os.O_EXCL are not +// supported). +// +// If no error occurred, the returned handle is guaranteed to be exactly +// /proc/self/<subpath> with no tricky mounts or symlinks causing you to +// operate on an unexpected path (with some caveats on pre-openat2 or +// pre-fsopen kernels). +func openProcSelf(subpath string, mode int) (*os.File, error) { + if subpath == "" { + return nil, ErrEmptyPath + } + + proc, err := procfs.OpenProcRoot() + if err != nil { + return nil, err + } + defer proc.Close() + + handle, err := proc.OpenSelf(subpath) + if err != nil { + return nil, fmt.Errorf("open /proc/self/%s handle: %w", subpath, err) + } + defer handle.Close() // we will return a re-opened handle + + file, err := pathrs.Reopen(handle, mode) + if err != nil { + return nil, fmt.Errorf("reopen /proc/self/%s handle (%#x): %w", subpath, mode, err) + } + return file, nil +} + +// Read the contents of /proc/self/<fpath>. +func readConSelf(fpath string) (string, error) { + in, err := openProcSelf(fpath, os.O_RDONLY|unix.O_CLOEXEC) + if err != nil { return "", err } + defer in.Close() + return readConFd(in) } -func readConFd(in *os.File) (string, error) { - data, err := io.ReadAll(in) +// Write to /proc/self/<fpath>. +func writeConSelf(fpath, val string) error { + if val == "" { + if !getEnabled() { + return nil + } + } + + out, err := openProcSelf(fpath, os.O_WRONLY|unix.O_CLOEXEC) if err != nil { - return "", err + return err } - return string(bytes.TrimSuffix(data, []byte{0})), nil + defer out.Close() + + return writeConFd(out, val) +} + +// openProcPid is a small wrapper around [procfs.Handle.OpenPid] and +// [pathrs.Reopen] to make "one-shot opens" slightly more ergonomic.
The +// provided mode must be os.O_* flags to indicate what mode the returned file +// should be opened with (flags like os.O_CREAT and os.O_EXCL are not +// supported). +// +// If no error occurred, the returned handle is guaranteed to be exactly +// /proc/<pid>/<subpath> with no tricky mounts or symlinks causing you to +// operate on an unexpected path (with some caveats on pre-openat2 or +// pre-fsopen kernels). +func openProcPid(pid int, subpath string, mode int) (*os.File, error) { + if subpath == "" { + return nil, ErrEmptyPath + } + + proc, err := procfs.OpenProcRoot() + if err != nil { + return nil, err + } + defer proc.Close() + + handle, err := proc.OpenPid(pid, subpath) + if err != nil { + return nil, fmt.Errorf("open /proc/%d/%s handle: %w", pid, subpath, err) + } + defer handle.Close() // we will return a re-opened handle + + file, err := pathrs.Reopen(handle, mode) + if err != nil { + return nil, fmt.Errorf("reopen /proc/%d/%s handle (%#x): %w", pid, subpath, mode, err) + } + return file, nil } // classIndex returns the int index for an object class in the loaded policy, @@ -392,78 +527,34 @@ func lFileLabel(fpath string) (string, error) { } func setFSCreateLabel(label string) error { - return writeCon(attrPath("fscreate"), label) + return writeConThreadSelf("attr/fscreate", label) } // fsCreateLabel returns the default label which the kernel is using // for file system objects created by this task. "" indicates default. func fsCreateLabel() (string, error) { - return readCon(attrPath("fscreate")) + return readConThreadSelf("attr/fscreate") } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { - return readCon(attrPath("current")) + return readConThreadSelf("attr/current") } // pidLabel returns the SELinux label of the given pid, or an error. func pidLabel(pid int) (string, error) { - return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) + it, err := openProcPid(pid, "attr/current", os.O_RDONLY|unix.O_CLOEXEC) + if err != nil { + return "", err + } + defer it.Close() + return readConFd(it) } // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error.
func execLabel() (string, error) { - return readCon(attrPath("exec")) -} - -func writeCon(fpath, val string) error { - if fpath == "" { - return ErrEmptyPath - } - if val == "" { - if !getEnabled() { - return nil - } - } - - out, err := os.OpenFile(fpath, os.O_WRONLY, 0) - if err != nil { - return err - } - defer out.Close() - - if err := isProcHandle(out); err != nil { - return err - } - - if val != "" { - _, err = out.Write([]byte(val)) - } else { - _, err = out.Write(nil) - } - if err != nil { - return err - } - return nil -} - -func attrPath(attr string) string { - // Linux >= 3.17 provides this - const threadSelfPrefix = "/proc/thread-self/attr" - - attrPathOnce.Do(func() { - st, err := os.Stat(threadSelfPrefix) - if err == nil && st.Mode().IsDir() { - haveThreadSelf = true - } - }) - - if haveThreadSelf { - return filepath.Join(threadSelfPrefix, attr) - } - - return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr) + return readConThreadSelf("attr/exec") } // canonicalizeContext takes a context string and writes it to the kernel @@ -501,14 +592,14 @@ func catsToBitset(cats string) (*big.Int, error) { return nil, err } for i := catstart; i <= catend; i++ { - bitset.SetBit(bitset, int(i), 1) + bitset.SetBit(bitset, i, 1) } } else { cat, err := parseLevelItem(ranges[0], category) if err != nil { return nil, err } - bitset.SetBit(bitset, int(cat), 1) + bitset.SetBit(bitset, cat, 1) } } @@ -516,16 +607,17 @@ func catsToBitset(cats string) (*big.Int, error) { } // parseLevelItem parses and verifies that a sensitivity or category are valid -func parseLevelItem(s string, sep levelItem) (uint, error) { +func parseLevelItem(s string, sep levelItem) (int, error) { if len(s) < minSensLen || levelItem(s[0]) != sep { return 0, ErrLevelSyntax } - val, err := strconv.ParseUint(s[1:], 10, 32) + const bitSize = 31 // Make sure the result fits into signed int32. + val, err := strconv.ParseUint(s[1:], 10, bitSize) if err != nil { return 0, err } - return uint(val), nil + return int(val), nil } // parseLevel fills a level from a string that contains @@ -582,7 +674,8 @@ func bitsetToStr(c *big.Int) string { var str string length := 0 - for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ { + i0 := int(c.TrailingZeroBits()) //#nosec G115 -- don't expect TrailingZeroBits to return values with highest bit set. + for i := i0; i < c.BitLen(); i++ { if c.Bit(i) == 0 { continue } @@ -622,7 +715,7 @@ func (l *level) equal(l2 *level) bool { // String returns an mlsRange as a string. func (m mlsRange) String() string { - low := "s" + strconv.Itoa(int(m.low.sens)) + low := "s" + strconv.Itoa(m.low.sens) if m.low.cats != nil && m.low.cats.BitLen() > 0 { low += ":" + bitsetToStr(m.low.cats) } @@ -631,7 +724,7 @@ func (m mlsRange) String() string { return low } - high := "s" + strconv.Itoa(int(m.high.sens)) + high := "s" + strconv.Itoa(m.high.sens) if m.high.cats != nil && m.high.cats.BitLen() > 0 { high += ":" + bitsetToStr(m.high.cats) } @@ -639,15 +732,16 @@ func (m mlsRange) String() string { return low + "-" + high } -// TODO: remove min and max once Go < 1.21 is not supported. -func max(a, b uint) uint { +// TODO: remove these in favor of built-in min/max +// once we stop supporting Go < 1.21.
+func maxInt(a, b int) int { if a > b { return a } return b } -func min(a, b uint) uint { +func minInt(a, b int) int { if a < b { return a } @@ -676,10 +770,10 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { outrange := &mlsRange{low: &level{}, high: &level{}} /* take the greatest of the low */ - outrange.low.sens = max(s.low.sens, t.low.sens) + outrange.low.sens = maxInt(s.low.sens, t.low.sens) /* take the least of the high */ - outrange.high.sens = min(s.high.sens, t.high.sens) + outrange.high.sens = minInt(s.high.sens, t.high.sens) /* find the intersecting categories */ if s.low.cats != nil && t.low.cats != nil { @@ -724,16 +818,29 @@ func peerLabel(fd uintptr) (string, error) { // setKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created func setKeyLabel(label string) error { - err := writeCon("/proc/self/attr/keycreate", label) + // Rather than using /proc/thread-self, we want to use /proc/self to + // operate on the thread-group leader. + err := writeConSelf("attr/keycreate", label) if errors.Is(err, os.ErrNotExist) { return nil } if label == "" && errors.Is(err, os.ErrPermission) { return nil } + if errors.Is(err, unix.EACCES) && unix.Getpid() != unix.Gettid() { + return ErrNotTGLeader + } return err } +// KeyLabel retrieves the current kernel keyring label setting for this +// thread-group. +func keyLabel() (string, error) { + // Rather than using /proc/thread-self, we want to use /proc/self to + // operate on the thread-group leader. + return readConSelf("attr/keycreate") +} + // get returns the Context as a string func (c Context) get() string { if l := c["level"]; l != "" { @@ -809,8 +916,7 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { - //nolint:gosec // ignore G306: permissions to be 0600 or less. - return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, @@ -1017,8 +1123,7 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - //nolint:gosec // ignore G306: permissions to be 0600 or less. 
- return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0) } // copyLevel returns a label with the MLS/MCS level from src label replaced on diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 0889fbe0e..382244e50 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -3,15 +3,11 @@ package selinux -func attrPath(string) string { - return "" -} - -func readCon(string) (string, error) { +func readConThreadSelf(string) (string, error) { return "", nil } -func writeCon(string, string) error { +func writeConThreadSelf(string, string) error { return nil } @@ -81,6 +77,10 @@ func setKeyLabel(string) error { return nil } +func keyLabel() (string, error) { + return "", nil +} + func (c Context) get() string { return "" } diff --git a/vendor/github.com/openshift/api/.coderabbit.yaml b/vendor/github.com/openshift/api/.coderabbit.yaml index 1cb17f1e1..a3ee2d122 100644 --- a/vendor/github.com/openshift/api/.coderabbit.yaml +++ b/vendor/github.com/openshift/api/.coderabbit.yaml @@ -14,6 +14,7 @@ reviews: - "!payload-manifests" - "!**/zz_generated.crd-manifests/*" # Contains files - "!**/zz_generated.featuregated-crd-manifests/**" # Contains folders + - "!openapi/**" - "!**/vendor/**" - "!vendor/**" tools: diff --git a/vendor/github.com/openshift/api/.golangci.go-validated.yaml b/vendor/github.com/openshift/api/.golangci.go-validated.yaml index 44c73149d..ed8fcdbe2 100644 --- a/vendor/github.com/openshift/api/.golangci.go-validated.yaml +++ b/vendor/github.com/openshift/api/.golangci.go-validated.yaml @@ -12,6 +12,7 @@ linters: linters: enable: - optionalfields + - nonpointerstructs disable: - "*" lintersConfig: diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index 516339b48..1f1c43fc4 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -13,12 +13,15 @@ linters: enable: - forbiddenmarkers - maxlength + - minlength - namingconventions - nobools - nomaps + - preferredmarkers - statussubresource disable: - statusoptional # This is legacy and not something we currently recommend. + - nonpointerstructs # This is intended for native types, not CRD types. lintersConfig: conditions: isFirstField: Warn @@ -30,7 +33,6 @@ linters: - identifier: "openshift:validation:FeatureSetAwareEnum" - identifier: "openshift:validation:FeatureSetAwareXValidation" - identifier: "kubebuilder:validation:UniqueItems" - - identifier: "kubebuilder:validation:Pattern" # Use CEL expressions instead optionalfields: pointers: preference: WhenRequired @@ -44,6 +46,11 @@ linters: # This will force omitzero on optional struct fields. # This means they can be omitted correctly and prevents the need for pointers to structs. policy: SuggestFix + preferredmarkers: + markers: + - preferredIdentifier: "kubebuilder:validation:XValidation" + equivalentIdentifiers: + - identifier: "kubebuilder:validation:Pattern" # Use CEL expressions instead to allow more expressive error messages. 
requiredfields: pointers: # This will force pointers when the field is required, but only when the zero @@ -60,15 +67,13 @@ linters: policy: SuggestFix namingconventions: conventions: - - name: norefs - violationMatcher: "(?i)ref(erence)?s?$" - operation: Drop - message: "reference fields should not need to be named ref(s)/reference(s)" - name: nokind violationMatcher: "^Kind$" operation: Replacement replacement: "Resource" message: "API Kinds can be ambiguous and should be replaced with Resource" + noreferences: + policy: NoReferences uniquemarkers: customMarkers: - identifier: "openshift:validation:FeatureGateAwareEnum" @@ -101,6 +106,11 @@ linters: # This regex must always be updated in tandem with the regex in .golangci.go-validated.yaml that prevents `optionalfields` from being applied to the files in the path. path: machine/v1beta1/(types_awsprovider.go|types_azureprovider.go|types_gcpprovider.go|types_vsphereprovider.go)|machine/v1alpha1/types_openstack.go text: "optionalfields" + - linters: + - kubeapilinter + # Silence norefs lint for `Ref` field in ClusterAPI as it refers to an OCI image reference, not a kube object reference. + path: operator/v1alpha1/types_clusterapi.go + text: "noreferences: naming convention \"no-references\": field ClusterAPIInstallerComponentImage.Ref: field names should not contain reference-related words" issues: # We have a lot of existing issues. # Want to make sure that those adding new fields have an diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index c069d8040..9b32b58e4 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -114,15 +114,17 @@ update-scripts: update-compatibility update-openapi update-deepcopy update-proto # Update codegen runs all generators in the order they are defined in the root.go file. # The per group generators are:[compatibility, deepcopy, swagger-docs, empty-partial-schema, schema-patch, crd-manifest-merge] # The multi group generators are:[openapi] +# The payload generation must come after these generators have run so they are included here as well, rather than in update-non-codegen. .PHONY: update-codegen update-codegen: hack/update-codegen.sh + make update-payload-crds update-payload-featuregates # Update non-codegen runs all generators that are not part of the codegen utility, or # are part of it, but are not run by default when invoking codegen without a specific generator. # E.g. the payload feature gates which is not part of the generator style, but is still a subcommand. 
.PHONY: update-non-codegen -update-non-codegen: update-protobuf tests-vendor update-prerelease-lifecycle-gen update-payload-crds update-payload-featuregates +update-non-codegen: update-protobuf tests-vendor update-prerelease-lifecycle-gen .PHONY: update-compatibility update-compatibility: diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go index 46e211cd5..5abbfec7c 100644 --- a/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go @@ -21,6 +21,7 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:path=compatibilityrequirements,scope=Cluster // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2479 +// +kubebuilder:metadata:annotations="release.openshift.io/feature-gate=CRDCompatibilityRequirementOperator" type CompatibilityRequirement struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 319f2b335..433546401 100644 --- a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,5 +1,6 @@ compatibilityrequirements.apiextensions.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/feature-gate: CRDCompatibilityRequirementOperator ApprovedPRNumber: https://github.com/openshift/api/pull/2479 CRDName: compatibilityrequirements.apiextensions.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 0afe7b1d8..31d888185 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -212,6 +212,7 @@ type APIServerEncryption struct { // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";identity;aescbc;aesgcm // +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryptionProvider,enum="";identity;aescbc;aesgcm;KMS +// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryption,enum="";identity;aescbc;aesgcm;KMS type EncryptionType string const ( diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index e300d4eab..e7433281f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -80,8 +80,7 @@ type AuthenticationSpec struct { // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` - // oidcProviders are OIDC identity providers that can issue tokens - // for this cluster + // oidcProviders are OIDC identity providers that can issue tokens for this cluster // Can only be set if "Type" is set to "OIDC". // // At most one provider can be configured. 
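The authentication hunks here and below only reflow doc comments, so the Go shape of the types is unchanged. A hedged sketch of a minimal OIDCProvider literal assembled from the fields documented in these hunks; the values are illustrative, and 'NoPrefix' is written as a raw string conversion to avoid guessing the exported constant name:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	p := configv1.OIDCProvider{
		// name only distinguishes providers; it has no effect on token validation.
		Name: "example-oidc",
		Issuer: configv1.TokenIssuer{
			// issuerURL is matched against the JWT 'iss' claim and must use https.
			URL: "https://myoidc.tld",
			// At least one audience must match the token's 'aud' claim.
			Audiences: []configv1.TokenAudience{"openshift-aud"},
		},
		ClaimMappings: configv1.TokenClaimMappings{
			Username: configv1.UsernameClaimMapping{
				Claim: "email",
				// Skip the default "{issuerURL}#" prefixing behavior.
				PrefixPolicy: configv1.UsernamePrefixPolicy("NoPrefix"),
			},
			Groups: configv1.PrefixedClaimMapping{
				TokenClaimMapping: configv1.TokenClaimMapping{Claim: "groups"},
				Prefix:            "myoidc:", // claim value "a" becomes group "myoidc:a"
			},
		},
	}
	fmt.Println(p.Name)
}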
@@ -113,8 +112,7 @@ type AuthenticationStatus struct { // +optional IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - // oidcClients is where participating operators place the current OIDC client status - // for OIDC clients that can be customized by the cluster-admin. + // oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. // // +listType=map // +listMapKey=componentNamespace @@ -146,8 +144,7 @@ type AuthenticationType string const ( // None means that no cluster managed authentication system is in place. - // Note that user login will only work if a manually configured system is in place and - // referenced in authentication spec via oauthMetadata and + // Note that user login will only work if a manually configured system is in place and referenced in authentication spec via oauthMetadata and // webhookTokenAuthenticator/oidcProviders AuthenticationTypeNone AuthenticationType = "None" @@ -199,10 +196,8 @@ const ( ) type OIDCProvider struct { - // name is a required field that configures the unique human-readable identifier - // associated with the identity provider. - // It is used to distinguish between multiple identity providers - // and has no impact on token validation or authentication mechanics. + // name is a required field that configures the unique human-readable identifier associated with the identity provider. + // It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. // // name must not be an empty string (""). // @@ -210,15 +205,12 @@ type OIDCProvider struct { // +required Name string `json:"name"` - // issuer is a required field that configures how the platform interacts - // with the identity provider and how tokens issued from the identity provider - // are evaluated by the Kubernetes API server. + // issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server. // // +required Issuer TokenIssuer `json:"issuer"` - // oidcClients is an optional field that configures how on-cluster, - // platform clients should request tokens from the identity provider. + // oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. // oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. // // +listType=map @@ -228,16 +220,12 @@ type OIDCProvider struct { // +optional OIDCClients []OIDCClientConfig `json:"oidcClients"` - // claimMappings is a required field that configures the rules to be used by - // the Kubernetes API server for translating claims in a JWT token, issued - // by the identity provider, to a cluster identity. + // claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity. // // +required ClaimMappings TokenClaimMappings `json:"claimMappings"` - // claimValidationRules is an optional field that configures the rules to - // be used by the Kubernetes API server for validating the claims in a JWT - // token issued by the identity provider. 
+ // claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. // // Validation rules are joined via an AND operation. // @@ -245,9 +233,7 @@ type OIDCProvider struct { // +optional ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` - // userValidationRules is an optional field that configures the set of rules - // used to validate the cluster user identity that was constructed via - // mapping token claims to user identity attributes. + // userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. // Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. // If any rule in the chain of rules evaluates to 'false', authentication will fail. // When specified, at least one rule must be specified and no more than 64 rules may be specified. @@ -266,10 +252,8 @@ type TokenAudience string // +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="self.?discoveryURL.orValue(\"\").size() > 0 ? (self.issuerURL.size() == 0 || self.discoveryURL.find('^.+[^/]') != self.issuerURL.find('^.+[^/]')) : true",message="discoveryURL must be different from issuerURL" type TokenIssuer struct { - // issuerURL is a required field that configures the URL used to issue tokens - // by the identity provider. - // The Kubernetes API server determines how authentication tokens should be handled - // by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + // issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + // The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. // // Must be at least 1 character and must not exceed 512 characters in length. // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -284,8 +268,7 @@ type TokenIssuer struct { // +required URL string `json:"issuerURL"` - // audiences is a required field that configures the acceptable audiences - // the JWT token, issued by the identity provider, must be issued to. + // audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. // At least one of the entries must match the 'aud' claim in the JWT token. // // audiences must contain at least one entry and must not exceed ten entries. @@ -296,24 +279,20 @@ type TokenIssuer struct { // +required Audiences []TokenAudience `json:"audiences"` - // issuerCertificateAuthority is an optional field that configures the - // certificate authority, used by the Kubernetes API server, to validate - // the connection to the identity provider when fetching discovery information. + // issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. // // When not specified, the system trust is used. 
// - // When specified, it must reference a ConfigMap in the openshift-config - // namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - // key in the data field of the ConfigMap. + // When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. // // +optional CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` - // discoveryURL is an optional field that, if specified, overrides the default discovery endpoint - // used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` - // as "{issuerURL}/.well-known/openid-configuration". + // discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + // By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". // - // The discoveryURL must be a valid absolute HTTPS URL. It must not contain query - // parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). + // The discoveryURL must be a valid absolute HTTPS URL. + // It must not contain query parameters, user information, or fragments. + // Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). // The discoveryURL value must be at least 1 character long and no longer than 2048 characters. // // +optional @@ -329,39 +308,36 @@ type TokenIssuer struct { } type TokenClaimMappings struct { - // username is a required field that configures how the username of a cluster identity - // should be constructed from the claims in a JWT token issued by the identity provider. + // username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. // // +required Username UsernameClaimMapping `json:"username"` - // groups is an optional field that configures how the groups of a cluster identity - // should be constructed from the claims in a JWT token issued - // by the identity provider. - // When referencing a claim, if the claim is present in the JWT - // token, its value must be a list of groups separated by a comma (','). + // groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + // + // When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + // // For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. // // +optional Groups PrefixedClaimMapping `json:"groups,omitempty"` - // uid is an optional field for configuring the claim mapping - // used to construct the uid for the cluster identity. + // uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. // // When using uid.claim to specify the claim it must be a single string value. // When using uid.expression the expression must result in a single string value. // - // When omitted, this means the user has no opinion and the platform - // is left to choose a default, which is subject to change over time. 
+ // When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + // // The current default is to use the 'sub' claim. // // +optional // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings UID *TokenClaimOrExpressionMapping `json:"uid,omitempty"` - // extra is an optional field for configuring the mappings - // used to construct the extra attribute for the cluster identity. + // extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. // When omitted, no extra attributes will be present on the cluster identity. + // // key values for extra mappings must be unique. // A maximum of 32 extra attribute mappings may be provided. // @@ -373,52 +349,39 @@ type TokenClaimMappings struct { Extra []ExtraMapping `json:"extra,omitempty"` } -// TokenClaimMapping allows specifying a JWT token -// claim to be used when mapping claims from an -// authentication token to cluster identities. +// TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities. type TokenClaimMapping struct { - // claim is a required field that configures the JWT token - // claim whose value is assigned to the cluster identity - // field associated with this mapping. + // claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. // // +required Claim string `json:"claim"` } -// TokenClaimOrExpressionMapping allows specifying either a JWT -// token claim or CEL expression to be used when mapping claims -// from an authentication token to cluster identities. +// TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities. // +kubebuilder:validation:XValidation:rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" type TokenClaimOrExpressionMapping struct { - // claim is an optional field for specifying the - // JWT token claim that is used in the mapping. - // The value of this claim will be assigned to - // the field in which this mapping is associated. + // claim is an optional field for specifying the JWT token claim that is used in the mapping. + // The value of this claim will be assigned to the field in which this mapping is associated. // // Precisely one of claim or expression must be set. // claim must not be specified when expression is set. - // When specified, claim must be at least 1 character in length - // and must not exceed 256 characters in length. + // When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. // // +optional // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:MinLength=1 Claim string `json:"claim,omitempty"` - // expression is an optional field for specifying a - // CEL expression that produces a string value from - // JWT token claims. + // expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. // - // CEL expressions have access to the token claims - // through a CEL variable, 'claims'. + // CEL expressions have access to the token claims through a CEL variable, 'claims'. // 'claims' is a map of claim names to claim values. 
// For example, the 'sub' claim value can be accessed as 'claims.sub'. // Nested claims can be accessed using dot notation ('claims.foo.bar'). // // Precisely one of claim or expression must be set. // expression must not be specified when claim is set. - // When specified, expression must be at least 1 character in length - // and must not exceed 1024 characters in length. + // When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. // // +optional // +kubebuilder:validation:MaxLength=1024 @@ -426,13 +389,10 @@ type TokenClaimOrExpressionMapping struct { Expression string `json:"expression,omitempty"` } -// ExtraMapping allows specifying a key and CEL expression -// to evaluate the keys' value. It is used to create additional -// mappings and attributes added to a cluster identity from -// a provided authentication token. +// ExtraMapping allows specifying a key and CEL expression to evaluate the key's value. +// It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. type ExtraMapping struct { - // key is a required field that specifies the string - // to use as the extra attribute key. + // key is a required field that specifies the string to use as the extra attribute key. // // key must be a domain-prefix path (e.g 'example.org/foo'). // key must not exceed 510 characters in length. @@ -445,8 +405,7 @@ type ExtraMapping struct { // It must only contain lower case alphanumeric characters and '-' or '.'. // It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". // - // The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - // alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + // The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. // It must not exceed 256 characters in length. // // +required @@ -468,14 +427,12 @@ type ExtraMapping struct { // +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].size() <= 256",message="the path of the key must not exceed 256 characters in length" Key string `json:"key"` - // valueExpression is a required field to specify the CEL expression to extract - // the extra attribute value from a JWT token's claims. + // valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. // valueExpression must produce a string or string array value. // "", [], and null are treated as the extra mapping not being present. // Empty string values within an array are filtered out. // - // CEL expressions have access to the token claims - // through a CEL variable, 'claims'. + // CEL expressions have access to the token claims through a CEL variable, 'claims'. // 'claims' is a map of claim names to claim values. // For example, the 'sub' claim value can be accessed as 'claims.sub'. // Nested claims can be accessed using dot notation ('claims.foo.bar').
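The 'claims' variable described in these comments is plain CEL, so a quick way to sanity-check an expression outside the cluster is cel-go, the engine behind Kubernetes' CEL support. A sketch only; this is not how the API server wires its environment:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare 'claims' the way the docs describe it: a map of claim
	// names to arbitrary claim values.
	env, err := cel.NewEnv(cel.Variable("claims", cel.MapType(cel.StringType, cel.DynType)))
	if err != nil {
		panic(err)
	}
	// 'claims.foo.bar' walks a nested claim via dot notation.
	ast, iss := env.Compile(`claims.foo.bar`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"claims": map[string]any{"foo": map[string]any{"bar": "baz"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // baz
}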
@@ -489,12 +446,10 @@ type ExtraMapping struct { ValueExpression string `json:"valueExpression"` } -// OIDCClientConfig configures how platform clients -// interact with identity providers as an authentication -// method +// OIDCClientConfig configures how platform clients interact with identity providers as an authentication method. type OIDCClientConfig struct { - // componentName is a required field that specifies the name of the platform - // component being configured to use the identity provider as an authentication mode. + // componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + // // It is used in combination with componentNamespace as a unique identifier. // // componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -504,9 +459,8 @@ type OIDCClientConfig struct { // +required ComponentName string `json:"componentName"` - // componentNamespace is a required field that specifies the namespace in which the - // platform component being configured to use the identity provider as an authentication - // mode is running. + // componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + // // It is used in combination with componentName as a unique identifier. // // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -516,11 +470,8 @@ type OIDCClientConfig struct { // +required ComponentNamespace string `json:"componentNamespace"` - // clientID is a required field that configures the client identifier, from - // the identity provider, that the platform component uses for authentication - // requests made to the identity provider. - // The identity provider must accept this identifier for platform components - // to be able to use the identity provider as an authentication mode. + // clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + // The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. // // clientID must not be an empty string (""). // @@ -528,27 +479,21 @@ type OIDCClientConfig struct { // +required ClientID string `json:"clientID"` - // clientSecret is an optional field that configures the client secret used - // by the platform component when making authentication requests to the identity provider. + // clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + // + // When not specified, no client secret will be used when making authentication requests to the identity provider. // - // When not specified, no client secret will be used when making authentication requests - // to the identity provider. + // When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. // - // When specified, clientSecret references a Secret in the 'openshift-config' - // namespace that contains the client secret in the 'clientSecret' key of the '.data' field. // The client secret will be used when making authentication requests to the identity provider. 
// - // Public clients do not require a client secret but private - // clients do require a client secret to work with the identity provider. + // Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. // // +optional ClientSecret SecretNameReference `json:"clientSecret"` - // extraScopes is an optional field that configures the extra scopes that should - // be requested by the platform component when making authentication requests to the - // identity provider. - // This is useful if you have configured claim mappings that requires specific - // scopes to be requested beyond the standard OIDC scopes. + // extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + // This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes. // // When omitted, no additional scopes are requested. // @@ -561,8 +506,7 @@ type OIDCClientConfig struct { // of platform components and how they interact with // the configured identity providers. type OIDCClientStatus struct { - // componentName is a required field that specifies the name of the platform - // component using the identity provider as an authentication mode. + // componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. // It is used in combination with componentNamespace as a unique identifier. // // componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -572,9 +516,8 @@ type OIDCClientStatus struct { // +required ComponentName string `json:"componentName"` - // componentNamespace is a required field that specifies the namespace in which the - // platform component using the identity provider as an authentication - // mode is running. + // componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + // // It is used in combination with componentName as a unique identifier. // // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -585,6 +528,7 @@ ComponentNamespace string `json:"componentNamespace"` // currentOIDCClients is an optional list of clients that the component is currently using. + // // Entries must have unique issuerURL/clientID pairs. // // +listType=map @@ -593,8 +537,7 @@ // +optional CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // consumingUsers is an optional list of ServiceAccounts requiring - // read permissions on the `clientSecret` secret. + // consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. // // consumingUsers must not exceed 5 entries. // @@ -620,8 +563,7 @@ type OIDCClientStatus struct { // OIDCClientReference is a reference to a platform component // client configuration. type OIDCClientReference struct { - // oidcProviderName is a required reference to the 'name' of the identity provider - // configured in 'oidcProviders' that this client is associated with. + // oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with.
// // oidcProviderName must not be an empty string (""). // @@ -629,8 +571,7 @@ // +required OIDCProviderName string `json:"oidcProviderName"` - // issuerURL is a required field that specifies the URL of the identity - // provider that this client is configured to make requests against. + // issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. // // issuerURL must use the 'https' scheme. // @@ -638,9 +579,7 @@ // +required IssuerURL string `json:"issuerURL"` - // clientID is a required field that specifies the client identifier, from - // the identity provider, that the platform component is using for authentication - // requests made to the identity provider. + // clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. // // clientID must not be empty. // @@ -652,9 +591,7 @@ // +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" // +union type UsernameClaimMapping struct { - // claim is a required field that configures the JWT token - // claim whose value is assigned to the cluster identity - // field associated with this mapping. + // claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. // // claim must not be an empty string ("") and must not exceed 256 characters. // @@ -663,23 +600,21 @@ type UsernameClaimMapping struct { // +kubebuilder:validation:MaxLength:=256 Claim string `json:"claim"` - // prefixPolicy is an optional field that configures how a prefix should be - // applied to the value of the JWT claim specified in the 'claim' field. + // prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. // // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // - // When set to 'Prefix', the value specified in the prefix field will be - // prepended to the value of the JWT claim. + // When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + // // The prefix field must be set when prefixPolicy is 'Prefix'. // - // When set to 'NoPrefix', no prefix will be prepended to the value - // of the JWT claim. + // When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + // + // When omitted, this means no opinion and the platform is left to choose any prefixes that are applied, which is subject to change over time. + // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. // - // When omitted, this means no opinion and the platform is left to choose - // any prefixes that are applied which is subject to change over time. - // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - // when the claim is not 'email'.
// As an example, consider the following scenario: + // // `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", // and `claim` is set to: @@ -691,8 +626,7 @@ type UsernameClaimMapping struct { // +unionDiscriminator PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` - // prefix configures the prefix that should be prepended to the value - // of the JWT claim. + // prefix configures the prefix that should be prepended to the value of the JWT claim. // // prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. // @@ -701,9 +635,7 @@ type UsernameClaimMapping struct { Prefix *UsernamePrefix `json:"prefix"` } -// UsernamePrefixPolicy configures how prefixes should be applied -// to values extracted from the JWT claims during the process of mapping -// JWT claims to cluster identity attributes. +// UsernamePrefixPolicy configures how prefixes should be applied to values extracted from the JWT claims during the process of mapping JWT claims to cluster identity attributes. // +enum type UsernamePrefixPolicy string @@ -722,9 +654,7 @@ var ( // UsernamePrefix configures the string that should // be used as a prefix for username claim mappings. type UsernamePrefix struct { - // prefixString is a required field that configures the prefix that will - // be applied to cluster identity username attribute - // during the process of mapping JWT claims to cluster identity attributes. + // prefixString is a required field that configures the prefix that will be applied to the cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. // // prefixString must not be an empty string (""). // @@ -738,15 +668,11 @@ type UsernamePrefix struct { type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` - // prefix is an optional field that configures the prefix that will be - // applied to the cluster identity attribute during the process of mapping - // JWT claims to cluster identity attributes. + // prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. // // When omitted (""), no prefix is applied to the cluster identity attribute. // - // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - // an array of strings "a", "b" and "c", the mapping will result in an - // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of strings "myoidc:a", "myoidc:b" and "myoidc:c". // // +optional Prefix string `json:"prefix"` @@ -780,19 +706,15 @@ type TokenClaimValidationRule struct { // // Allowed values are "RequiredClaim" and "CEL". // - // When set to 'RequiredClaim', the Kubernetes API server will be configured - // to validate that the incoming JWT contains the required claim and that its - // value matches the required value. + // When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. // - // When set to 'CEL', the Kubernetes API server will be configured - // to validate the incoming JWT against the configured CEL expression.
+ // When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. // +required Type TokenValidationRuleType `json:"type"` // requiredClaim allows configuring a required claim name and its expected value. - // This field is required when `type` is set to RequiredClaim, and must be omitted - // when `type` is set to any other value. The Kubernetes API server uses this field - // to validate if an incoming JWT is valid for this identity provider. + // This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + // The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. // // +optional RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"` @@ -814,10 +736,8 @@ type TokenRequiredClaim struct { // +required Claim string `json:"claim"` - // requiredValue is a required field that configures the value that 'claim' must - // have when taken from the incoming JWT claims. - // If the value in the JWT claims does not match, the token - // will be rejected for authentication. + // requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + // If the value in the JWT claims does not match, the token will be rejected for authentication. // // requiredValue must not be an empty string (""). // @@ -836,8 +756,7 @@ type TokenClaimValidationCELRule struct { // +required Expression string `json:"expression,omitempty"` - // message is a required human-readable message to be logged by the Kubernetes API server - // if the CEL expression defined in 'expression' fails. + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. // message must be at least 1 character in length and must not exceed 256 characters. // +required // +kubebuilder:validation:MinLength=1 @@ -848,8 +767,8 @@ type TokenClaimValidationCELRule struct { // TokenUserValidationRule provides a CEL-based rule used to validate a token subject. // Each rule contains a CEL expression that is evaluated against the token’s claims. type TokenUserValidationRule struct { - // expression is a required CEL expression that performs a validation - // on cluster user identity attributes like username, groups, etc. + // expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + // // The expression must evaluate to a boolean value. // When the expression evaluates to 'true', the cluster user identity is considered valid. // When the expression evaluates to 'false', the cluster user identity is not considered valid. @@ -859,8 +778,7 @@ type TokenUserValidationRule struct { // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 Expression string `json:"expression,omitempty"` - // message is a required human-readable message to be logged by the Kubernetes API server - // if the CEL expression defined in 'expression' fails. + // message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. // message must be at least 1 character in length and must not exceed 256 characters. 
// +required // +kubebuilder:validation:MinLength=1 diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 313ed57a4..369ba1e7a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -302,9 +302,10 @@ type PlatformSpec struct { // balancers, dynamic volume provisioning, machine creation and deletion, and // other integrations are enabled. If None, no infrastructure automation is // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - // and must handle unrecognized platforms as None if they do not support that platform. + // "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + // "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + // components may not support all platforms, and must handle unrecognized + // platforms as None if they do not support that platform. // // +unionDiscriminator Type PlatformType `json:"type"` diff --git a/vendor/github.com/openshift/api/config/v1/types_insights.go b/vendor/github.com/openshift/api/config/v1/types_insights.go index b0959881f..710d4303d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1/types_insights.go @@ -13,6 +13,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2448 // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig +// +openshift:capability=Insights // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index c0d1602b3..fb8ed2fff 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -41,7 +41,7 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. 
-// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" +// +kubebuilder:validation:XValidation:rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. @@ -85,7 +85,6 @@ type NetworkSpec struct { // the network diagnostics feature will be disabled. // // +optional - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` } @@ -119,7 +118,6 @@ type NetworkStatus struct { // +optional // +listType=map // +listMapKey=type - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index 1e5189796..48657b089 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -7,9 +7,10 @@ type TLSSecurityProfile struct { // type is one of Old, Intermediate, Modern or Custom. Custom provides the // ability to specify individual TLS security profile parameters. // - // The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - // configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - // forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + // The profiles are based on version 5.7 of the Mozilla Server Side TLS + // configuration guidelines. The cipher lists consist of the configuration's + // "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + // See: https://ssl-config.mozilla.org/guidelines/5.7.json // // The profiles are intent based, so they may change over time as new ciphers are // developed and existing ciphers are found to be insecure. Depending on @@ -22,9 +23,6 @@ type TLSSecurityProfile struct { // old is a TLS profile for use when services need to be accessed by very old // clients or libraries and should be used only as a last resort. // - // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - // by the "old" profile ciphers. 
- // // This profile is equivalent to a Custom profile specified as: // minTLSVersion: VersionTLS10 // ciphers: @@ -37,23 +35,15 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-AES256-GCM-SHA384 // - ECDHE-ECDSA-CHACHA20-POLY1305 // - ECDHE-RSA-CHACHA20-POLY1305 - // - DHE-RSA-AES128-GCM-SHA256 - // - DHE-RSA-AES256-GCM-SHA384 - // - DHE-RSA-CHACHA20-POLY1305 // - ECDHE-ECDSA-AES128-SHA256 // - ECDHE-RSA-AES128-SHA256 // - ECDHE-ECDSA-AES128-SHA // - ECDHE-RSA-AES128-SHA - // - ECDHE-ECDSA-AES256-SHA384 - // - ECDHE-RSA-AES256-SHA384 // - ECDHE-ECDSA-AES256-SHA // - ECDHE-RSA-AES256-SHA - // - DHE-RSA-AES128-SHA256 - // - DHE-RSA-AES256-SHA256 // - AES128-GCM-SHA256 // - AES256-GCM-SHA384 // - AES128-SHA256 - // - AES256-SHA256 // - AES128-SHA // - AES256-SHA // - DES-CBC3-SHA @@ -66,9 +56,6 @@ type TLSSecurityProfile struct { // legacy clients and want to remain highly secure while being compatible with // most clients currently in use. // - // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - // by the "intermediate" profile ciphers. - // // This profile is equivalent to a Custom profile specified as: // minTLSVersion: VersionTLS12 // ciphers: @@ -81,8 +68,6 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-AES256-GCM-SHA384 // - ECDHE-ECDSA-CHACHA20-POLY1305 // - ECDHE-RSA-CHACHA20-POLY1305 - // - DHE-RSA-AES128-GCM-SHA256 - // - DHE-RSA-AES256-GCM-SHA384 // // +optional // +nullable @@ -160,12 +145,14 @@ const ( // TLSProfileSpec is the desired behavior of a TLSSecurityProfile. type TLSProfileSpec struct { // ciphers is used to specify the cipher algorithms that are negotiated - // during the TLS handshake. Operators may remove entries their operands - // do not support. For example, to use DES-CBC3-SHA (yaml): + // during the TLS handshake. Operators may remove entries that their operands + // do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): // // ciphers: - // - DES-CBC3-SHA + // - ECDHE-RSA-AES128-GCM-SHA256 // + // TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + // and are always enabled when TLS 1.3 is negotiated. // +listType=atomic Ciphers []string `json:"ciphers"` // minTLSVersion is used to specify the minimal version of the TLS protocol @@ -200,9 +187,11 @@ const ( // TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec. // -// These profiles are based on version 5.0 of the Mozilla Server Side TLS -// configuration guidelines (2019-06-28) with TLS 1.3 cipher suites prepended for -// forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json +// These profiles are based on version 5.7 of the Mozilla Server Side TLS +// configuration guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json +// +// Each Ciphers slice is the configuration's "ciphersuites" followed by the +// Go-specific "ciphers" from the guidelines JSON. // // NOTE: The caller needs to make sure to check that these constants are valid // for their binary. Not all entries map to values for all binaries. 
In the case @@ -220,23 +209,15 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", - "DHE-RSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-RSA-AES128-SHA", - "ECDHE-ECDSA-AES256-SHA384", - "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA", "ECDHE-RSA-AES256-SHA", - "DHE-RSA-AES128-SHA256", - "DHE-RSA-AES256-SHA256", "AES128-GCM-SHA256", "AES256-GCM-SHA384", "AES128-SHA256", - "AES256-SHA256", "AES128-SHA", "AES256-SHA", "DES-CBC3-SHA", @@ -254,8 +235,6 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", }, MinTLSVersion: VersionTLS12, }, diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml index 86a514606..2e45da09e 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml @@ -316,11 +316,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -345,9 +348,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -360,8 +360,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -382,9 +380,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. 
- This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -397,23 +392,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -424,9 +411,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml index 505489517..272d49db0 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml @@ -247,11 +247,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -276,9 +279,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -291,8 +291,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -313,9 +311,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. 
- This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -328,23 +323,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -355,9 +342,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml index f4dec2739..23c438144 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml @@ -316,11 +316,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -345,9 +348,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -360,8 +360,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -382,9 +380,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. 
- The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -397,23 +392,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -424,9 +411,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-OKD.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-OKD.crd.yaml index 3809b7864..3c81a12e8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-OKD.crd.yaml @@ -247,11 +247,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -276,9 +279,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -291,8 +291,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -313,9 +311,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. 
- The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -328,23 +323,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -355,9 +342,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml index 6206ad77a..1d75d68e5 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml @@ -158,69 +158,6 @@ spec: description: encryption allows the configuration of encryption of resources at the datastore layer. properties: - kms: - description: |- - kms defines the configuration for the external KMS instance that manages the encryption keys, - when KMS encryption is enabled sensitive resources will be encrypted using keys managed by an - externally configured KMS instance. - - The Key Management Service (KMS) instance provides symmetric encryption and is responsible for - managing the lifecyle of the encryption keys outside of the control plane. - This allows integration with an external provider to manage the data encryption keys securely. - properties: - aws: - description: |- - aws defines the key config for using an AWS KMS instance - for the encryption. The AWS KMS instance is managed - by the user outside the purview of the control plane. - properties: - keyARN: - description: |- - keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. - The value must adhere to the format `arn:aws:kms:::key/`, where: - - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - - `` is a 12-digit numeric identifier for the AWS account. 
- - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. - maxLength: 128 - minLength: 1 - type: string - x-kubernetes-validations: - - message: keyARN must follow the format `arn:aws:kms:::key/`. - The account ID must be a 12 digit number and the region - and key ID should consist only of lowercase hexadecimal - characters and hyphens (-). - rule: self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$') - region: - description: |- - region specifies the AWS region where the KMS instance exists, and follows the format - `--`, e.g.: `us-east-1`. - Only lowercase letters and hyphens followed by numbers are allowed. - maxLength: 64 - minLength: 1 - type: string - x-kubernetes-validations: - - message: region must be a valid AWS region, consisting - of lowercase characters, digits and hyphens (-) only. - rule: self.matches('^[a-z0-9]+(-[a-z0-9]+)*$') - required: - - keyARN - - region - type: object - type: - description: |- - type defines the kind of platform for the KMS provider. - Available provider types are AWS only. - enum: - - AWS - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: aws config is required when kms provider type is AWS, - and forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) - : !has(self.aws)' type: description: |- type defines what encryption type should be used to encrypt resources at the datastore layer. @@ -244,11 +181,6 @@ spec: - KMS type: string type: object - x-kubernetes-validations: - - message: kms config is required when encryption type is KMS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''KMS'' ? has(self.kms) : - !has(self.kms)' servingCerts: description: |- servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates @@ -316,11 +248,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -345,9 +280,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -360,8 +292,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -382,9 +312,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. 
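The KMS `keyARN` field removed earlier in this hunk carried the CEL rule `self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$')`. The equivalent check in plain Go, useful for seeing what the rule accepted:

```go
// The same ARN-shape check the removed CEL validation enforced:
// region of lowercase letters/digits/hyphens, a 12-digit account ID,
// and a key ID of lowercase hex characters and hyphens.
package main

import (
	"fmt"
	"regexp"
)

var keyARNRe = regexp.MustCompile(`^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$`)

func main() {
	fmt.Println(keyARNRe.MatchString(
		"arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab")) // true
	fmt.Println(keyARNRe.MatchString(
		"arn:aws:kms:us-east-1:12345:key/abc")) // false: account ID is not 12 digits
}
```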
- This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -397,23 +324,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -424,9 +343,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml index 7476fd465..e3c2202ea 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml @@ -71,37 +71,33 @@ spec: type: object oidcProviders: description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". At most one provider can be configured. items: properties: claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. + description: claimMappings is a required field that configures + the rules to be used by the Kubernetes API server for translating + claims in a JWT token, issued by the identity provider, to + a cluster identity. properties: extra: description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. + extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. + key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided. items: description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. 
It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. + ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. + It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. properties: key: description: |- - key is a required field that specifies the string - to use as the extra attribute key. + key is a required field that specifies the string to use as the extra attribute key. key must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. @@ -114,8 +110,7 @@ spec: It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length. maxLength: 510 minLength: 1 @@ -159,14 +154,12 @@ spec: rule: self.split('/', 2)[1].size() <= 256 valueExpression: description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. + valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. "", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). @@ -187,76 +180,62 @@ spec: x-kubernetes-list-type: map groups: description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). + groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + + When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. properties: claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + description: claim is a required field that configures + the JWT token claim whose value is assigned to the + cluster identity field associated with this mapping. 
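The groups mapping just described takes a claim whose value is a comma-separated list of groups and applies the optional prefix to each entry. A sketch of those semantics (hypothetical helper, not the API server's implementation):

```go
// Sketch of the documented groups mapping: split the comma-separated claim
// value and prepend the configured prefix to each group.
package main

import (
	"fmt"
	"strings"
)

func mapGroups(claimValue, prefix string) []string {
	var groups []string
	for _, g := range strings.Split(claimValue, ",") {
		g = strings.TrimSpace(g) // whitespace handling is an assumption of this sketch
		if g != "" {
			groups = append(groups, prefix+g)
		}
	}
	return groups
}

func main() {
	// claim "a", "b", "c" with prefix "myoidc:" -> [myoidc:a myoidc:b myoidc:c]
	fmt.Println(mapGroups("a, b, c", "myoidc:"))
}
```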
type: string prefix: description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. + prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. When omitted (""), no prefix is applied to the cluster identity attribute. - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string required: - claim type: object uid: description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. + uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. When using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value. - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. + When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + The current default is to use the 'sub' claim. properties: claim: description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. + claim is an optional field for specifying the JWT token claim that is used in the mapping. + The value of this claim will be assigned to the field in which this mapping is associated. Precisely one of claim or expression must be set. claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. + When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. maxLength: 256 minLength: 1 type: string expression: description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. + expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). Precisely one of claim or expression must be set. expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 1024 characters in length. + When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. maxLength: 1024 minLength: 1 type: string @@ -266,15 +245,14 @@ spec: set rule: 'has(self.claim) ? 
!has(self.expression) : has(self.expression)' username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. + description: username is a required field that configures + how the username of a cluster identity should be constructed + from the claims in a JWT token issued by the identity + provider. properties: claim: description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim must not be an empty string ("") and must not exceed 256 characters. maxLength: 256 @@ -282,16 +260,13 @@ spec: type: string prefix: description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. + prefix configures the prefix that should be prepended to the value of the JWT claim. prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. properties: prefixString: description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. + prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. prefixString must not be an empty string (""). minLength: 1 @@ -301,23 +276,21 @@ spec: type: object prefixPolicy: description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. + prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. + When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + The prefix field must be set when prefixPolicy is 'Prefix'. - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. + When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + + When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. + Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. 
As an example, consider the following scenario: + `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", and `claim` is set to: @@ -342,9 +315,7 @@ spec: type: object claimValidationRules: description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. + claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. Validation rules are joined via an AND operation. items: @@ -368,8 +339,7 @@ spec: type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. + message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -381,9 +351,8 @@ spec: requiredClaim: description: |- requiredClaim allows configuring a required claim name and its expected value. - This field is required when `type` is set to RequiredClaim, and must be omitted - when `type` is set to any other value. The Kubernetes API server uses this field - to validate if an incoming JWT is valid for this identity provider. + This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. properties: claim: description: |- @@ -395,10 +364,8 @@ spec: type: string requiredValue: description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. + requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + If the value in the JWT claims does not match, the token will be rejected for authentication. requiredValue must not be an empty string (""). minLength: 1 @@ -413,12 +380,9 @@ spec: Allowed values are "RequiredClaim" and "CEL". - When set to 'RequiredClaim', the Kubernetes API server will be configured - to validate that the incoming JWT contains the required claim and that its - value matches the required value. + When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. - When set to 'CEL', the Kubernetes API server will be configured - to validate the incoming JWT against the configured CEL expression. + When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. enum: - RequiredClaim - CEL @@ -438,15 +402,14 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. 
+ description: issuer is a required field that configures how + the platform interacts with the identity provider and how + tokens issued from the identity provider are evaluated by + the Kubernetes API server. properties: audiences: description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. + audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token. audiences must contain at least one entry and must not exceed ten entries. @@ -459,12 +422,12 @@ spec: x-kubernetes-list-type: set discoveryURL: description: |- - discoveryURL is an optional field that, if specified, overrides the default discovery endpoint - used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` - as "{issuerURL}/.well-known/openid-configuration". + discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". - The discoveryURL must be a valid absolute HTTPS URL. It must not contain query - parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). + The discoveryURL must be a valid absolute HTTPS URL. + It must not contain query parameters, user information, or fragments. + Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters. maxLength: 2048 minLength: 1 @@ -482,15 +445,11 @@ spec: rule: '!self.matches(''^https://.+:.+@.+/.*$'')' issuerCertificateAuthority: description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. + issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. When not specified, the system trust is used. - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. + When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. properties: name: description: name is the metadata.name of the referenced @@ -501,10 +460,8 @@ spec: type: object issuerURL: description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. 
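The `discoveryURL` behavior described above is straightforward to model: when unset it is derived from `issuerURL`, and an explicit override must be an absolute HTTPS URL with no query, fragment, or user info (and, per the manifests, must differ from `issuerURL` ignoring trailing slashes). A sketch under those stated constraints, with hypothetical helper names:

```go
// Sketch of the documented discoveryURL defaulting and validation.
package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

func defaultDiscoveryURL(issuerURL string) string {
	// "{issuerURL}/.well-known/openid-configuration"
	return strings.TrimRight(issuerURL, "/") + "/.well-known/openid-configuration"
}

func validateDiscoveryURL(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if !u.IsAbs() || u.Scheme != "https" {
		return errors.New("discoveryURL must be an absolute https URL")
	}
	if u.RawQuery != "" || u.Fragment != "" || u.User != nil {
		return errors.New("discoveryURL must not contain a query, fragment, or user info")
	}
	return nil
}

func main() {
	d := defaultDiscoveryURL("https://myoidc.tld/")
	fmt.Println(d, validateDiscoveryURL(d)) // https://myoidc.tld/.well-known/openid-configuration <nil>
}
```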
Must be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -533,50 +490,39 @@ spec: : true' name: description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. + name is a required field that configures the unique human-readable identifier associated with the identity provider. + It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. name must not be an empty string (""). minLength: 1 type: string oidcClients: description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. + oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method + description: OIDCClientConfig configures how platform clients + interact with identity providers as an authentication method. properties: clientID: description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. + clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. clientID must not be an empty string (""). minLength: 1 type: string clientSecret: description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. + clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + + When not specified, no client secret will be used when making authentication requests to the identity provider. - When not specified, no client secret will be used when making authentication requests - to the identity provider. + When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider. - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. + Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. 
properties: name: description: name is the metadata.name of the referenced @@ -587,8 +533,8 @@ spec: type: object componentName: description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -597,9 +543,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -608,11 +553,8 @@ spec: type: string extraScopes: description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. + extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes. When omitted, no additional scopes are requested. items: @@ -632,9 +574,7 @@ spec: x-kubernetes-list-type: map userValidationRules: description: |- - userValidationRules is an optional field that configures the set of rules - used to validate the cluster user identity that was constructed via - mapping token claims to user identity attributes. + userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified. @@ -645,8 +585,8 @@ spec: properties: expression: description: |- - expression is a required CEL expression that performs a validation - on cluster user identity attributes like username, groups, etc. + expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + The expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. @@ -656,8 +596,7 @@ spec: type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. 
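The `userValidationRules` described above are AND-joined: every rule's expression must evaluate to true, and a rule's message is logged when its expression fails. A plain-Go stand-in for that chaining (real rules are CEL expressions over the cluster identity, not Go predicates):

```go
// Stand-in for the AND-joined validation chain: the first failing rule
// rejects the identity and surfaces its message.
package main

import (
	"fmt"
	"strings"
)

type identity struct {
	Username string
	Groups   []string
}

type rule struct {
	check   func(identity) bool // stand-in for the CEL 'expression'
	message string              // logged when the check fails
}

func validate(id identity, rules []rule) error {
	for _, r := range rules {
		if !r.check(id) {
			return fmt.Errorf("identity rejected: %s", r.message)
		}
	}
	return nil
}

func main() {
	rules := []rule{{
		check:   func(id identity) bool { return !strings.HasPrefix(id.Username, "system:") },
		message: "username must not use the reserved 'system:' prefix",
	}}
	fmt.Println(validate(identity{Username: "myoidc:userA"}, rules)) // <nil>
}
```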
+ message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -793,9 +732,9 @@ spec: - name type: object oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. + description: oidcClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin. items: description: |- OIDCClientStatus represents the current state @@ -804,8 +743,7 @@ spec: properties: componentName: description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -814,9 +752,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -893,8 +830,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. + consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. consumingUsers must not exceed 5 entries. items: @@ -910,6 +846,7 @@ spec: currentOIDCClients: description: |- currentOIDCClients is an optional list of clients that the component is currently using. + Entries must have unique issuerURL/clientID pairs. items: description: |- @@ -918,25 +855,21 @@ spec: properties: clientID: description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. + clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. clientID must not be empty. minLength: 1 type: string issuerURL: description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. + issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. issuerURL must use the 'https' scheme. pattern: ^https:\/\/[^\s] type: string oidcProviderName: description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. 
+ oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. oidcProviderName must not be an empty string (""). minLength: 1 diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml index 5d93d98cc..64b650232 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml @@ -71,37 +71,33 @@ spec: type: object oidcProviders: description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". At most one provider can be configured. items: properties: claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. + description: claimMappings is a required field that configures + the rules to be used by the Kubernetes API server for translating + claims in a JWT token, issued by the identity provider, to + a cluster identity. properties: extra: description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. + extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. + key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided. items: description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. + ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. + It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. properties: key: description: |- - key is a required field that specifies the string - to use as the extra attribute key. + key is a required field that specifies the string to use as the extra attribute key. key must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. @@ -114,8 +110,7 @@ spec: It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. 
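The extra-mapping `key` constraints spelled out above (a domain-prefixed path, reserved domains excluded, bounded lengths, including the 256-character path limit repeated just below) reduce to a few structural checks. This sketch covers the structural rules but skips the full character-set validation of the path portion:

```go
// Sketch of the documented extra-mapping key shape: "domain/path", where the
// domain must not be (or be a subdomain of) a reserved domain, the whole key
// is at most 510 characters, and the path is non-empty and at most 256.
package main

import (
	"fmt"
	"strings"
)

var reserved = []string{"kubernetes.io", "k8s.io", "openshift.io"}

func validExtraKey(key string) bool {
	domain, path, ok := strings.Cut(key, "/")
	if !ok || domain == "" || path == "" || len(key) > 510 || len(path) > 256 {
		return false
	}
	for _, r := range reserved {
		if domain == r || strings.HasSuffix(domain, "."+r) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validExtraKey("example.org/foo"))      // true
	fmt.Println(validExtraKey("sub.openshift.io/foo")) // false: reserved domain
}
```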
It must not exceed 256 characters in length. maxLength: 510 minLength: 1 @@ -159,14 +154,12 @@ spec: rule: self.split('/', 2)[1].size() <= 256 valueExpression: description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. + valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. "", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). @@ -187,76 +180,62 @@ spec: x-kubernetes-list-type: map groups: description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). + groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + + When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. properties: claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + description: claim is a required field that configures + the JWT token claim whose value is assigned to the + cluster identity field associated with this mapping. type: string prefix: description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. + prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. When omitted (""), no prefix is applied to the cluster identity attribute. - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string required: - claim type: object uid: description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. + uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. When using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value. 
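For the uid mapping just described, the manifests enforce that precisely one of `claim` or `expression` is set, via the CEL rule `has(self.claim) ? !has(self.expression) : has(self.expression)`. The same exclusive-or expressed in Go:

```go
// The "precisely one of claim or expression" rule from the CRD, as a Go check.
package main

import "fmt"

type uidMapping struct {
	Claim      string // optional JWT claim name
	Expression string // optional CEL expression
}

func validUID(m uidMapping) bool {
	// exactly one of the two fields may be set
	return (m.Claim != "") != (m.Expression != "")
}

func main() {
	fmt.Println(validUID(uidMapping{Claim: "sub"}))                           // true
	fmt.Println(validUID(uidMapping{Claim: "sub", Expression: "claims.sub"})) // false
}
```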
- When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. + When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + The current default is to use the 'sub' claim. properties: claim: description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. + claim is an optional field for specifying the JWT token claim that is used in the mapping. + The value of this claim will be assigned to the field in which this mapping is associated. Precisely one of claim or expression must be set. claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. + When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. maxLength: 256 minLength: 1 type: string expression: description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. + expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). Precisely one of claim or expression must be set. expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 1024 characters in length. + When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. maxLength: 1024 minLength: 1 type: string @@ -266,15 +245,14 @@ spec: set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. + description: username is a required field that configures + how the username of a cluster identity should be constructed + from the claims in a JWT token issued by the identity + provider. properties: claim: description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim must not be an empty string ("") and must not exceed 256 characters. maxLength: 256 @@ -282,16 +260,13 @@ spec: type: string prefix: description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. + prefix configures the prefix that should be prepended to the value of the JWT claim. prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. 
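The `prefixPolicy` semantics repeated throughout these manifests reduce to a small decision table: 'Prefix' prepends the configured `prefixString`, 'NoPrefix' prepends nothing, and the omitted case currently prepends `{issuerURL}#` unless the claim is 'email'. A sketch of that defaulting (hypothetical helper, not the API server's code):

```go
// Sketch of the documented username prefixPolicy behavior.
package main

import "fmt"

func usernamePrefix(policy, prefixString, claim, issuerURL string) string {
	switch policy {
	case "Prefix":
		return prefixString
	case "NoPrefix":
		return ""
	default: // omitted: current platform default, subject to change
		if claim == "email" {
			return ""
		}
		return issuerURL + "#"
	}
}

func main() {
	// Matches the manifests' example: prefix unset, issuerURL https://myoidc.tld,
	// claim "username" -> "https://myoidc.tld#userA".
	p := usernamePrefix("", "", "username", "https://myoidc.tld")
	fmt.Println(p + "userA")
}
```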
properties: prefixString: description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. + prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. prefixString must not be an empty string (""). minLength: 1 @@ -301,23 +276,21 @@ spec: type: object prefixPolicy: description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. + prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. + When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + The prefix field must be set when prefixPolicy is 'Prefix'. - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. + When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + + When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. + Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. As an example, consider the following scenario: + `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", and `claim` is set to: @@ -342,9 +315,7 @@ spec: type: object claimValidationRules: description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. + claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. Validation rules are joined via an AND operation. items: @@ -356,9 +327,8 @@ spec: requiredClaim: description: |- requiredClaim allows configuring a required claim name and its expected value. - This field is required when `type` is set to RequiredClaim, and must be omitted - when `type` is set to any other value. The Kubernetes API server uses this field - to validate if an incoming JWT is valid for this identity provider. + This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. properties: claim: description: |- @@ -370,10 +340,8 @@ spec: type: string requiredValue: description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. 
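The `RequiredClaim` validation being re-worded in this hunk boils down to: the claim must be present in the incoming JWT and its value must equal `requiredValue`, otherwise the token is rejected. In Go terms:

```go
// Sketch of the RequiredClaim rule: the configured claim must exist and
// its value must match requiredValue exactly.
package main

import "fmt"

func requiredClaimOK(claims map[string]any, claim, requiredValue string) bool {
	v, ok := claims[claim]
	if !ok {
		return false // token rejected when the claim is absent
	}
	s, ok := v.(string)
	return ok && s == requiredValue
}

func main() {
	claims := map[string]any{"env": "prod"}
	fmt.Println(requiredClaimOK(claims, "env", "prod")) // true
	fmt.Println(requiredClaimOK(claims, "env", "dev"))  // false: value mismatch
}
```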
- If the value in the JWT claims does not match, the token - will be rejected for authentication. + requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + If the value in the JWT claims does not match, the token will be rejected for authentication. requiredValue must not be an empty string (""). minLength: 1 @@ -388,12 +356,9 @@ spec: Allowed values are "RequiredClaim" and "CEL". - When set to 'RequiredClaim', the Kubernetes API server will be configured - to validate that the incoming JWT contains the required claim and that its - value matches the required value. + When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. - When set to 'CEL', the Kubernetes API server will be configured - to validate the incoming JWT against the configured CEL expression. + When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. enum: - RequiredClaim type: string @@ -408,15 +373,14 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. + description: issuer is a required field that configures how + the platform interacts with the identity provider and how + tokens issued from the identity provider are evaluated by + the Kubernetes API server. properties: audiences: description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. + audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token. audiences must contain at least one entry and must not exceed ten entries. @@ -429,15 +393,11 @@ spec: x-kubernetes-list-type: set issuerCertificateAuthority: description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. + issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. When not specified, the system trust is used. - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. + When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. properties: name: description: name is the metadata.name of the referenced @@ -448,10 +408,8 @@ spec: type: object issuerURL: description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. 
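The `audiences` requirement described above (at least one configured audience must match the token's 'aud' claim) is a simple set intersection; the 'aud' claim may be a single string or an array, modeled here as a slice:

```go
// Sketch of the documented audience check: any overlap between the configured
// audiences and the token's 'aud' values is sufficient.
package main

import "fmt"

func audienceAccepted(configured, tokenAud []string) bool {
	for _, want := range configured {
		for _, got := range tokenAud {
			if want == got {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(audienceAccepted([]string{"openshift"}, []string{"openshift", "other"})) // true
	fmt.Println(audienceAccepted([]string{"openshift"}, []string{"somebody-else"}))      // false
}
```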
+ issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. Must be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -475,50 +433,39 @@ spec: type: object name: description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. + name is a required field that configures the unique human-readable identifier associated with the identity provider. + It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. name must not be an empty string (""). minLength: 1 type: string oidcClients: description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. + oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method + description: OIDCClientConfig configures how platform clients + interact with identity providers as an authentication method. properties: clientID: description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. + clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. clientID must not be an empty string (""). minLength: 1 type: string clientSecret: description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. + clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. - When not specified, no client secret will be used when making authentication requests - to the identity provider. + When not specified, no client secret will be used when making authentication requests to the identity provider. + + When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider. 
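For reference, the issuer fields described above (issuerURL, audiences, issuerCertificateAuthority) combine into a stanza like the following minimal sketch; the URL, audience value, and ConfigMap name are illustrative:

```yaml
issuer:
  # Must use the 'https' scheme and contain no query, fragment, or user info.
  issuerURL: https://myoidc.tld
  # At least one entry must match the 'aud' claim of incoming JWTs.
  audiences:
  - openshift-aud
  # Optional: ConfigMap in openshift-config holding PEM CAs under 'ca-bundle.crt'.
  issuerCertificateAuthority:
    name: oidc-ca-bundle
```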
- Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. + Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. properties: name: description: name is the metadata.name of the referenced @@ -529,8 +476,8 @@ spec: type: object componentName: description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -539,9 +486,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -550,11 +496,8 @@ spec: type: string extraScopes: description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. + extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes. When omitted, no additional scopes are requested. items: @@ -693,9 +636,9 @@ spec: - name type: object oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. + description: oidcClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin. items: description: |- OIDCClientStatus represents the current state @@ -704,8 +647,7 @@ spec: properties: componentName: description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -714,9 +656,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. 
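A single oidcClients entry, following the clientID, clientSecret, componentName, componentNamespace, and extraScopes descriptions above, might look like this sketch; the component and Secret names are hypothetical:

```yaml
oidcClients:
- componentName: console                 # hypothetical platform component
  componentNamespace: openshift-console  # name + namespace form the unique key
  clientID: my-oidc-client
  clientSecret:
    name: console-oidc-secret            # Secret in openshift-config, 'clientSecret' key
  extraScopes:                           # optional; omitted means no extra scopes
  - email
  - profile
```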
+ componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -793,8 +734,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. + consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. consumingUsers must not exceed 5 entries. items: @@ -810,6 +750,7 @@ spec: currentOIDCClients: description: |- currentOIDCClients is an optional list of clients that the component is currently using. + Entries must have unique issuerURL/clientID pairs. items: description: |- @@ -818,25 +759,21 @@ spec: properties: clientID: description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. + clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. clientID must not be empty. minLength: 1 type: string issuerURL: description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. + issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. issuerURL must use the 'https' scheme. pattern: ^https:\/\/[^\s] type: string oidcProviderName: description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. + oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. oidcProviderName must not be an empty string (""). minLength: 1 diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml index 1a6afefa6..2f4c3180d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml @@ -71,37 +71,33 @@ spec: type: object oidcProviders: description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". At most one provider can be configured. items: properties: claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. 
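Since oidcProviders can only be set when "Type" is "OIDC" and at most one provider may be configured, the overall resource has roughly the following shape; this is a sketch, assuming the conventional singleton name 'cluster', with illustrative provider details:

```yaml
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC
  oidcProviders:          # at most one provider
  - name: myoidc
    issuer:
      issuerURL: https://myoidc.tld
      audiences:
      - openshift-aud
```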
+ description: claimMappings is a required field that configures + the rules to be used by the Kubernetes API server for translating + claims in a JWT token, issued by the identity provider, to + a cluster identity. properties: extra: description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. + extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. + key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided. items: description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. + ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. + It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. properties: key: description: |- - key is a required field that specifies the string - to use as the extra attribute key. + key is a required field that specifies the string to use as the extra attribute key. key must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. @@ -114,8 +110,7 @@ spec: It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length. maxLength: 510 minLength: 1 @@ -159,14 +154,12 @@ spec: rule: self.split('/', 2)[1].size() <= 256 valueExpression: description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. + valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. "", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). @@ -187,76 +180,62 @@ spec: x-kubernetes-list-type: map groups: description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). 
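An ExtraMapping as described above pairs a domain-prefixed key with a CEL expression over the 'claims' variable; a sketch with a hypothetical key and claim name:

```yaml
claimMappings:
  extra:
  - key: example.org/team          # domain-prefix path, per the 'example.org/foo' format above
    valueExpression: claims.team   # CEL; may produce a string or a string array
```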
+ groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + + When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. properties: claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + description: claim is a required field that configures + the JWT token claim whose value is assigned to the + cluster identity field associated with this mapping. type: string prefix: description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. + prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. When omitted (""), no prefix is applied to the cluster identity attribute. - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string required: - claim type: object uid: description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. + uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. When using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value. - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. + When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + The current default is to use the 'sub' claim. properties: claim: description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. + claim is an optional field for specifying the JWT token claim that is used in the mapping. + The value of this claim will be assigned to the field in which this mapping is associated. Precisely one of claim or expression must be set. claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. + When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. maxLength: 256 minLength: 1 type: string expression: description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. + expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. 
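Following the groups and uid descriptions above, and reusing the document's own "myoidc:" prefix example, a combined sketch:

```yaml
claimMappings:
  groups:
    claim: groups            # claim values "a","b","c" map to "myoidc:a","myoidc:b","myoidc:c"
    prefix: "myoidc:"
  uid:
    expression: claims.sub   # exactly one of claim or expression may be set
```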
+ CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). Precisely one of claim or expression must be set. expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 1024 characters in length. + When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. maxLength: 1024 minLength: 1 type: string @@ -266,15 +245,14 @@ spec: set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. + description: username is a required field that configures + how the username of a cluster identity should be constructed + from the claims in a JWT token issued by the identity + provider. properties: claim: description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim must not be an empty string ("") and must not exceed 256 characters. maxLength: 256 @@ -282,16 +260,13 @@ spec: type: string prefix: description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. + prefix configures the prefix that should be prepended to the value of the JWT claim. prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. properties: prefixString: description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. + prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. prefixString must not be an empty string (""). minLength: 1 @@ -301,23 +276,21 @@ spec: type: object prefixPolicy: description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. + prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. + When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + The prefix field must be set when prefixPolicy is 'Prefix'. - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. + When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + + When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. 
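Putting the username claim, prefixPolicy, and prefix fields together, a sketch of an explicit 'Prefix' configuration; the claim and prefix values are illustrative:

```yaml
claimMappings:
  username:
    claim: email
    prefixPolicy: Prefix     # 'prefix' must be set when the policy is 'Prefix'
    prefix:
      prefixString: "myoidc:"
```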
+ Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. As an example, consider the following scenario: + `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", and `claim` is set to: @@ -342,9 +315,7 @@ spec: type: object claimValidationRules: description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. + claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. Validation rules are joined via an AND operation. items: @@ -368,8 +339,7 @@ spec: type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. + message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -381,9 +351,8 @@ spec: requiredClaim: description: |- requiredClaim allows configuring a required claim name and its expected value. - This field is required when `type` is set to RequiredClaim, and must be omitted - when `type` is set to any other value. The Kubernetes API server uses this field - to validate if an incoming JWT is valid for this identity provider. + This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. properties: claim: description: |- @@ -395,10 +364,8 @@ spec: type: string requiredValue: description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. + requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + If the value in the JWT claims does not match, the token will be rejected for authentication. requiredValue must not be an empty string (""). minLength: 1 @@ -413,12 +380,9 @@ spec: Allowed values are "RequiredClaim" and "CEL". - When set to 'RequiredClaim', the Kubernetes API server will be configured - to validate that the incoming JWT contains the required claim and that its - value matches the required value. + When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. - When set to 'CEL', the Kubernetes API server will be configured - to validate the incoming JWT against the configured CEL expression. + When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. 
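A RequiredClaim validation rule of the kind described above might look like the following sketch, with a hypothetical claim name and value; remember that multiple rules are joined via AND:

```yaml
claimValidationRules:
- type: RequiredClaim
  requiredClaim:
    claim: email_verified
    requiredValue: "true"    # the JWT's claim value must match exactly
```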
enum: - RequiredClaim - CEL @@ -438,15 +402,14 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. + description: issuer is a required field that configures how + the platform interacts with the identity provider and how + tokens issued from the identity provider are evaluated by + the Kubernetes API server. properties: audiences: description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. + audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token. audiences must contain at least one entry and must not exceed ten entries. @@ -459,12 +422,12 @@ spec: x-kubernetes-list-type: set discoveryURL: description: |- - discoveryURL is an optional field that, if specified, overrides the default discovery endpoint - used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` - as "{issuerURL}/.well-known/openid-configuration". + discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". - The discoveryURL must be a valid absolute HTTPS URL. It must not contain query - parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). + The discoveryURL must be a valid absolute HTTPS URL. + It must not contain query parameters, user information, or fragments. + Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters. maxLength: 2048 minLength: 1 @@ -482,15 +445,11 @@ spec: rule: '!self.matches(''^https://.+:.+@.+/.*$'')' issuerCertificateAuthority: description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. + issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. When not specified, the system trust is used. - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. + When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. properties: name: description: name is the metadata.name of the referenced @@ -501,10 +460,8 @@ spec: type: object issuerURL: description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. 
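The discoveryURL override described above replaces the default "{issuerURL}/.well-known/openid-configuration" endpoint; a sketch with illustrative URLs, noting that the value must differ from issuerURL (ignoring trailing slashes):

```yaml
issuer:
  issuerURL: https://myoidc.tld
  # Absolute HTTPS URL with no query parameters, user info, or fragment.
  discoveryURL: https://proxy.myoidc.tld/.well-known/openid-configuration
  audiences:
  - openshift-aud
```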
- The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. Must be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -533,50 +490,39 @@ spec: : true' name: description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. + name is a required field that configures the unique human-readable identifier associated with the identity provider. + It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. name must not be an empty string (""). minLength: 1 type: string oidcClients: description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. + oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method + description: OIDCClientConfig configures how platform clients + interact with identity providers as an authentication method. properties: clientID: description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. + clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. clientID must not be an empty string (""). minLength: 1 type: string clientSecret: description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. + clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + + When not specified, no client secret will be used when making authentication requests to the identity provider. - When not specified, no client secret will be used when making authentication requests - to the identity provider. + When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. 
- When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider. - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. + Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. properties: name: description: name is the metadata.name of the referenced @@ -587,8 +533,8 @@ spec: type: object componentName: description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -597,9 +543,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -608,11 +553,8 @@ spec: type: string extraScopes: description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. + extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes. When omitted, no additional scopes are requested. items: @@ -632,9 +574,7 @@ spec: x-kubernetes-list-type: map userValidationRules: description: |- - userValidationRules is an optional field that configures the set of rules - used to validate the cluster user identity that was constructed via - mapping token claims to user identity attributes. + userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified. @@ -645,8 +585,8 @@ spec: properties: expression: description: |- - expression is a required CEL expression that performs a validation - on cluster user identity attributes like username, groups, etc. 
+ expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + The expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. @@ -656,8 +596,7 @@ spec: type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. + message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -793,9 +732,9 @@ spec: - name type: object oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. + description: oidcClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin. items: description: |- OIDCClientStatus represents the current state @@ -804,8 +743,7 @@ spec: properties: componentName: description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -814,9 +752,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -893,8 +830,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. + consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. consumingUsers must not exceed 5 entries. items: @@ -910,6 +846,7 @@ spec: currentOIDCClients: description: |- currentOIDCClients is an optional list of clients that the component is currently using. + Entries must have unique issuerURL/clientID pairs. items: description: |- @@ -918,25 +855,21 @@ spec: properties: clientID: description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. + clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. clientID must not be empty. 
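A userValidationRules entry of the shape described above, assuming the upstream Kubernetes convention that the mapped identity is exposed to CEL through a 'user' variable (that variable name is not stated in this manifest):

```yaml
userValidationRules:
- expression: "!user.username.startsWith('system:')"  # must evaluate to a boolean
  message: "username must not start with the reserved 'system:' prefix"
```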
minLength: 1 type: string issuerURL: description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. + issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. issuerURL must use the 'https' scheme. pattern: ^https:\/\/[^\s] type: string oidcProviderName: description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. + oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. oidcProviderName must not be an empty string (""). minLength: 1 diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml index 5233cc52f..2aff1f514 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml @@ -71,37 +71,33 @@ spec: type: object oidcProviders: description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". At most one provider can be configured. items: properties: claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. + description: claimMappings is a required field that configures + the rules to be used by the Kubernetes API server for translating + claims in a JWT token, issued by the identity provider, to + a cluster identity. properties: extra: description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. + extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. + key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided. items: description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. + ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. + It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. properties: key: description: |- - key is a required field that specifies the string - to use as the extra attribute key. + key is a required field that specifies the string to use as the extra attribute key. key must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. @@ -114,8 +110,7 @@ spec: It must only contain lower case alphanumeric characters and '-' or '.'. 
It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length. maxLength: 510 minLength: 1 @@ -159,14 +154,12 @@ spec: rule: self.split('/', 2)[1].size() <= 256 valueExpression: description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. + valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. "", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). @@ -187,76 +180,62 @@ spec: x-kubernetes-list-type: map groups: description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). + groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + + When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. properties: claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + description: claim is a required field that configures + the JWT token claim whose value is assigned to the + cluster identity field associated with this mapping. type: string prefix: description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. + prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. When omitted (""), no prefix is applied to the cluster identity attribute. - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". 
+ Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string required: - claim type: object uid: description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. + uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. When using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value. - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. + When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + The current default is to use the 'sub' claim. properties: claim: description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. + claim is an optional field for specifying the JWT token claim that is used in the mapping. + The value of this claim will be assigned to the field in which this mapping is associated. Precisely one of claim or expression must be set. claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. + When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. maxLength: 256 minLength: 1 type: string expression: description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. + expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). Precisely one of claim or expression must be set. expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 1024 characters in length. + When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. maxLength: 1024 minLength: 1 type: string @@ -266,15 +245,14 @@ spec: set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. + description: username is a required field that configures + how the username of a cluster identity should be constructed + from the claims in a JWT token issued by the identity + provider. properties: claim: description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. 
+ claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim must not be an empty string ("") and must not exceed 256 characters. maxLength: 256 @@ -282,16 +260,13 @@ spec: type: string prefix: description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. + prefix configures the prefix that should be prepended to the value of the JWT claim. prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. properties: prefixString: description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. + prefixString is a required field that configures the prefix that will be applied to cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. prefixString must not be an empty string (""). minLength: 1 @@ -301,23 +276,21 @@ spec: type: object prefixPolicy: description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. + prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. + When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + The prefix field must be set when prefixPolicy is 'Prefix'. - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. + When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + + When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. + Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. As an example, consider the following scenario: + `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", and `claim` is set to: @@ -342,9 +315,7 @@ spec: type: object claimValidationRules: description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. + claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. Validation rules are joined via an AND operation. items: @@ -356,9 +327,8 @@ spec: requiredClaim: description: |- requiredClaim allows configuring a required claim name and its expected value. - This field is required when `type` is set to RequiredClaim, and must be omitted - when `type` is set to any other value. 
The Kubernetes API server uses this field - to validate if an incoming JWT is valid for this identity provider. + This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. properties: claim: description: |- @@ -370,10 +340,8 @@ spec: type: string requiredValue: description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. + requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + If the value in the JWT claims does not match, the token will be rejected for authentication. requiredValue must not be an empty string (""). minLength: 1 @@ -388,12 +356,9 @@ spec: Allowed values are "RequiredClaim" and "CEL". - When set to 'RequiredClaim', the Kubernetes API server will be configured - to validate that the incoming JWT contains the required claim and that its - value matches the required value. + When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. - When set to 'CEL', the Kubernetes API server will be configured - to validate the incoming JWT against the configured CEL expression. + When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. enum: - RequiredClaim type: string @@ -408,15 +373,14 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. + description: issuer is a required field that configures how + the platform interacts with the identity provider and how + tokens issued from the identity provider are evaluated by + the Kubernetes API server. properties: audiences: description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. + audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token. audiences must contain at least one entry and must not exceed ten entries. @@ -429,15 +393,11 @@ spec: x-kubernetes-list-type: set issuerCertificateAuthority: description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. + issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. When not specified, the system trust is used. - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. 
+ When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. properties: name: description: name is the metadata.name of the referenced @@ -448,10 +408,8 @@ spec: type: object issuerURL: description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. Must be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -475,50 +433,39 @@ spec: type: object name: description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. + name is a required field that configures the unique human-readable identifier associated with the identity provider. + It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. name must not be an empty string (""). minLength: 1 type: string oidcClients: description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. + oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method + description: OIDCClientConfig configures how platform clients + interact with identity providers as an authentication method. properties: clientID: description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. + clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. clientID must not be an empty string (""). minLength: 1 type: string clientSecret: description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. + clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. 
- When not specified, no client secret will be used when making authentication requests - to the identity provider. + When not specified, no client secret will be used when making authentication requests to the identity provider. + + When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider. - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. + Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. properties: name: description: name is the metadata.name of the referenced @@ -529,8 +476,8 @@ type: object componentName: description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -539,9 +486,8 @@ type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -550,11 +496,8 @@ type: string extraScopes: description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. + extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes. When omitted, no additional scopes are requested. items: @@ -693,9 +636,9 @@ - name type: object oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. + description: oidcClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin.
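For review orientation, a minimal sketch of how the oidcClients fields reflowed above compose in a single spec.oidcProviders[].oidcClients entry; the component and Secret names are hypothetical and not taken from this diff:

    oidcClients:
    - componentName: console                 # with componentNamespace, a unique identifier
      componentNamespace: openshift-console
      clientID: console-oidc-client          # must be accepted by the identity provider
      clientSecret:
        name: console-client-secret          # Secret in 'openshift-config', key 'clientSecret'
      extraScopes:                           # extra scopes beyond the standard OIDC scopes
      - email
      - profile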
items: description: |- OIDCClientStatus represents the current state @@ -704,8 +647,7 @@ spec: properties: componentName: description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -714,9 +656,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -793,8 +734,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. + consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. consumingUsers must not exceed 5 entries. items: @@ -810,6 +750,7 @@ spec: currentOIDCClients: description: |- currentOIDCClients is an optional list of clients that the component is currently using. + Entries must have unique issuerURL/clientID pairs. items: description: |- @@ -818,25 +759,21 @@ spec: properties: clientID: description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. + clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. clientID must not be empty. minLength: 1 type: string issuerURL: description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. + issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. issuerURL must use the 'https' scheme. pattern: ^https:\/\/[^\s] type: string oidcProviderName: description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. + oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. oidcProviderName must not be an empty string (""). 
minLength: 1 diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml index 60698312a..11281f286 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml @@ -71,37 +71,33 @@ spec: type: object oidcProviders: description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". At most one provider can be configured. items: properties: claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. + description: claimMappings is a required field that configures + the rules to be used by the Kubernetes API server for translating + claims in a JWT token, issued by the identity provider, to + a cluster identity. properties: extra: description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. + extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. + key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided. items: description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. + ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. + It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token. properties: key: description: |- - key is a required field that specifies the string - to use as the extra attribute key. + key is a required field that specifies the string to use as the extra attribute key. key must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. @@ -114,8 +110,7 @@ spec: It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. + The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length. 
maxLength: 510 minLength: 1 @@ -159,14 +154,12 @@ spec: rule: self.split('/', 2)[1].size() <= 256 valueExpression: description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. + valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. "", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). @@ -187,76 +180,62 @@ spec: x-kubernetes-list-type: map groups: description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). + groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. + + When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). + For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. properties: claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + description: claim is a required field that configures + the JWT token claim whose value is assigned to the + cluster identity field associated with this mapping. type: string prefix: description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. + prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. When omitted (""), no prefix is applied to the cluster identity attribute. - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string required: - claim type: object uid: description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. + uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity. When using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value. 
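A minimal sketch of the claimMappings fields described above, using hypothetical claim names; 'claims' is the CEL variable holding the JWT claims:

    claimMappings:
      extra:
      - key: example.org/teams               # domain-prefix path, as required above
        valueExpression: claims.teams        # CEL; must produce a string or string array
      groups:
        claim: groups                        # e.g. a claim value of '"team-a", "team-b"'
        prefix: "myoidc:"                    # maps to "myoidc:team-a", "myoidc:team-b"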
- When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. + When omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. + The current default is to use the 'sub' claim. properties: claim: description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. + claim is an optional field for specifying the JWT token claim that is used in the mapping. + The value of this claim will be assigned to the field in which this mapping is associated. Precisely one of claim or expression must be set. claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. + When specified, claim must be at least 1 character in length and must not exceed 256 characters in length. maxLength: 256 minLength: 1 type: string expression: description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. + expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims. - CEL expressions have access to the token claims - through a CEL variable, 'claims'. + CEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). Precisely one of claim or expression must be set. expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 1024 characters in length. + When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length. maxLength: 1024 minLength: 1 type: string @@ -266,15 +245,14 @@ spec: set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. + description: username is a required field that configures + how the username of a cluster identity should be constructed + from the claims in a JWT token issued by the identity + provider. properties: claim: description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. + claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim must not be an empty string ("") and must not exceed 256 characters. maxLength: 256 @@ -282,16 +260,13 @@ spec: type: string prefix: description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. + prefix configures the prefix that should be prepended to the value of the JWT claim. prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. 
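A sketch of the uid mapping described above, with hypothetical values; precisely one of claim or expression may be set:

    uid:
      expression: claims.sub                 # CEL over 'claims'; must yield a single string
    # or, using a plain claim instead of an expression:
    # uid:
    #   claim: sub                           # the current platform default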
properties: prefixString: description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. + prefixString is a required field that configures the prefix that will be applied to the cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes. prefixString must not be an empty string (""). minLength: 1 @@ -301,23 +276,21 @@ type: object prefixPolicy: description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. + prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field. Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. + When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. + The prefix field must be set when prefixPolicy is 'Prefix'. - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. + When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. + + When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. + Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. As an example, consider the following scenario: + `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", and `claim` is set to: @@ -342,9 +315,7 @@ spec: type: object claimValidationRules: description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. + claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider. Validation rules are joined via an AND operation. items: @@ -368,8 +339,7 @@ type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. + message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -381,9 +351,8 @@ requiredClaim: description: |- requiredClaim allows configuring a required claim name and its expected value. - This field is required when `type` is set to RequiredClaim, and must be omitted - when `type` is set to any other value. The Kubernetes API server uses this field - to validate if an incoming JWT is valid for this identity provider.
+ This field is required when `type` is set to RequiredClaim, and must be omitted when `type` is set to any other value. + The Kubernetes API server uses this field to validate if an incoming JWT is valid for this identity provider. properties: claim: description: |- @@ -395,10 +364,8 @@ spec: type: string requiredValue: description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. + requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. + If the value in the JWT claims does not match, the token will be rejected for authentication. requiredValue must not be an empty string (""). minLength: 1 @@ -413,12 +380,9 @@ spec: Allowed values are "RequiredClaim" and "CEL". - When set to 'RequiredClaim', the Kubernetes API server will be configured - to validate that the incoming JWT contains the required claim and that its - value matches the required value. + When set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value. - When set to 'CEL', the Kubernetes API server will be configured - to validate the incoming JWT against the configured CEL expression. + When set to 'CEL', the Kubernetes API server will be configured to validate the incoming JWT against the configured CEL expression. enum: - RequiredClaim - CEL @@ -438,15 +402,14 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. + description: issuer is a required field that configures how + the platform interacts with the identity provider and how + tokens issued from the identity provider are evaluated by + the Kubernetes API server. properties: audiences: description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. + audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token. audiences must contain at least one entry and must not exceed ten entries. @@ -459,12 +422,12 @@ spec: x-kubernetes-list-type: set discoveryURL: description: |- - discoveryURL is an optional field that, if specified, overrides the default discovery endpoint - used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` - as "{issuerURL}/.well-known/openid-configuration". + discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. + By default, the discovery URL is derived from `issuerURL` as "{issuerURL}/.well-known/openid-configuration". - The discoveryURL must be a valid absolute HTTPS URL. It must not contain query - parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). + The discoveryURL must be a valid absolute HTTPS URL. + It must not contain query parameters, user information, or fragments. 
+ Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters. maxLength: 2048 minLength: 1 @@ -482,15 +445,11 @@ spec: rule: '!self.matches(''^https://.+:.+@.+/.*$'')' issuerCertificateAuthority: description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. + issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information. When not specified, the system trust is used. - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. + When specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap. properties: name: description: name is the metadata.name of the referenced @@ -501,10 +460,8 @@ spec: type: object issuerURL: description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + issuerURL is a required field that configures the URL used to issue tokens by the identity provider. + The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. Must be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. @@ -533,50 +490,39 @@ spec: : true' name: description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. + name is a required field that configures the unique human-readable identifier associated with the identity provider. + It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics. name must not be an empty string (""). minLength: 1 type: string oidcClients: description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. + oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method + description: OIDCClientConfig configures how platform clients + interact with identity providers as an authentication method. 
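A sketch of the issuer block described in this manifest, with hypothetical URLs and names:

    issuer:
      issuerURL: https://myoidc.tld          # https only; no query, fragment or user info
      audiences:                             # at least one entry must match the 'aud' claim
      - openshift-console
      discoveryURL: https://oidc.internal.tld/.well-known/openid-configuration
                                             # optional; must differ from issuerURL
      issuerCertificateAuthority:
        name: oidc-ca-bundle                 # ConfigMap in openshift-config, key 'ca-bundle.crt'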
properties: clientID: description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. + clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. + The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode. clientID must not be an empty string (""). minLength: 1 type: string clientSecret: description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. + clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider. + + When not specified, no client secret will be used when making authentication requests to the identity provider. - When not specified, no client secret will be used when making authentication requests - to the identity provider. + When specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider. - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. + Public clients do not require a client secret but private clients do require a client secret to work with the identity provider. properties: name: description: name is the metadata.name of the referenced @@ -587,8 +533,8 @@ spec: type: object componentName: description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. + It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length. @@ -597,9 +543,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. 
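The claimValidationRules fields reflowed earlier in this file compose as below; the claim and value are hypothetical:

    claimValidationRules:
    - type: RequiredClaim                    # this TechPreview schema also allows type: CEL
      requiredClaim:
        claim: email_verified                # claim that must be present in the JWT
        requiredValue: "true"                # token is rejected if the value differs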
@@ -608,11 +553,8 @@ spec: type: string extraScopes: description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. + extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. + This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes. When omitted, no additional scopes are requested. items: @@ -632,9 +574,7 @@ spec: x-kubernetes-list-type: map userValidationRules: description: |- - userValidationRules is an optional field that configures the set of rules - used to validate the cluster user identity that was constructed via - mapping token claims to user identity attributes. + userValidationRules is an optional field that configures the set of rules used to validate the cluster user identity that was constructed via mapping token claims to user identity attributes. Rules are CEL expressions that must evaluate to 'true' for authentication to succeed. If any rule in the chain of rules evaluates to 'false', authentication will fail. When specified, at least one rule must be specified and no more than 64 rules may be specified. @@ -645,8 +585,8 @@ spec: properties: expression: description: |- - expression is a required CEL expression that performs a validation - on cluster user identity attributes like username, groups, etc. + expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. + The expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. @@ -656,8 +596,7 @@ spec: type: string message: description: |- - message is a required human-readable message to be logged by the Kubernetes API server - if the CEL expression defined in 'expression' fails. + message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters. maxLength: 256 minLength: 1 @@ -793,9 +732,9 @@ spec: - name type: object oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. + description: oidcClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin. items: description: |- OIDCClientStatus represents the current state @@ -804,8 +743,7 @@ spec: properties: componentName: description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. + componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier. componentName must not be an empty string ("") and must not exceed 256 characters in length.
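A sketch of a userValidationRules entry as described above. The rule itself is hypothetical, and it assumes the Kubernetes-style 'user' CEL variable for the mapped identity, which this diff does not spell out:

    userValidationRules:
    - expression: "user.username.startsWith('myoidc:')"   # must evaluate to a boolean
      message: "username must carry the 'myoidc:' prefix" # logged when the rule fails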
@@ -814,9 +752,8 @@ spec: type: string componentNamespace: description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. + componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. + It is used in combination with componentName as a unique identifier. componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. @@ -893,8 +830,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. + consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret. consumingUsers must not exceed 5 entries. items: @@ -910,6 +846,7 @@ spec: currentOIDCClients: description: |- currentOIDCClients is an optional list of clients that the component is currently using. + Entries must have unique issuerURL/clientID pairs. items: description: |- @@ -918,25 +855,21 @@ spec: properties: clientID: description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. + clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider. clientID must not be empty. minLength: 1 type: string issuerURL: description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. + issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against. issuerURL must use the 'https' scheme. pattern: ^https:\/\/[^\s] type: string oidcProviderName: description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. + oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with. oidcProviderName must not be an empty string (""). minLength: 1 diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 7d200f4d6..9086d4a57 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -628,9 +628,10 @@ spec: balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - "AlibabaCloud", "Nutanix" and "None". 
Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not support that platform. + "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + components may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. enum: - "" - AWS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml index 4ecbc18e9..803c48a1e 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml @@ -562,9 +562,10 @@ spec: balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not support that platform. + "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + components may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. enum: - "" - AWS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml index 7308c87b8..de1a68c90 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -628,9 +628,10 @@ spec: balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not support that platform. + "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + components may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. 
enum: - "" - AWS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml index ed9fb2c3b..245bc3ea6 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml @@ -562,9 +562,10 @@ spec: balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not support that platform. + "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + components may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. enum: - "" - AWS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index f14dd3abe..c45b7d6e8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -628,9 +628,10 @@ spec: balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", - "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do not support that platform. + "OpenStack", "VSphere", "oVirt", "IBMCloud", "KubeVirt", "EquinixMetal", + "PowerVS", "AlibabaCloud", "Nutanix", "External", and "None". Individual + components may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. 
enum: - "" - AWS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml index 8e7d3c392..97698ce0b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml @@ -4,6 +4,7 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/2448 api.openshift.io/merged-by-featuregates: "true" + capability.openshift.io/name: Insights include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: CustomNoUpgrade diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml index 0ecdc12f2..1400f1a65 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml @@ -4,6 +4,7 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/2448 api.openshift.io/merged-by-featuregates: "true" + capability.openshift.io/name: Insights include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: DevPreviewNoUpgrade diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml index afaf9c37d..d53757282 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml @@ -4,6 +4,7 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/2448 api.openshift.io/merged-by-featuregates: "true" + capability.openshift.io/name: Insights include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: TechPreviewNoUpgrade diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 5d4794e4b..eb7c485e0 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -6,6 +6,7 @@ apiservers.config.openshift.io: Capability: "" Category: "" FeatureGates: + - 
KMSEncryption - KMSEncryptionProvider FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" @@ -416,7 +417,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2448 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig @@ -442,8 +443,7 @@ networks.config.openshift.io: CRDName: networks.config.openshift.io Capability: "" Category: "" - FeatureGates: - - NetworkDiagnosticsConfig + FeatureGates: [] FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index e7bc0aebb..69fb37c52 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -407,11 +407,11 @@ func (ExtraMapping) SwaggerDoc() map[string]string { } var map_OIDCClientConfig = map[string]string{ - "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method", - "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", - "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method.", + "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode.\n\nIt is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", "clientID": "clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. 
The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode.\n\nclientID must not be an empty string (\"\").", - "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", + "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field.\n\nThe client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", "extraScopes": "extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. This is useful if you have configured claim mappings that requires specific scopes to be requested beyond the standard OIDC scopes.\n\nWhen omitted, no additional scopes are requested.", } @@ -433,8 +433,8 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { var map_OIDCClientStatus = map[string]string{ "": "OIDCClientStatus represents the current state of platform components and how they interact with the configured identity providers.", "componentName": "componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", - "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", - "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using. 
Entries must have unique issuerURL/clientID pairs.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running.\n\nIt is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using.\n\nEntries must have unique issuerURL/clientID pairs.", "consumingUsers": "consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret.\n\nconsumingUsers must not exceed 5 entries.", "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } @@ -458,7 +458,7 @@ func (OIDCProvider) SwaggerDoc() map[string]string { var map_PrefixedClaimMapping = map[string]string{ "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", - "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -476,9 +476,9 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { var map_TokenClaimMappings = map[string]string{ "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", - "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", - "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. 
When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", - "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", + "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.\n\nWhen referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (',').\n\nFor example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", + "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time.\n\nThe current default is to use the 'sub' claim.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity.\n\nkey values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { @@ -519,7 +519,7 @@ var map_TokenIssuer = map[string]string{ "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", - "discoveryURL": "discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` as \"{issuerURL}/.well-known/openid-configuration\".\n\nThe discoveryURL must be a valid absolute HTTPS URL. 
It must not contain query parameters, user information, or fragments. Additionally, it must differ from the value of `url` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters.", + "discoveryURL": "discoveryURL is an optional field that, if specified, overrides the default discovery endpoint used to retrieve OIDC configuration metadata. By default, the discovery URL is derived from `issuerURL` as \"{issuerURL}/.well-known/openid-configuration\".\n\nThe discoveryURL must be a valid absolute HTTPS URL. It must not contain query parameters, user information, or fragments. Additionally, it must differ from the value of `issuerURL` (ignoring trailing slashes). The discoveryURL value must be at least 1 character long and no longer than 2048 characters.", } func (TokenIssuer) SwaggerDoc() map[string]string { @@ -537,7 +537,7 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { var map_TokenUserValidationRule = map[string]string{ "": "TokenUserValidationRule provides a CEL-based rule used to validate a token subject. Each rule contains a CEL expression that is evaluated against the token’s claims.", - "expression": "expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc. The expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. expression must be at least 1 character in length and must not exceed 1024 characters.", + "expression": "expression is a required CEL expression that performs a validation on cluster user identity attributes like username, groups, etc.\n\nThe expression must evaluate to a boolean value. When the expression evaluates to 'true', the cluster user identity is considered valid. When the expression evaluates to 'false', the cluster user identity is not considered valid. expression must be at least 1 character in length and must not exceed 1024 characters.", "message": "message is a required human-readable message to be logged by the Kubernetes API server if the CEL expression defined in 'expression' fails. message must be at least 1 character in length and must not exceed 256 characters.", } @@ -547,7 +547,7 @@ func (TokenUserValidationRule) SwaggerDoc() map[string]string { var map_UsernameClaimMapping = map[string]string{ "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", - "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. 
As an example, consider the following scenario:\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim.\n\nThe prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.\n\nAs an example, consider the following scenario:\n\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } @@ -1924,7 +1924,7 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string { var map_PlatformSpec = map[string]string{ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", - "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"IBMCloud\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\", \"External\", and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", "azure": "azure contains settings specific to the Azure infrastructure provider.", "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", @@ -3004,7 +3004,7 @@ func (OldTLSProfile) SwaggerDoc() map[string]string { var map_TLSProfileSpec = map[string]string{ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", - "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries that their operands do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml):\n\n ciphers:\n - ECDHE-RSA-AES128-GCM-SHA256\n\nTLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable and are always enabled when TLS 1.3 is negotiated.", "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11", } @@ -3014,9 +3014,9 @@ func (TLSProfileSpec) SwaggerDoc() map[string]string { var map_TLSSecurityProfile = map[string]string{ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", - "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are currently based on version 5.0 of the Mozilla Server Side TLS configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", - "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"old\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", - "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"intermediate\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are based on version 5.7 of the Mozilla Server Side TLS configuration guidelines. The cipher lists consist of the configuration's \"ciphersuites\" followed by the Go-specific \"ciphers\" from the guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", + "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305", "modern": "modern is a TLS security profile for use with clients that support TLS 1.3 and do not need backward compatibility for older clients.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS13\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256", "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n minTLSVersion: VersionTLS11\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256", } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go index 4b30ea380..c90962495 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go @@ -40,6 +40,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImagePolicyList{}, &ClusterImagePolicy{}, &ClusterImagePolicyList{}, + &CRIOCredentialProviderConfig{}, + &CRIOCredentialProviderConfigList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go index 77df372d4..0f3da5184 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -93,7 +93,7 @@ type EtcdBackupSpec struct { PVCName string `json:"pvcName"` } -// RetentionType is the enumeration of valid retention policy types +// RetentionType is the enumeration of valid retention policy types. // +enum // +kubebuilder:validation:Enum:="RetentionNumber";"RetentionSize" type RetentionType string @@ -115,7 +115,6 @@ type RetentionPolicy struct { // The current default is RetentionNumber with 15 backups kept. 
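	// For example, a minimal retention policy keeping the 15 most recent backups
	// might look like this (yaml sketch; the nested maxNumberOfBackups field name
	// is taken from RetentionNumberConfig and is illustrative only):
	//
	//   retentionType: RetentionNumber
	//   retentionNumber:
	//     maxNumberOfBackups: 15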
// +unionDiscriminator // +required - // +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize" RetentionType RetentionType `json:"retentionType"` // retentionNumber configures the retention policy based on the number of backups diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index 0653eeb5a..29bf8ba48 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -94,6 +94,11 @@ type ClusterMonitoringSpec struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. // +optional MetricsServerConfig MetricsServerConfig `json:"metricsServerConfig,omitempty,omitzero"` + // prometheusOperatorConfig is an optional field that can be used to configure the Prometheus Operator component. + // Specifically, it can configure how the Prometheus Operator instance is deployed, pod scheduling, and resource allocation. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // +optional + PrometheusOperatorConfig PrometheusOperatorConfig `json:"prometheusOperatorConfig,omitempty,omitzero"` } // UserDefinedMonitoring config for user-defined projects. @@ -185,6 +190,7 @@ type AlertmanagerCustomConfig struct { // limit: null // Maximum length for this list is 10. // Minimum length for this list is 1. + // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name @@ -218,8 +224,8 @@ type AlertmanagerCustomConfig struct { // When omitted, this means the user has no opinion and the platform is left // to choose reasonable defaults. These defaults are subject to change over time. // Defaults are empty/unset. - // Maximum length for this list is 10 - // Minimum length for this list is 1 + // Maximum length for this list is 10. + // Minimum length for this list is 1. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 // +listType=atomic @@ -235,7 +241,7 @@ type AlertmanagerCustomConfig struct { // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. // Default is empty list. // Maximum length for this list is 10. - // Minimum length for this list is 1 + // Minimum length for this list is 1. // Entries must have unique topologyKey and whenUnsatisfiable pairs. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 @@ -356,8 +362,8 @@ type MetricsServerConfig struct { // When omitted, this means the user has no opinion and the platform is left // to choose reasonable defaults. These defaults are subject to change over time. // Defaults are empty/unset. - // Maximum length for this list is 10 - // Minimum length for this list is 1 + // Maximum length for this list is 10. + // Minimum length for this list is 1. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 // +listType=atomic @@ -389,6 +395,7 @@ type MetricsServerConfig struct { // limit: null // Maximum length for this list is 10. // Minimum length for this list is 1. + // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name @@ -405,7 +412,91 @@ type MetricsServerConfig struct { // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. 
// Default is empty list. // Maximum length for this list is 10. - // Minimum length for this list is 1 + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// PrometheusOperatorConfig provides configuration options for the Prometheus Operator instance. +// Use this configuration to control how the Prometheus Operator instance is deployed, how it logs, and how its pods are scheduled. +// +kubebuilder:validation:MinProperties=1 +type PrometheusOperatorConfig struct { + // logLevel defines the verbosity of logs emitted by Prometheus Operator. + // This field allows users to control the amount and severity of logs generated, which can be useful + // for debugging issues or reducing noise in production environments. + // Allowed values are Error, Warn, Info, and Debug. + // When set to Error, only errors will be logged. + // When set to Warn, both warnings and errors will be logged. + // When set to Info, general information, warnings, and errors will all be logged. + // When set to Debug, detailed debugging information will be logged. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + // The current default value is `Info`. + // +optional + LogLevel LogLevel `json:"logLevel,omitempty"` + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Prometheus Operator container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 4m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10.
+ // Minimum length for this list is 1. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Prometheus Operator Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. + // Minimum length for this list is 1. // Entries must have unique topologyKey and whenUnsatisfiable pairs. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go b/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go new file mode 100644 index 000000000..9e2e0d39d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go @@ -0,0 +1,186 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CRIOCredentialProviderConfig holds cluster-wide singleton resource configurations for the CRI-O credential provider; the name of this instance is "cluster". The CRI-O credential provider is a binary shipped with CRI-O that provides a way to obtain container image pull credentials from external sources. +// For example, it can be used to fetch mirror registry credentials from secrets resources in the cluster within the same namespace the pod will be running in. +// CRIOCredentialProviderConfig configuration specifies the pod image source registries that should trigger the CRI-O credential provider execution, which will resolve the CRI-O mirror configurations and obtain the necessary credentials for pod creation. +// Note: Configuration changes will only take effect after the kubelet restarts, which is automatically managed by the cluster during rollout. +// +// The resource is a singleton named "cluster". +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=criocredentialproviderconfigs,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2557 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=CRIOCredentialProviderConfig +// +openshift:compatibility-gen:level=4 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="criocredentialproviderconfig is a singleton, .metadata.name must be 'cluster'" +type CRIOCredentialProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitzero"` + + // spec defines the desired configuration of the CRI-O Credential Provider. + // This field is required and must be provided when creating the resource. + // +required + Spec *CRIOCredentialProviderConfigSpec `json:"spec,omitempty,omitzero"` + + // status represents the current state of the CRIOCredentialProviderConfig. + // When omitted or nil, it indicates that the status has not yet been set by the controller. + // The controller will populate this field with validation conditions and operational state. + // +optional + Status CRIOCredentialProviderConfigStatus `json:"status,omitzero,omitempty"` +} + +// CRIOCredentialProviderConfigSpec defines the desired configuration of the CRI-O Credential Provider. +// +kubebuilder:validation:MinProperties=0 +type CRIOCredentialProviderConfigSpec struct { + // matchImages is a list of string patterns used to determine whether + // the CRI-O credential provider should be invoked for a given image. This list is + // passed to the kubelet CredentialProviderConfig, and if any pattern matches + // the requested image, the CRI-O credential provider will be invoked to obtain credentials for pulling + // that image or its mirrors. + // Depending on the platform, the CRI-O credential provider may be installed alongside an existing platform specific provider. + // Conflicts between the existing platform specific provider image match configuration and this list will be handled by + // the following precedence rule: credentials from built-in kubelet providers (e.g., ECR, GCR, ACR) take precedence over those + // from the CRIOCredentialProviderConfig when both match the same image. + // To avoid uncertainty, it is recommended to avoid configuring your private image patterns to overlap with + // existing platform specific provider config (e.g., the entries from https://github.com/openshift/machine-config-operator/blob/main/templates/common/aws/files/etc-kubernetes-credential-providers-ecr-credential-provider.yaml). + // You can check the resource's Status conditions + // to see if any entries were ignored due to exact matches with known built-in provider patterns. + // + // This field is optional. When specified, the list must contain between 1 and 50 entries. + // The list is treated as a set, so duplicate entries are not allowed. + // + // For more details, see: + // https://kubernetes.io/docs/tasks/administer-cluster/kubelet-credential-provider/ + // https://github.com/cri-o/crio-credential-provider#architecture + // + // Each entry in matchImages is a pattern which can optionally contain a port and a path. Each entry must be no longer than 512 characters. + // Wildcards ('*') are supported for full subdomain labels, such as '*.k8s.io' or 'k8s.*.io', + // and for top-level domains, such as 'k8s.*' (which matches 'k8s.io' or 'k8s.net'). + // A global wildcard '*' (matching any domain) is not allowed. + // Wildcards may replace an entire hostname label (e.g., *.example.com), but they cannot appear within a label (e.g., f*oo.example.com) and are not allowed in the port or path. + // For example, 'example.*.com' is valid, but 'exa*mple.*.com' is not. + // Each wildcard matches only a single domain label, + // so '*.io' does **not** match '*.k8s.io'.
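+ // For illustration (derived from the rules above), given the pattern '*.k8s.io': + // - 'registry.k8s.io' matches: the wildcard spans exactly the single 'registry' label. + // - 'k8s.io' does not match: it has one fewer domain label than the pattern. + // - 'a.b.k8s.io' does not match: a single wildcard cannot span two labels.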
+ // + // A match exists between an image and a matchImage when all of the following are true: + // Both contain the same number of domain parts and each part matches. + // The URL path of a matchImages entry must be a prefix of the target image URL path. + // If the matchImages entry contains a port, then the port must match in the image as well. + // + // Example values of matchImages: + // - 123456789.dkr.ecr.us-east-1.amazonaws.com + // - *.azurecr.io + // - gcr.io + // - *.*.registry.io + // - registry.io:8080/path + // + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:MinItems=1 + // +listType=set + // +optional + MatchImages []MatchImage `json:"matchImages,omitempty"` +} + +// MatchImage is a string pattern used to match container image registry addresses. +// It must be a valid fully qualified domain name with optional wildcard, port, and path. +// The maximum length is 512 characters. +// +// Wildcards ('*') are supported for full subdomain labels and top-level domains. +// Each entry can optionally contain a port (e.g., :8080) and a path (e.g., /path). +// Wildcards are not allowed in the port or path portions. +// +// Examples: +// - "registry.io" - matches exactly registry.io +// - "*.azurecr.io" - matches any single subdomain of azurecr.io +// - "registry.io:8080/path" - matches with specific port and path prefix +// +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:XValidation:rule="self != '*'",message="global wildcard '*' is not allowed" +// +kubebuilder:validation:XValidation:rule=`self.matches('^((\\*|[a-z0-9]([a-z0-9-]*[a-z0-9])?)(\\.(\\*|[a-z0-9]([a-z0-9-]*[a-z0-9])?))*)(:[0-9]+)?(/[-a-z0-9._/]*)?$')`,message="invalid matchImages value, must be a valid fully qualified domain name in lowercase with optional wildcard, port, and path" +type MatchImage string + +// +k8s:deepcopy-gen=true +// CRIOCredentialProviderConfigStatus defines the observed state of CRIOCredentialProviderConfig. +// +kubebuilder:validation:MinProperties=1 +type CRIOCredentialProviderConfigStatus struct { + // conditions represent the latest available observations of the configuration state. + // When omitted, it indicates that no conditions have been reported yet. + // The maximum number of conditions is 16. + // Conditions are stored as a map keyed by condition type, ensuring uniqueness. + // + // Expected condition types include: + // "Validated": indicates whether the matchImages configuration is valid + // +optional + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CRIOCredentialProviderConfigList contains a list of CRIOCredentialProviderConfig resources. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type CRIOCredentialProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []CRIOCredentialProviderConfig `json:"items"` +} + +const ( + // ConditionTypeValidated is a condition type that indicates whether the CRIOCredentialProviderConfig + // matchImages configuration has been validated successfully. + // When True, all matchImage patterns are valid and have been applied. + // When False, the configuration contains errors (see Reason for details). + // Possible reasons for False status: + // - ValidationFailed: matchImages contains invalid patterns + // - ConfigurationPartiallyApplied: some matchImage entries were ignored due to conflicts + ConditionTypeValidated = "Validated" + + // ReasonValidationFailed is a condition reason used with ConditionTypeValidated=False + // to indicate that the matchImages configuration contains one or more invalid registry patterns + // that do not conform to the required format (valid FQDN with optional wildcard, port, and path). + ReasonValidationFailed = "ValidationFailed" + + // ReasonConfigurationPartiallyApplied is a condition reason used with ConditionTypeValidated=False + // to indicate that some matchImage entries were ignored due to conflicts or overlapping patterns. + // The condition message will contain details about which entries were ignored and why. + ReasonConfigurationPartiallyApplied = "ConfigurationPartiallyApplied" + + // ConditionTypeMachineConfigRendered is a condition type that indicates whether + // the CRIOCredentialProviderConfig has been successfully rendered into a + // MachineConfig object. + // When True, the corresponding MachineConfig is present in the cluster. + // When False, rendering failed. + ConditionTypeMachineConfigRendered = "MachineConfigRendered" + + // ReasonMachineConfigRenderingSucceeded is a condition reason used with ConditionTypeMachineConfigRendered=True + // to indicate that the MachineConfig was successfully created/updated in the API server. + ReasonMachineConfigRenderingSucceeded = "MachineConfigRenderingSucceeded" + + // ReasonMachineConfigRenderingFailed is a condition reason used with ConditionTypeMachineConfigRendered=False + // to indicate that the MachineConfig creation/update failed. + // The condition message will contain details about the failure. 
+ ReasonMachineConfigRenderingFailed = "MachineConfigRenderingFailed" +) diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 46666ae3b..bef31b905 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 +// +openshift:capability=Insights type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index 9ead6aba2..dc51326b9 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -192,6 +192,115 @@ func (in *BackupStatus) DeepCopy() *BackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfig) DeepCopyInto(out *CRIOCredentialProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(CRIOCredentialProviderConfigSpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfig. +func (in *CRIOCredentialProviderConfig) DeepCopy() *CRIOCredentialProviderConfig { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CRIOCredentialProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfigList) DeepCopyInto(out *CRIOCredentialProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CRIOCredentialProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigList. +func (in *CRIOCredentialProviderConfigList) DeepCopy() *CRIOCredentialProviderConfigList { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CRIOCredentialProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CRIOCredentialProviderConfigSpec) DeepCopyInto(out *CRIOCredentialProviderConfigSpec) { + *out = *in + if in.MatchImages != nil { + in, out := &in.MatchImages, &out.MatchImages + *out = make([]MatchImage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigSpec. +func (in *CRIOCredentialProviderConfigSpec) DeepCopy() *CRIOCredentialProviderConfigSpec { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfigStatus) DeepCopyInto(out *CRIOCredentialProviderConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigStatus. +func (in *CRIOCredentialProviderConfigStatus) DeepCopy() *CRIOCredentialProviderConfigStatus { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { *out = *in @@ -365,6 +474,7 @@ func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { out.UserDefined = in.UserDefined in.AlertmanagerConfig.DeepCopyInto(&out.AlertmanagerConfig) in.MetricsServerConfig.DeepCopyInto(&out.MetricsServerConfig) + in.PrometheusOperatorConfig.DeepCopyInto(&out.PrometheusOperatorConfig) return } @@ -952,6 +1062,50 @@ func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusOperatorConfig) DeepCopyInto(out *PrometheusOperatorConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusOperatorConfig. +func (in *PrometheusOperatorConfig) DeepCopy() *PrometheusOperatorConfig { + if in == nil { + return nil + } + out := new(PrometheusOperatorConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 2f79f801d..14091b587 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -21,6 +21,29 @@ backups.config.openshift.io: - AutomatedEtcdBackup Version: v1alpha1 +criocredentialproviderconfigs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2557 + CRDName: criocredentialproviderconfigs.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - CRIOCredentialProviderConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: CRIOCredentialProviderConfig + Labels: {} + PluralName: criocredentialproviderconfigs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - CRIOCredentialProviderConfig + Version: v1alpha1 + clusterimagepolicies.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1457 @@ -97,7 +120,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1245 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 59a5b3708..c060ce874 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -132,10 +132,10 @@ var map_AlertmanagerCustomConfig = map[string]string{ "": "AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. alertmanagerCustomConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", "logLevel": "logLevel defines the verbosity of logs emitted by Alertmanager. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
The current default value is `kubernetes.io/os: linux`.", - "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", "secrets": "secrets defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods.\n\nThese secrets can be used to authenticate Alertmanager with endpoint receivers. For example, you can use secrets to: - Provide certificates for TLS authentication with receivers that require private CA certificates - Store credentials for Basic HTTP authentication with receivers that require password-based auth - Store any other authentication credentials needed by your alert receivers\n\nThis field is optional. Maximum length for this list is 10. Minimum length for this list is 1. Entries in this list must be unique.", - "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", - "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "tolerations": "tolerations defines tolerations for the pods. 
tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", "volumeClaimTemplate": "volumeClaimTemplate Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. If omitted, the Pod uses ephemeral storage and alert data will not persist across restarts. This field is optional.", } @@ -174,10 +174,11 @@ func (ClusterMonitoringList) SwaggerDoc() map[string]string { } var map_ClusterMonitoringSpec = map[string]string{ - "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", - "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", - "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", - "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", + "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. 
The current default value is `DefaultConfig`.", + "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "prometheusOperatorConfig": "prometheusOperatorConfig is an optional field that can be used to configure the Prometheus Operator component. Specifically, it can configure how the Prometheus Operator instance is deployed, pod scheduling, and resource allocation. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -207,16 +208,29 @@ var map_MetricsServerConfig = map[string]string{ "": "MetricsServerConfig provides configuration options for the Metrics Server instance that runs in the `openshift-monitoring` namespace. Use this configuration to control how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled.", "audit": "audit defines the audit configuration used by the Metrics Server instance. audit is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default sets audit.profile to Metadata", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", - "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", "verbosity": "verbosity defines the verbosity of log messages for Metrics Server. Valid values are Errors, Info, Trace, TraceAll and omitted. When set to Errors, only critical messages and errors are logged. When set to Info, only basic information messages are logged. When set to Trace, information useful for general debugging is logged. When set to TraceAll, detailed information about metric scraping is logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Errors`", - "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", - "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", } func (MetricsServerConfig) SwaggerDoc() map[string]string { return map_MetricsServerConfig } +var map_PrometheusOperatorConfig = map[string]string{ + "": "PrometheusOperatorConfig provides configuration options for the Prometheus Operator instance. Use this configuration to control how the Prometheus Operator instance is deployed, how it logs, and how its pods are scheduled.", + "logLevel": "logLevel defines the verbosity of logs emitted by Prometheus Operator. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged.
When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the Prometheus Operator container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Prometheus Operator Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (PrometheusOperatorConfig) SwaggerDoc() map[string]string { + return map_PrometheusOperatorConfig +} + var map_UserDefinedMonitoring = map[string]string{ "": "UserDefinedMonitoring config for user-defined projects.", "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
The current default value is `Disabled`.", @@ -226,6 +240,44 @@ func (UserDefinedMonitoring) SwaggerDoc() map[string]string { return map_UserDefinedMonitoring } +var map_CRIOCredentialProviderConfig = map[string]string{ + "": "CRIOCredentialProviderConfig holds cluster-wide singleton resource configurations for the CRI-O credential provider; the name of this instance is \"cluster\". The CRI-O credential provider is a binary shipped with CRI-O that provides a way to obtain container image pull credentials from external sources. For example, it can be used to fetch mirror registry credentials from secrets resources in the cluster within the same namespace the pod will be running in. The CRIOCredentialProviderConfig configuration specifies the pod image source registries that should trigger the CRI-O credential provider execution, which will resolve the CRI-O mirror configurations and obtain the necessary credentials for pod creation. Note: Configuration changes will only take effect after the kubelet restarts, which is automatically managed by the cluster during rollout.\n\nThe resource is a singleton named \"cluster\".\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the desired configuration of the CRI-O Credential Provider. This field is required and must be provided when creating the resource.", + "status": "status represents the current state of the CRIOCredentialProviderConfig. When omitted or nil, it indicates that the status has not yet been set by the controller. The controller will populate this field with validation conditions and operational state.", +} + +func (CRIOCredentialProviderConfig) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfig +} + +var map_CRIOCredentialProviderConfigList = map[string]string{ + "": "CRIOCredentialProviderConfigList contains a list of CRIOCredentialProviderConfig resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CRIOCredentialProviderConfigList) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigList +} + +var map_CRIOCredentialProviderConfigSpec = map[string]string{ + "": "CRIOCredentialProviderConfigSpec defines the desired configuration of the CRI-O Credential Provider.", + "matchImages": "matchImages is a list of string patterns used to determine whether the CRI-O credential provider should be invoked for a given image. This list is passed to the kubelet CredentialProviderConfig, and if any pattern matches the requested image, the CRI-O credential provider will be invoked to obtain credentials for pulling that image or its mirrors. Depending on the platform, the CRI-O credential provider may be installed alongside an existing platform specific provider.
Conflicts between the existing platform specific provider image match configuration and this list will be handled by the following precedence rule: credentials from built-in kubelet providers (e.g., ECR, GCR, ACR) take precedence over those from the CRIOCredentialProviderConfig when both match the same image. To avoid uncertainty, it is recommended to avoid configuring your private image patterns to overlap with existing platform specific provider config (e.g., the entries from https://github.com/openshift/machine-config-operator/blob/main/templates/common/aws/files/etc-kubernetes-credential-providers-ecr-credential-provider.yaml). You can check the resource's Status conditions to see if any entries were ignored due to exact matches with known built-in provider patterns.\n\nThis field is optional. When set, the list must contain between 1 and 50 entries. The list is treated as a set, so duplicate entries are not allowed.\n\nFor more details, see: https://kubernetes.io/docs/tasks/administer-cluster/kubelet-credential-provider/ https://github.com/cri-o/crio-credential-provider#architecture\n\nEach entry in matchImages is a pattern which can optionally contain a port and a path. Each entry must be no longer than 512 characters. Wildcards ('*') are supported for full subdomain labels, such as '*.k8s.io' or 'k8s.*.io', and for top-level domains, such as 'k8s.*' (which matches 'k8s.io' or 'k8s.net'). A global wildcard '*' (matching any domain) is not allowed. Wildcards may replace an entire hostname label (e.g., *.example.com), but they cannot appear within a label (e.g., f*oo.example.com) and are not allowed in the port or path. For example, 'example.*.com' is valid, but 'exa*mple.*.com' is not. Each wildcard matches only a single domain label, so '*.io' does **not** match '*.k8s.io'.\n\nA match exists between an image and a matchImage when all of the below are true: Both contain the same number of domain parts and each part matches. The URL path of a matchImages entry must be a prefix of the target image URL path. If the matchImages entry contains a port, then the port must match in the image as well.\n\nExample values of matchImages: - 123456789.dkr.ecr.us-east-1.amazonaws.com - *.azurecr.io - gcr.io - *.*.registry.io - registry.io:8080/path", +} + +func (CRIOCredentialProviderConfigSpec) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigSpec +} + +var map_CRIOCredentialProviderConfigStatus = map[string]string{ + "": "CRIOCredentialProviderConfigStatus defines the observed state of CRIOCredentialProviderConfig", + "conditions": "conditions represent the latest available observations of the configuration state. When omitted, it indicates that no conditions have been reported yet. The maximum number of conditions is 16. Conditions are stored as a map keyed by condition type, ensuring uniqueness.\n\nExpected condition types include: \"Validated\": indicates whether the matchImages configuration is valid", +} + +func (CRIOCredentialProviderConfigStatus) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigStatus +} + var map_ImagePolicy = map[string]string{ "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go index d59f5920b..fbe666249 100644 --- a/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 +// +openshift:capability=Insights type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. diff --git a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml index 99fe308ef..1f73e723e 100644 --- a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml @@ -2,7 +2,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2195 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig diff --git a/vendor/github.com/openshift/api/console/v1/types_console_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_sample.go index bd0f65696..c296059b7 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_sample.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_sample.go @@ -125,7 +125,8 @@ type ConsoleSampleSpec struct { // ConsoleSampleSourceType is an enumeration of the supported sample types. // Unsupported samples types will be ignored in the web console. -// +kubebuilder:validation:Enum:=GitImport;ContainerImport +// +kubebuilder:validation:Enum:="GitImport";"ContainerImport" +// +enum type ConsoleSampleSourceType string const ( @@ -144,7 +145,6 @@ type ConsoleSampleSource struct { // type of the sample, currently supported: "GitImport";"ContainerImport" // +unionDiscriminator // +required - // +kubebuilder:validation:Enum:="GitImport";"ContainerImport" Type ConsoleSampleSourceType `json:"type"` // gitImport allows the user to import code from a git repository. diff --git a/vendor/github.com/openshift/api/etcd/README.md b/vendor/github.com/openshift/api/etcd/README.md new file mode 100644 index 000000000..b92d553df --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/README.md @@ -0,0 +1,211 @@ +# etcd.openshift.io API Group + +This API group contains CRDs related to etcd cluster management in Two Node OpenShift with Fencing deployments. + +## API Versions + +### v1alpha1 + +Contains the `PacemakerCluster` custom resource for monitoring Pacemaker cluster health in Two Node OpenShift with Fencing deployments. + +#### PacemakerCluster + +- **Feature Gate**: `DualReplica` +- **Component**: `two-node-fencing` +- **Scope**: Cluster-scoped singleton resource (must be named "cluster") +- **Resource Path**: `pacemakerclusters.etcd.openshift.io` + +The `PacemakerCluster` resource provides visibility into the health and status of a Pacemaker-managed cluster. 
+It is periodically updated by the cluster-etcd-operator's status collector. + +### Status Subresource Design + +This resource uses the standard Kubernetes status subresource pattern (`+kubebuilder:subresource:status`). +The status collector creates the resource without status, then immediately populates it via the `/status` endpoint. + +**Why not atomic create-with-status?** + +We initially explored removing the status subresource to allow creating the resource with status in a single +atomic operation. This would ensure the resource is never observed in an incomplete state. However: + +1. The Kubernetes API server strips the `status` field from create requests when a status subresource is enabled +2. Without the subresource, we cannot use separate RBAC for spec vs status updates +3. The OpenShift API test framework assumes status subresource exists for status update tests + +The status collector performs a two-step operation: create resource, then immediately update status. +The brief window where status is empty is acceptable since the healthcheck controller handles missing status gracefully. + +### Pacemaker Resources + +A **pacemaker resource** is a unit of work managed by pacemaker. In pacemaker terminology, resources are services +or applications that pacemaker monitors, starts, stops, and moves between nodes to maintain high availability. + +For Two Node OpenShift with Fencing, we manage three resource types: +- **Kubelet**: The Kubernetes node agent and a prerequisite for etcd +- **Etcd**: The distributed key-value store +- **FencingAgent**: Used to isolate failed nodes during a quorum loss event (tracked separately) + +### Status Structure + +```yaml +status: # Optional on creation, populated via status subresource + conditions: # Required when status present (min 3 items) + - type: Healthy + - type: InService + - type: NodeCountAsExpected + lastUpdated: # Required when status present, cannot decrease + nodes: # Control-plane nodes (0-5, expects 2 for TNF) + - nodeName: # RFC 1123 subdomain name + addresses: # Required: List of node addresses (1-8 items) + - type: InternalIP # Currently only InternalIP is supported + address: # First address used for etcd peer URLs + conditions: # Required: Node-level conditions (min 9 items) + - type: Healthy + - type: Online + - type: InService + - type: Active + - type: Ready + - type: Clean + - type: Member + - type: FencingAvailable + - type: FencingHealthy + resources: # Required: Pacemaker resources on this node (min 2) + - name: Kubelet # Both Kubelet and Etcd must be present + conditions: # Required: Resource-level conditions (min 8 items) + - type: Healthy + - type: InService + - type: Managed + - type: Enabled + - type: Operational + - type: Active + - type: Started + - type: Schedulable + - name: Etcd + conditions: [...] # Same 8 conditions as Kubelet (abbreviated) + fencingAgents: # Required: Fencing agents for THIS node (1-8) + - name: # e.g., "master-0_redfish" (unique, max 300 chars) + method: # Fencing method: "Redfish" or "IPMI" + conditions: [...] # Same 8 conditions as resources (abbreviated) +``` + +### Fencing Agents + +Fencing agents are STONITH (Shoot The Other Node In The Head) devices used to isolate failed nodes. +Unlike regular pacemaker resources (Kubelet, Etcd), fencing agents are tracked separately because: + +1. **Mapping by target, not schedule**: Resources are mapped to the node where they are scheduled to run. 
+ Fencing agents are mapped to the node they can *fence* (their target), regardless of which node + their monitoring operations are scheduled on. + +2. **Multiple agents per node**: A node can have multiple fencing agents for redundancy + (e.g., both Redfish and IPMI). Expected: 1 per node, supported: up to 8. + +3. **Health tracking via two node-level conditions**: + - **FencingAvailable**: True if at least one agent is healthy (fencing works), False if all agents unhealthy (degrades operator) + - **FencingHealthy**: True if all agents are healthy (ideal state), False if any agent is unhealthy (emits warning events) + +### Cluster-Level Conditions + +| Condition | True | False | +|-----------|------|-------| +| `Healthy` | Cluster is healthy (`ClusterHealthy`) | Cluster has issues (`ClusterUnhealthy`) | +| `InService` | In service (`InService`) | In maintenance (`InMaintenance`) | +| `NodeCountAsExpected` | Node count is as expected (`AsExpected`) | Wrong count (`InsufficientNodes`, `ExcessiveNodes`) | + +### Node-Level Conditions + +| Condition | True | False | +|-----------|------|-------| +| `Healthy` | Node is healthy (`NodeHealthy`) | Node has issues (`NodeUnhealthy`) | +| `Online` | Node is online (`Online`) | Node is offline (`Offline`) | +| `InService` | In service (`InService`) | In maintenance (`InMaintenance`) | +| `Active` | Node is active (`Active`) | Node is in standby (`Standby`) | +| `Ready` | Node is ready (`Ready`) | Node is pending (`Pending`) | +| `Clean` | Node is clean (`Clean`) | Node is unclean (`Unclean`) | +| `Member` | Node is a member (`Member`) | Not a member (`NotMember`) | +| `FencingAvailable` | At least one agent healthy (`FencingAvailable`) | All agents unhealthy (`FencingUnavailable`) - degrades operator | +| `FencingHealthy` | All agents healthy (`FencingHealthy`) | Some agents unhealthy (`FencingUnhealthy`) - emits warnings | + +### Resource-Level Conditions + +Each resource in the `resources` array and each fencing agent in the `fencingAgents` array has its own conditions. + +| Condition | True | False | +|-----------|------|-------| +| `Healthy` | Resource is healthy (`ResourceHealthy`) | Resource has issues (`ResourceUnhealthy`) | +| `InService` | In service (`InService`) | In maintenance (`InMaintenance`) | +| `Managed` | Managed by pacemaker (`Managed`) | Not managed (`Unmanaged`) | +| `Enabled` | Resource is enabled (`Enabled`) | Resource is disabled (`Disabled`) | +| `Operational` | Resource is operational (`Operational`) | Resource has failed (`Failed`) | +| `Active` | Resource is active (`Active`) | Resource is not active (`Inactive`) | +| `Started` | Resource is started (`Started`) | Resource is stopped (`Stopped`) | +| `Schedulable` | Resource is schedulable (`Schedulable`) | Resource is not schedulable (`Unschedulable`) | + +### Validation Rules + +**Resource naming:** +- Resource name must be "cluster" (singleton) + +**Node name validation:** +- Must be a lowercase RFC 1123 subdomain name +- Consists of lowercase alphanumeric characters, '-' or '.' 
+- Must start and end with an alphanumeric character +- Maximum 253 characters + +**Node addresses:** +- Uses `PacemakerNodeAddress` type (similar to `corev1.NodeAddress` but with IP validation) +- Currently only `InternalIP` type is supported +- Pacemaker allows multiple addresses for Corosync communication between nodes (1-8 addresses) +- The first address in the list is used for IP-based peer URLs for etcd membership +- IP validation: + - Must be a valid global unicast IPv4 or IPv6 address + - Must be in canonical form (e.g., `192.168.1.1` not `192.168.001.001`, or `2001:db8::1` not `2001:0db8::1`) + - Excludes loopback, link-local, and multicast addresses + - Maximum length is 39 characters (full IPv6 address) + +**Timestamp validation:** +- `lastUpdated` is required when status is present +- Once set, cannot be set to an earlier timestamp (validation uses `!has(oldSelf.lastUpdated)` to handle initial creation) +- Timestamps must always increase (prevents stale updates from overwriting newer data) + +**Status fields:** +- `status` - Optional on creation (omitted when empty), populated via status subresource +- When status is present, all fields within are required: + - `conditions` - Required array of cluster conditions (min 3 items) + - `lastUpdated` - Required timestamp for staleness detection + - `nodes` - Required array of control-plane node statuses (min 0, max 5; empty allowed for catastrophic failures) + +**Node fields (when node present):** +- `nodeName` - Required, RFC 1123 subdomain +- `addresses` - Required (min 1, max 8 items) +- `conditions` - Required (min 9 items with specific types enforced via XValidation) +- `resources` - Required (min 2 items: Kubelet and Etcd) +- `fencingAgents` - Required (min 1, max 8 items) + +**Conditions validation:** +- Cluster-level: MinItems=3 (Healthy, InService, NodeCountAsExpected) +- Node-level: MinItems=9 (Healthy, Online, InService, Active, Ready, Clean, Member, FencingAvailable, FencingHealthy) +- Resource-level: MinItems=8 (Healthy, InService, Managed, Enabled, Operational, Active, Started, Schedulable) +- Fencing agent-level: MinItems=8 (same conditions as resources) + +All condition arrays have XValidation rules to ensure specific condition types are present. + +**Resource names:** +- Valid values are: `Kubelet`, `Etcd` +- Both resources must be present in each node's `resources` array + +**Fencing agent fields:** +- `name`: Unique identifier for the fencing agent (e.g., "master-0_redfish") + - Must be unique within the `fencingAgents` array + - May contain alphanumeric characters, dots, hyphens, and underscores (`^[a-zA-Z0-9._-]+$`) + - Maximum 300 characters (provides headroom beyond 253 node name + underscore + method) +- `method`: Fencing method enum - valid values are `Redfish` or `IPMI` +- `conditions`: Required, same 8 conditions as resources + +Note: The target node is implied by the parent `PacemakerClusterNodeStatus` - fencing agents are nested under the node they can fence. + +### Usage + +The cluster-etcd-operator healthcheck controller watches this resource and updates operator conditions based on +the cluster state. The aggregate `Healthy` conditions at each level (cluster, node, resource) provide a quick +way to determine overall health.
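To make the two-step status flow and the Usage section above concrete, here is a minimal sketch, not part of this PR, of how a collector could create the singleton and then populate its status, and how a consumer can read the aggregate `Healthy` condition. The helper names `ensurePacemakerCluster` and `clusterHealthy` are hypothetical; only the `PacemakerCluster` type, the "cluster" singleton name, and `ClusterHealthyConditionType` come from this change, and the sketch assumes a controller-runtime client built with a scheme that includes `etcd.openshift.io/v1alpha1`.

```go
package collector

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	etcdv1alpha1 "github.com/openshift/api/etcd/v1alpha1"
)

// ensurePacemakerCluster mirrors the two-step flow described in the README:
// create the singleton without status (the API server strips status from
// create requests anyway), then populate status via the /status subresource.
func ensurePacemakerCluster(ctx context.Context, c client.Client, status etcdv1alpha1.PacemakerClusterStatus) error {
	pc := &etcdv1alpha1.PacemakerCluster{
		// The singleton name "cluster" is enforced by a CEL rule on the resource.
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	}
	if err := c.Create(ctx, pc); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	// Re-read the live object so the status write carries a current resourceVersion.
	if err := c.Get(ctx, client.ObjectKey{Name: "cluster"}, pc); err != nil {
		return err
	}
	// The supplied status must satisfy the MinItems and lastUpdated CEL rules.
	pc.Status = status
	return c.Status().Update(ctx, pc)
}

// clusterHealthy shows how a consumer such as the healthcheck controller can
// read the aggregate condition; a missing or empty status reads as unhealthy,
// matching the "handles missing status gracefully" behavior described above.
func clusterHealthy(pc *etcdv1alpha1.PacemakerCluster) bool {
	cond := meta.FindStatusCondition(pc.Status.Conditions, etcdv1alpha1.ClusterHealthyConditionType)
	return cond != nil && cond.Status == metav1.ConditionTrue
}
```

Writing status through `Status().Update` rather than a plain `Update` is what keeps spec and status writes separable for RBAC, which is the second reason the README gives for keeping the subresource.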
diff --git a/vendor/github.com/openshift/api/etcd/install.go b/vendor/github.com/openshift/api/etcd/install.go new file mode 100644 index 000000000..7e7474152 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/install.go @@ -0,0 +1,26 @@ +package etcd + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1alpha1 "github.com/openshift/api/etcd/v1alpha1" +) + +const ( + GroupName = "etcd.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/Makefile b/vendor/github.com/openshift/api/etcd/v1alpha1/Makefile new file mode 100644 index 000000000..3d019662a --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="etcd.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/doc.go b/vendor/github.com/openshift/api/etcd/v1alpha1/doc.go new file mode 100644 index 000000000..aea92fb38 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true +// +groupName=etcd.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/register.go b/vendor/github.com/openshift/api/etcd/v1alpha1/register.go new file mode 100644 index 000000000..1dc6482f8 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/register.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "etcd.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &PacemakerCluster{}, + &PacemakerClusterList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go b/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go new file mode 100644 index 000000000..ab06d0e39 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go @@ -0,0 +1,736 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PacemakerCluster is used in Two Node 
OpenShift with Fencing deployments to monitor the health +// of etcd running under pacemaker. + +// Cluster-level condition types for PacemakerCluster.status.conditions +const ( + // ClusterHealthyConditionType tracks the overall health of the pacemaker cluster. + // This is an aggregate condition that reflects the health of all cluster-level conditions and node health. + // Specifically, it aggregates the following conditions: + // - ClusterInServiceConditionType + // - ClusterNodeCountAsExpectedConditionType + // - NodeHealthyConditionType (for each node) + // When True, the cluster is healthy with reason "ClusterHealthy". + // When False, the cluster is unhealthy with reason "ClusterUnhealthy". + ClusterHealthyConditionType = "Healthy" + + // ClusterInServiceConditionType tracks whether the cluster is in service (not in maintenance mode). + // Maintenance mode is a cluster-wide setting that prevents pacemaker from starting or stopping resources. + // When True, the cluster is in service with reason "InService". This is the normal operating state. + // When False, the cluster is in maintenance mode with reason "InMaintenance". This is an unexpected state. + ClusterInServiceConditionType = "InService" + + // ClusterNodeCountAsExpectedConditionType tracks whether the cluster has the expected number of nodes. + // For Two Node OpenShift with Fencing, we are expecting exactly 2 nodes. + // When True, the expected number of nodes are present with reason "AsExpected". + // When False, the node count is incorrect with reason "InsufficientNodes" or "ExcessiveNodes". + ClusterNodeCountAsExpectedConditionType = "NodeCountAsExpected" +) + +// ClusterHealthy condition reasons +const ( + // ClusterHealthyReasonHealthy means the pacemaker cluster is healthy and operating normally. + ClusterHealthyReasonHealthy = "ClusterHealthy" + + // ClusterHealthyReasonUnhealthy means the pacemaker cluster has issues that need investigation. + ClusterHealthyReasonUnhealthy = "ClusterUnhealthy" +) + +// ClusterInService condition reasons +const ( + // ClusterInServiceReasonInService means the cluster is in service (not in maintenance mode). + // This is the normal operating state. + ClusterInServiceReasonInService = "InService" + + // ClusterInServiceReasonInMaintenance means the cluster is in maintenance mode. + // In maintenance mode, pacemaker will not start or stop any resources. Entering and exiting this state requires + // manual user intervention, and is unexpected during normal cluster operation. + ClusterInServiceReasonInMaintenance = "InMaintenance" +) + +// ClusterNodeCountAsExpected condition reasons +const ( + // ClusterNodeCountAsExpectedReasonAsExpected means the expected number of nodes are present. + // For Two Node OpenShift with Fencing, we are expecting exactly 2 nodes. This is the expected healthy state. + ClusterNodeCountAsExpectedReasonAsExpected = "AsExpected" + + // ClusterNodeCountAsExpectedReasonInsufficientNodes means fewer nodes than expected are present. + // For Two Node OpenShift with Fencing, this means that fewer than 2 nodes are present. Under normal operation, this will only happen during + // a node replacement operation. It's also possible to enter this state through manual user intervention, and + // user intervention will also be required to restore normal functionality. + ClusterNodeCountAsExpectedReasonInsufficientNodes = "InsufficientNodes" + + // ClusterNodeCountAsExpectedReasonExcessiveNodes means more nodes than expected are present.
+ // For Two Node OpenShift with Fencing, this means more than 2 nodes are present. This should be investigated as it is unexpected and should + // never happen during normal cluster operation. It is possible to enter this state through manual user intervention, + // and user intervention will also be required to restore normal functionality. + ClusterNodeCountAsExpectedReasonExcessiveNodes = "ExcessiveNodes" +) + +// Node-level condition types for PacemakerCluster.status.nodes[].conditions +const ( + // NodeHealthyConditionType tracks the overall health of a node in the pacemaker cluster. + // This is an aggregate condition that reflects the health of all node-level conditions and resource health. + // Specifically, it aggregates the following conditions: + // - NodeOnlineConditionType + // - NodeInServiceConditionType + // - NodeActiveConditionType + // - NodeReadyConditionType + // - NodeCleanConditionType + // - NodeMemberConditionType + // - NodeFencingAvailableConditionType + // - NodeFencingHealthyConditionType + // - ResourceHealthyConditionType (for each resource in the node's resources list) + // When True, the node is healthy with reason "NodeHealthy". + // When False, the node is unhealthy with reason "NodeUnhealthy". + NodeHealthyConditionType = "Healthy" + + // NodeOnlineConditionType tracks whether a node is online. + // When True, the node is online with reason "Online". This is the normal operating state. + // When False, the node is offline with reason "Offline". This can occur during reboots, failures, maintenance, or replacement. + NodeOnlineConditionType = "Online" + + // NodeInServiceConditionType tracks whether a node is in service (not in maintenance mode). + // A node in maintenance mode is ignored by pacemaker while maintenance mode is active. + // When True, the node is in service with reason "InService". This is the normal operating state. + // When False, the node is in maintenance mode with reason "InMaintenance". This is an unexpected state. + NodeInServiceConditionType = "InService" + + // NodeActiveConditionType tracks whether a node is active (not in standby mode). + // When a node enters standby mode, pacemaker moves its resources to other nodes in the cluster. + // In Two Node OpenShift with Fencing, we do not use standby mode during normal operation. + // When True, the node is active with reason "Active". This is the normal operating state. + // When False, the node is in standby mode with reason "Standby". This is an unexpected state. + NodeActiveConditionType = "Active" + + // NodeReadyConditionType tracks whether a node is ready (not in a pending state). + // A node in a pending state is in the process of joining or leaving the cluster. + // When True, the node is ready with reason "Ready". This is the normal operating state. + // When False, the node is pending with reason "Pending". This is expected to be temporary. + NodeReadyConditionType = "Ready" + + // NodeCleanConditionType tracks whether a node is in a clean state. + // An unclean state means that pacemaker was unable to confirm the node's state, which signifies issues + // in fencing, communication, or configuration. + // When True, the node is clean with reason "Clean". This is the normal operating state. + // When False, the node is unclean with reason "Unclean". This is an unexpected state. + NodeCleanConditionType = "Clean" + + // NodeMemberConditionType tracks whether a node is a member of the cluster.
+ // Some configurations may use remote nodes or ping nodes, which are nodes that are not members. + // For Two Node OpenShift with Fencing, we expect both nodes to be members. + // When True, the node is a member with reason "Member". This is the normal operating state. + // When False, the node is not a member with reason "NotMember". This is an unexpected state. + NodeMemberConditionType = "Member" + + // NodeFencingAvailableConditionType tracks whether a node can be fenced by at least one fencing agent. + // For Two Node OpenShift with Fencing, each node needs at least one healthy fencing agent to ensure + // that the cluster can recover from a node failure via STONITH (Shoot The Other Node In The Head). + // When True, at least one fencing agent is healthy with reason "FencingAvailable". + // When False, all fencing agents are unhealthy with reason "FencingUnavailable". This is a critical + // state that should degrade the operator. + NodeFencingAvailableConditionType = "FencingAvailable" + + // NodeFencingHealthyConditionType tracks whether all fencing agents for a node are healthy. + // This is an aggregate condition that reflects the health of all fencing agents targeting this node. + // When True, all fencing agents are healthy with reason "FencingHealthy". + // When False, one or more fencing agents are unhealthy with reason "FencingUnhealthy". Warning events + // should be emitted for failing agents, but the operator should not be degraded if FencingAvailable is True. + NodeFencingHealthyConditionType = "FencingHealthy" +) + +// NodeHealthy condition reasons +const ( + // NodeHealthyReasonHealthy means the node is healthy and operating normally. + NodeHealthyReasonHealthy = "NodeHealthy" + + // NodeHealthyReasonUnhealthy means the node has issues that need investigation. + NodeHealthyReasonUnhealthy = "NodeUnhealthy" +) + +// NodeOnline condition reasons +const ( + // NodeOnlineReasonOnline means the node is online. This is the normal operating state. + NodeOnlineReasonOnline = "Online" + + // NodeOnlineReasonOffline means the node is offline. + NodeOnlineReasonOffline = "Offline" +) + +// NodeInService condition reasons +const ( + // NodeInServiceReasonInService means the node is in service (not in maintenance mode). + // This is the normal operating state. + NodeInServiceReasonInService = "InService" + + // NodeInServiceReasonInMaintenance means the node is in maintenance mode. + // This is an unexpected state. + NodeInServiceReasonInMaintenance = "InMaintenance" +) + +// NodeActive condition reasons +const ( + // NodeActiveReasonActive means the node is active (not in standby mode). + // This is the normal operating state. + NodeActiveReasonActive = "Active" + + // NodeActiveReasonStandby means the node is in standby mode. + // This is an unexpected state. + NodeActiveReasonStandby = "Standby" +) + +// NodeReady condition reasons +const ( + // NodeReadyReasonReady means the node is ready (not in a pending state). + // This is the normal operating state. + NodeReadyReasonReady = "Ready" + + // NodeReadyReasonPending means the node is joining or leaving the cluster. + // This state is expected to be temporary. + NodeReadyReasonPending = "Pending" +) + +// NodeClean condition reasons +const ( + // NodeCleanReasonClean means the node is in a clean state. + // This is the normal operating state. + NodeCleanReasonClean = "Clean" + + // NodeCleanReasonUnclean means the node is in an unclean state. 
+ // Pacemaker was unable to confirm the node's state, which signifies issues in fencing, communication, or configuration. + // This is an unexpected state. + NodeCleanReasonUnclean = "Unclean" +) + +// NodeMember condition reasons +const ( + // NodeMemberReasonMember means the node is a member of the cluster. + // For Two Node OpenShift with Fencing, we expect both nodes to be members. This is the normal operating state. + NodeMemberReasonMember = "Member" + + // NodeMemberReasonNotMember means the node is not a member of the cluster. + // This is an unexpected state. + NodeMemberReasonNotMember = "NotMember" +) + +// NodeFencingAvailable condition reasons +const ( + // NodeFencingAvailableReasonAvailable means at least one fencing agent for this node is healthy. + // The cluster can fence this node if needed. This is the normal operating state. + NodeFencingAvailableReasonAvailable = "FencingAvailable" + + // NodeFencingAvailableReasonUnavailable means all fencing agents for this node are unhealthy. + // The cluster cannot fence this node, which compromises high availability. + // This is a critical state that should degrade the operator. + NodeFencingAvailableReasonUnavailable = "FencingUnavailable" +) + +// NodeFencingHealthy condition reasons +const ( + // NodeFencingHealthyReasonHealthy means all fencing agents for this node are healthy. + // This is the ideal operating state with full redundancy. + NodeFencingHealthyReasonHealthy = "FencingHealthy" + + // NodeFencingHealthyReasonUnhealthy means one or more fencing agents for this node are unhealthy. + // Warning events should be emitted for failing agents, but the operator should not be degraded + // if FencingAvailable is still True. + NodeFencingHealthyReasonUnhealthy = "FencingUnhealthy" +) + +// Resource-level condition types for PacemakerCluster.status.nodes[].resources[].conditions +const ( + // ResourceHealthyConditionType tracks the overall health of a pacemaker resource. + // This is an aggregate condition that reflects the health of all resource-level conditions. + // Specifically, it aggregates the following conditions: + // - ResourceInServiceConditionType + // - ResourceManagedConditionType + // - ResourceEnabledConditionType + // - ResourceOperationalConditionType + // - ResourceActiveConditionType + // - ResourceStartedConditionType + // - ResourceSchedulableConditionType + // When True, the resource is healthy with reason "ResourceHealthy". + // When False, the resource is unhealthy with reason "ResourceUnhealthy". + ResourceHealthyConditionType = "Healthy" + + // ResourceInServiceConditionType tracks whether a resource is in service (not in maintenance mode). + // Resources in maintenance mode are not monitored or moved by pacemaker. + // In Two Node OpenShift with Fencing, we do not expect any resources to be in maintenance mode. + // When True, the resource is in service with reason "InService". This is the normal operating state. + // When False, the resource is in maintenance mode with reason "InMaintenance". This is an unexpected state. + ResourceInServiceConditionType = "InService" + + // ResourceManagedConditionType tracks whether a resource is managed by pacemaker. + // Resources that are not managed by pacemaker are effectively invisible to the pacemaker HA logic. + // For Two Node OpenShift with Fencing, all resources are expected to be managed. + // When True, the resource is managed with reason "Managed". This is the normal operating state. 
+ // When False, the resource is not managed with reason "Unmanaged". This is an unexpected state. + ResourceManagedConditionType = "Managed" + + // ResourceEnabledConditionType tracks whether a resource is enabled. + // Resources that are disabled are stopped and not automatically managed or started by the cluster. + // In Two Node OpenShift with Fencing, we do not expect any resources to be disabled. + // When True, the resource is enabled with reason "Enabled". This is the normal operating state. + // When False, the resource is disabled with reason "Disabled". This is an unexpected state. + ResourceEnabledConditionType = "Enabled" + + // ResourceOperationalConditionType tracks whether a resource is operational (not failed). + // A failed resource is one that is not able to start or is in an error state. + // When True, the resource is operational with reason "Operational". This is the normal operating state. + // When False, the resource has failed with reason "Failed". This is an unexpected state. + ResourceOperationalConditionType = "Operational" + + // ResourceActiveConditionType tracks whether a resource is active. + // An active resource is running on a cluster node. + // In Two Node OpenShift with Fencing, all resources are expected to be active. + // When True, the resource is active with reason "Active". This is the normal operating state. + // When False, the resource is not active with reason "Inactive". This is an unexpected state. + ResourceActiveConditionType = "Active" + + // ResourceStartedConditionType tracks whether a resource is started. + // It's normal for a resource like etcd to become stopped during a quorum loss event because + // the pacemaker recovery logic will fence a node and restore etcd quorum on the surviving node as a cluster-of-one. + // A resource that stays stopped for an extended period of time is an unexpected state and should be investigated. + // When True, the resource is started with reason "Started". This is the normal operating state. + // When False, the resource is not started with reason "Stopped". This is expected to be temporary. + ResourceStartedConditionType = "Started" + + // ResourceSchedulableConditionType tracks whether a resource is schedulable (not blocked). + // A resource that is not schedulable is unable to start or move to a different node. + // In Two Node OpenShift with Fencing, we do not expect any resources to be unschedulable. + // When True, the resource is schedulable with reason "Schedulable". This is the normal operating state. + // When False, the resource is not schedulable with reason "Unschedulable". This is an unexpected state. + ResourceSchedulableConditionType = "Schedulable" +) + +// ResourceHealthy condition reasons +const ( + // ResourceHealthyReasonHealthy means the resource is healthy and operating normally. + ResourceHealthyReasonHealthy = "ResourceHealthy" + + // ResourceHealthyReasonUnhealthy means the resource has issues that need investigation. + ResourceHealthyReasonUnhealthy = "ResourceUnhealthy" +) + +// ResourceInService condition reasons +const ( + // ResourceInServiceReasonInService means the resource is in service (not in maintenance mode). + // This is the normal operating state. + ResourceInServiceReasonInService = "InService" + + // ResourceInServiceReasonInMaintenance means the resource is in maintenance mode. + // Resources in maintenance mode are not monitored or moved by pacemaker. This is an unexpected state.
+ ResourceInServiceReasonInMaintenance = "InMaintenance" +) + +// ResourceManaged condition reasons +const ( + // ResourceManagedReasonManaged means the resource is managed by pacemaker. + // This is the normal operating state. + ResourceManagedReasonManaged = "Managed" + + // ResourceManagedReasonUnmanaged means the resource is not managed by pacemaker. + // Resources that are not managed by pacemaker are effectively invisible to the pacemaker HA logic. + // This is an unexpected state. + ResourceManagedReasonUnmanaged = "Unmanaged" +) + +// ResourceEnabled condition reasons +const ( + // ResourceEnabledReasonEnabled means the resource is enabled. + // This is the normal operating state. + ResourceEnabledReasonEnabled = "Enabled" + + // ResourceEnabledReasonDisabled means the resource is disabled. + // Resources that are disabled are stopped and not automatically managed or started by the cluster. + // This is an unexpected state. + ResourceEnabledReasonDisabled = "Disabled" +) + +// ResourceOperational condition reasons +const ( + // ResourceOperationalReasonOperational means the resource is operational (not failed). + // This is the normal operating state. + ResourceOperationalReasonOperational = "Operational" + + // ResourceOperationalReasonFailed means the resource has failed. + // A failed resource is one that is not able to start or is in an error state. This is an unexpected state. + ResourceOperationalReasonFailed = "Failed" +) + +// ResourceActive condition reasons +const ( + // ResourceActiveReasonActive means the resource is active. + // An active resource is running on a cluster node. This is the normal operating state. + ResourceActiveReasonActive = "Active" + + // ResourceActiveReasonInactive means the resource is not active. + // This is an unexpected state. + ResourceActiveReasonInactive = "Inactive" +) + +// ResourceStarted condition reasons +const ( + // ResourceStartedReasonStarted means the resource is started. + // This is the normal operating state. + ResourceStartedReasonStarted = "Started" + + // ResourceStartedReasonStopped means the resource is stopped. + // It's normal for a resource like etcd to become stopped during a quorum loss event because + // the pacemaker recovery logic will fence a node and restore etcd quorum on the surviving node as a cluster-of-one. + // A resource that stays stopped for an extended period of time is an unexpected state and should be investigated. + ResourceStartedReasonStopped = "Stopped" +) + +// ResourceSchedulable condition reasons +const ( + // ResourceSchedulableReasonSchedulable means the resource is schedulable (not blocked). + // This is the normal operating state. + ResourceSchedulableReasonSchedulable = "Schedulable" + + // ResourceSchedulableReasonUnschedulable means the resource is not schedulable (blocked). + // A resource that is not schedulable is unable to start or move to a different node. This is an unexpected state. + ResourceSchedulableReasonUnschedulable = "Unschedulable" +) + +// PacemakerNodeAddressType represents the type of a node address. +// Currently only InternalIP is supported. +// +kubebuilder:validation:Enum=InternalIP +// +enum +type PacemakerNodeAddressType string + +const ( + // PacemakerNodeInternalIP is an internal IP address assigned to the node. + // This is typically the IP address used for intra-cluster communication. + PacemakerNodeInternalIP PacemakerNodeAddressType = "InternalIP" +) + +// PacemakerNodeAddress contains information for a node's address.
+// This is similar to corev1.NodeAddress but adds validation for IP addresses. +type PacemakerNodeAddress struct { + // type is the type of node address. + // Currently only "InternalIP" is supported. + // +required + Type PacemakerNodeAddressType `json:"type,omitempty"` + + // address is the node address. + // For InternalIP, this must be a valid global unicast IPv4 or IPv6 address in canonical form. + // Canonical form means the shortest standard representation (e.g., "192.168.1.1" not "192.168.001.001", + // or "2001:db8::1" not "2001:0db8::1"). Maximum length is 39 characters (full IPv6 address). + // Global unicast includes private/RFC1918 addresses but excludes loopback, link-local, and multicast. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=39 + // +kubebuilder:validation:XValidation:rule="isIP(self) && ip.isCanonical(self) && ip(self).isGlobalUnicast()",message="must be a valid global unicast IPv4 or IPv6 address in canonical form" + // +required + Address string `json:"address,omitempty"` +} + +// PacemakerClusterResourceName represents the name of a pacemaker resource. +// Fencing agents are tracked separately in the fencingAgents field. +// +kubebuilder:validation:Enum=Kubelet;Etcd +// +enum +type PacemakerClusterResourceName string + +// PacemakerClusterResourceName values +const ( + // PacemakerClusterResourceNameKubelet is the kubelet pacemaker resource. + // The kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. + PacemakerClusterResourceNameKubelet PacemakerClusterResourceName = "Kubelet" + + // PacemakerClusterResourceNameEtcd is the etcd pacemaker resource. + // The etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. + PacemakerClusterResourceNameEtcd PacemakerClusterResourceName = "Etcd" +) + +// FencingMethod represents the method used by a fencing agent to isolate failed nodes. +// Valid values are "Redfish" and "IPMI". +// +kubebuilder:validation:Enum=Redfish;IPMI +// +enum +type FencingMethod string + +// FencingMethod values +const ( + // FencingMethodRedfish uses Redfish, a standard RESTful API for server management. + FencingMethodRedfish FencingMethod = "Redfish" + + // FencingMethodIPMI uses IPMI (Intelligent Platform Management Interface), a hardware management interface. + FencingMethodIPMI FencingMethod = "IPMI" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PacemakerCluster represents the current state of the pacemaker cluster as reported by the pcs status command. +// PacemakerCluster is a cluster-scoped singleton resource. The name of this instance is "cluster". This +// resource provides a view into the health and status of a pacemaker-managed cluster in Two Node OpenShift with Fencing deployments. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=pacemakerclusters,scope=Cluster,singular=pacemakercluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2544 +// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=etcd,operatorOrdering=01,operatorComponent=two-node-fencing +// +openshift:enable:FeatureGate=DualReplica +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="PacemakerCluster must be named 'cluster'" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.status) || has(self.status)",message="status may not be removed once set" +type PacemakerCluster struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ObjectMeta `json:"metadata,omitempty"` + + // status contains the actual pacemaker cluster status information collected from the cluster. + // The goal of this status is to be able to quickly identify if pacemaker is in a healthy state. + // In Two Node OpenShift with Fencing, a healthy pacemaker cluster has 2 nodes, both of which have healthy kubelet, etcd, and fencing resources. + // This field is optional on creation - the status collector populates it immediately after creating + // the resource via the status subresource. + // +optional + Status PacemakerClusterStatus `json:"status,omitzero"` +} + +// PacemakerClusterStatus contains the actual pacemaker cluster status information. As part of validating the status +// object, we need to ensure that the lastUpdated timestamp may not be set to an earlier timestamp than the current value. +// The validation rule checks if oldSelf has lastUpdated before comparing, to handle the initial status creation case. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.lastUpdated) || self.lastUpdated >= oldSelf.lastUpdated",message="lastUpdated may not be set to an earlier timestamp" +type PacemakerClusterStatus struct { + // conditions represent the observations of the pacemaker cluster's current state. + // Known condition types are: "Healthy", "InService", "NodeCountAsExpected". + // The "Healthy" condition is an aggregate that tracks the overall health of the cluster. + // The "InService" condition tracks whether the cluster is in service (not in maintenance mode). + // The "NodeCountAsExpected" condition tracks whether the expected number of nodes are present. + // Each of these conditions is required, so the array must contain at least 3 items. + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=3 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'NodeCountAsExpected')",message="conditions must contain a condition of type NodeCountAsExpected" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // lastUpdated is the timestamp when this status was last updated. This is useful for identifying + // stale status reports. It must be a valid timestamp in RFC3339 format. 
Once set, this field cannot + // be removed and cannot be set to an earlier timestamp than the current value. + // +kubebuilder:validation:Format=date-time + // +required + LastUpdated metav1.Time `json:"lastUpdated,omitempty,omitzero"` + + // nodes provides detailed status for each control-plane node in the Pacemaker cluster. + // While Pacemaker supports up to 32 nodes, the limit is set to 5 (max OpenShift control-plane nodes). + // For Two Node OpenShift with Fencing, exactly 2 nodes are expected in a healthy cluster. + // An empty list indicates a catastrophic failure where Pacemaker reports no nodes. + // +listType=map + // +listMapKey=nodeName + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=5 + // +required + Nodes *[]PacemakerClusterNodeStatus `json:"nodes,omitempty"` +} + +// PacemakerClusterNodeStatus represents the status of a single node in the pacemaker cluster including +// the node's conditions and the health of critical resources running on that node. +type PacemakerClusterNodeStatus struct { + // conditions represent the observations of the node's current state. + // Known condition types are: "Healthy", "Online", "InService", "Active", "Ready", "Clean", "Member", + // "FencingAvailable", "FencingHealthy". + // The "Healthy" condition is an aggregate that tracks the overall health of the node. + // The "Online" condition tracks whether the node is online. + // The "InService" condition tracks whether the node is in service (not in maintenance mode). + // The "Active" condition tracks whether the node is active (not in standby mode). + // The "Ready" condition tracks whether the node is ready (not in a pending state). + // The "Clean" condition tracks whether the node is in a clean (status known) state. + // The "Member" condition tracks whether the node is a member of the cluster. + // The "FencingAvailable" condition tracks whether this node can be fenced by at least one healthy agent. + // The "FencingHealthy" condition tracks whether all fencing agents for this node are healthy. + // Each of these conditions is required, so the array must contain at least 9 items. 
+ // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=9 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Online')",message="conditions must contain a condition of type Online" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Ready')",message="conditions must contain a condition of type Ready" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Clean')",message="conditions must contain a condition of type Clean" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Member')",message="conditions must contain a condition of type Member" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'FencingAvailable')",message="conditions must contain a condition of type FencingAvailable" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'FencingHealthy')",message="conditions must contain a condition of type FencingHealthy" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // nodeName is the name of the node. This is expected to match the Kubernetes node's name, which must be a lowercase + // RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', starting and ending with + // an alphanumeric character, and be at most 253 characters in length. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="nodeName must be a lowercase RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +required + NodeName string `json:"nodeName,omitempty"` + + // addresses is a list of IP addresses for the node. + // Pacemaker allows multiple IP addresses for Corosync communication between nodes. + // The first address in this list is used for IP-based peer URLs for etcd membership. + // Each address must be a valid global unicast IPv4 or IPv6 address in canonical form + // (e.g., "192.168.1.1" not "192.168.001.001", or "2001:db8::1" not "2001:0db8::1"). + // This excludes loopback, link-local, and multicast addresses. + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + // +required + Addresses []PacemakerNodeAddress `json:"addresses,omitempty"` + + // resources contains the status of pacemaker resources scheduled on this node. + // Each resource entry includes the resource name and its health conditions. + // For Two Node OpenShift with Fencing, we track Kubelet and Etcd resources per node. + // Both resources are required to be present, so the array must contain at least 2 items. + // Valid resource names are "Kubelet" and "Etcd". + // Fencing agents are tracked separately in the fencingAgents field. 
+ // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=2 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.exists(r, r.name == 'Kubelet')",message="resources must contain a resource named Kubelet" + // +kubebuilder:validation:XValidation:rule="self.exists(r, r.name == 'Etcd')",message="resources must contain a resource named Etcd" + // +required + Resources []PacemakerClusterResourceStatus `json:"resources,omitempty"` + + // fencingAgents contains the status of fencing agents that can fence this node. + // Unlike resources (which are scheduled to run on this node), fencing agents are mapped + // to the node they can fence (their target), not the node where monitoring operations run. + // Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. + // A node is considered fence-capable if at least one fencing agent is healthy. + // Expected to have 1 fencing agent per node, but up to 8 are supported for redundancy. + // Names must be unique within this array. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="fencing agent names must be unique" + // +required + FencingAgents []PacemakerClusterFencingAgentStatus `json:"fencingAgents,omitempty"` +} + +// PacemakerClusterFencingAgentStatus represents the status of a fencing agent that can fence a node. +// Fencing agents are STONITH (Shoot The Other Node In The Head) devices used to isolate failed nodes. +// Unlike regular pacemaker resources, fencing agents are mapped to their target node (the node they +// can fence), not the node where their monitoring operations are scheduled. +type PacemakerClusterFencingAgentStatus struct { + // conditions represent the observations of the fencing agent's current state. + // Known condition types are: "Healthy", "InService", "Managed", "Enabled", "Operational", + // "Active", "Started", "Schedulable". + // The "Healthy" condition is an aggregate that tracks the overall health of the fencing agent. + // The "InService" condition tracks whether the fencing agent is in service (not in maintenance mode). + // The "Managed" condition tracks whether the fencing agent is managed by pacemaker. + // The "Enabled" condition tracks whether the fencing agent is enabled. + // The "Operational" condition tracks whether the fencing agent is operational (not failed). + // The "Active" condition tracks whether the fencing agent is active (available to be used). + // The "Started" condition tracks whether the fencing agent is started. + // The "Schedulable" condition tracks whether the fencing agent is schedulable (not blocked). + // Each of these conditions is required, so the array must contain at least 8 items. 
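+ // The fence-capable rule described above can be evaluated from these
+ // conditions. An illustrative sketch (not part of this API), where agents is
+ // a node's FencingAgents slice and meta is k8s.io/apimachinery/pkg/api/meta:
+ //
+ //	fenceCapable := false
+ //	for _, agent := range agents {
+ //		if meta.IsStatusConditionTrue(agent.Conditions, "Healthy") {
+ //			fenceCapable = true
+ //			break
+ //		}
+ //	}
+ //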
+ // +listType=map
+ // +listMapKey=type
+ // +kubebuilder:validation:MinItems=8
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Managed')",message="conditions must contain a condition of type Managed"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Enabled')",message="conditions must contain a condition of type Enabled"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Operational')",message="conditions must contain a condition of type Operational"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Started')",message="conditions must contain a condition of type Started"
+ // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Schedulable')",message="conditions must contain a condition of type Schedulable"
+ // +required
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // name is the unique identifier for this fencing agent (e.g., "master-0_redfish").
+ // The name must be unique within the fencingAgents array for this node.
+ // It may contain alphanumeric characters, dots, hyphens, and underscores.
+ // Maximum length is 300 characters, providing headroom beyond the typical format of
+ // <nodeName>_<method> (253 for RFC 1123 node name + 1 underscore + type).
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=300
+ // +kubebuilder:validation:XValidation:rule="self.matches('^[a-zA-Z0-9._-]+$')",message="name must contain only alphanumeric characters, dots, hyphens, and underscores"
+ // +required
+ Name string `json:"name,omitempty"`
+
+ // method is the fencing method used by this agent.
+ // Valid values are "Redfish" and "IPMI".
+ // Redfish is a standard RESTful API for server management.
+ // IPMI (Intelligent Platform Management Interface) is a hardware management interface.
+ // +required
+ Method FencingMethod `json:"method,omitempty"`
+}
+
+// PacemakerClusterResourceStatus represents the status of a pacemaker resource scheduled on a node.
+// A pacemaker resource is a unit of work managed by pacemaker. In pacemaker terminology, resources are services or
+// applications that pacemaker monitors, starts, stops, and moves between nodes to maintain high availability.
+// For Two Node OpenShift with Fencing, we track two resources per node:
+// - Kubelet (the Kubernetes node agent and a prerequisite for etcd)
+// - Etcd (the distributed key-value store)
+//
+// Fencing agents are tracked separately in the fencingAgents field because they are mapped to
+// their target node (the node they can fence), not the node where monitoring operations are scheduled.
+type PacemakerClusterResourceStatus struct {
+ // conditions represent the observations of the resource's current state.
+ // Known condition types are: "Healthy", "InService", "Managed", "Enabled", "Operational",
+ // "Active", "Started", "Schedulable".
+ // The "Healthy" condition is an aggregate that tracks the overall health of the resource.
+ // The "InService" condition tracks whether the resource is in service (not in maintenance mode). + // The "Managed" condition tracks whether the resource is managed by pacemaker. + // The "Enabled" condition tracks whether the resource is enabled. + // The "Operational" condition tracks whether the resource is operational (not failed). + // The "Active" condition tracks whether the resource is active (available to be used). + // The "Started" condition tracks whether the resource is started. + // The "Schedulable" condition tracks whether the resource is schedulable (not blocked). + // Each of these conditions is required, so the array must contain at least 8 items. + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=8 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Managed')",message="conditions must contain a condition of type Managed" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Enabled')",message="conditions must contain a condition of type Enabled" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Operational')",message="conditions must contain a condition of type Operational" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Started')",message="conditions must contain a condition of type Started" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Schedulable')",message="conditions must contain a condition of type Schedulable" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // name is the name of the pacemaker resource. + // Valid values are "Kubelet" and "Etcd". + // The Kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. + // The Etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. + // Fencing agents are tracked separately in the node's fencingAgents field. + // +required + Name PacemakerClusterResourceName `json:"name,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PacemakerClusterList contains a list of PacemakerCluster objects. PacemakerCluster is a cluster-scoped singleton +// resource; only one instance named "cluster" may exist. This list type exists only to satisfy Kubernetes API +// conventions. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type PacemakerClusterList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items is a list of PacemakerCluster objects. 
+ Items []PacemakerCluster `json:"items"` +} diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..17bf97851 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,210 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerCluster) DeepCopyInto(out *PacemakerCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerCluster. +func (in *PacemakerCluster) DeepCopy() *PacemakerCluster { + if in == nil { + return nil + } + out := new(PacemakerCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacemakerCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterFencingAgentStatus) DeepCopyInto(out *PacemakerClusterFencingAgentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterFencingAgentStatus. +func (in *PacemakerClusterFencingAgentStatus) DeepCopy() *PacemakerClusterFencingAgentStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterFencingAgentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterList) DeepCopyInto(out *PacemakerClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacemakerCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterList. +func (in *PacemakerClusterList) DeepCopy() *PacemakerClusterList { + if in == nil { + return nil + } + out := new(PacemakerClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacemakerClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacemakerClusterNodeStatus) DeepCopyInto(out *PacemakerClusterNodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]PacemakerNodeAddress, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]PacemakerClusterResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FencingAgents != nil { + in, out := &in.FencingAgents, &out.FencingAgents + *out = make([]PacemakerClusterFencingAgentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterNodeStatus. +func (in *PacemakerClusterNodeStatus) DeepCopy() *PacemakerClusterNodeStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterResourceStatus) DeepCopyInto(out *PacemakerClusterResourceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterResourceStatus. +func (in *PacemakerClusterResourceStatus) DeepCopy() *PacemakerClusterResourceStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterStatus) DeepCopyInto(out *PacemakerClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new([]PacemakerClusterNodeStatus) + if **in != nil { + in, out := *in, *out + *out = make([]PacemakerClusterNodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterStatus. +func (in *PacemakerClusterStatus) DeepCopy() *PacemakerClusterStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerNodeAddress) DeepCopyInto(out *PacemakerNodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerNodeAddress. 
+func (in *PacemakerNodeAddress) DeepCopy() *PacemakerNodeAddress { + if in == nil { + return nil + } + out := new(PacemakerNodeAddress) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..f5a64682a --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,23 @@ +pacemakerclusters.etcd.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2544 + CRDName: pacemakerclusters.etcd.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DualReplica + FilenameOperatorName: etcd + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + GroupName: etcd.openshift.io + HasStatus: true + KindName: PacemakerCluster + Labels: {} + PluralName: pacemakerclusters + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - DualReplica + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..62e1c3ebd --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,89 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PacemakerCluster = map[string]string{ + "": "PacemakerCluster represents the current state of the pacemaker cluster as reported by the pcs status command. PacemakerCluster is a cluster-scoped singleton resource. The name of this instance is \"cluster\". This resource provides a view into the health and status of a pacemaker-managed cluster in Two Node OpenShift with Fencing deployments.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status contains the actual pacemaker cluster status information collected from the cluster. The goal of this status is to be able to quickly identify if pacemaker is in a healthy state. In Two Node OpenShift with Fencing, a healthy pacemaker cluster has 2 nodes, both of which have healthy kubelet, etcd, and fencing resources. 
This field is optional on creation - the status collector populates it immediately after creating the resource via the status subresource.",
+}
+
+func (PacemakerCluster) SwaggerDoc() map[string]string {
+ return map_PacemakerCluster
+}
+
+var map_PacemakerClusterFencingAgentStatus = map[string]string{
+ "": "PacemakerClusterFencingAgentStatus represents the status of a fencing agent that can fence a node. Fencing agents are STONITH (Shoot The Other Node In The Head) devices used to isolate failed nodes. Unlike regular pacemaker resources, fencing agents are mapped to their target node (the node they can fence), not the node where their monitoring operations are scheduled.",
+ "conditions": "conditions represent the observations of the fencing agent's current state. Known condition types are: \"Healthy\", \"InService\", \"Managed\", \"Enabled\", \"Operational\", \"Active\", \"Started\", \"Schedulable\". The \"Healthy\" condition is an aggregate that tracks the overall health of the fencing agent. The \"InService\" condition tracks whether the fencing agent is in service (not in maintenance mode). The \"Managed\" condition tracks whether the fencing agent is managed by pacemaker. The \"Enabled\" condition tracks whether the fencing agent is enabled. The \"Operational\" condition tracks whether the fencing agent is operational (not failed). The \"Active\" condition tracks whether the fencing agent is active (available to be used). The \"Started\" condition tracks whether the fencing agent is started. The \"Schedulable\" condition tracks whether the fencing agent is schedulable (not blocked). Each of these conditions is required, so the array must contain at least 8 items.",
+ "name": "name is the unique identifier for this fencing agent (e.g., \"master-0_redfish\"). The name must be unique within the fencingAgents array for this node. It may contain alphanumeric characters, dots, hyphens, and underscores. Maximum length is 300 characters, providing headroom beyond the typical format of <nodeName>_<method> (253 for RFC 1123 node name + 1 underscore + type).",
+ "method": "method is the fencing method used by this agent. Valid values are \"Redfish\" and \"IPMI\". Redfish is a standard RESTful API for server management. IPMI (Intelligent Platform Management Interface) is a hardware management interface.",
+}
+
+func (PacemakerClusterFencingAgentStatus) SwaggerDoc() map[string]string {
+ return map_PacemakerClusterFencingAgentStatus
+}
+
+var map_PacemakerClusterList = map[string]string{
+ "": "PacemakerClusterList contains a list of PacemakerCluster objects. PacemakerCluster is a cluster-scoped singleton resource; only one instance named \"cluster\" may exist. This list type exists only to satisfy Kubernetes API conventions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of PacemakerCluster objects.", +} + +func (PacemakerClusterList) SwaggerDoc() map[string]string { + return map_PacemakerClusterList +} + +var map_PacemakerClusterNodeStatus = map[string]string{ + "": "PacemakerClusterNodeStatus represents the status of a single node in the pacemaker cluster including the node's conditions and the health of critical resources running on that node.", + "conditions": "conditions represent the observations of the node's current state. Known condition types are: \"Healthy\", \"Online\", \"InService\", \"Active\", \"Ready\", \"Clean\", \"Member\", \"FencingAvailable\", \"FencingHealthy\". The \"Healthy\" condition is an aggregate that tracks the overall health of the node. The \"Online\" condition tracks whether the node is online. The \"InService\" condition tracks whether the node is in service (not in maintenance mode). The \"Active\" condition tracks whether the node is active (not in standby mode). The \"Ready\" condition tracks whether the node is ready (not in a pending state). The \"Clean\" condition tracks whether the node is in a clean (status known) state. The \"Member\" condition tracks whether the node is a member of the cluster. The \"FencingAvailable\" condition tracks whether this node can be fenced by at least one healthy agent. The \"FencingHealthy\" condition tracks whether all fencing agents for this node are healthy. Each of these conditions is required, so the array must contain at least 9 items.", + "nodeName": "nodeName is the name of the node. This is expected to match the Kubernetes node's name, which must be a lowercase RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', starting and ending with an alphanumeric character, and be at most 253 characters in length.", + "addresses": "addresses is a list of IP addresses for the node. Pacemaker allows multiple IP addresses for Corosync communication between nodes. The first address in this list is used for IP-based peer URLs for etcd membership. Each address must be a valid global unicast IPv4 or IPv6 address in canonical form (e.g., \"192.168.1.1\" not \"192.168.001.001\", or \"2001:db8::1\" not \"2001:0db8::1\"). This excludes loopback, link-local, and multicast addresses.", + "resources": "resources contains the status of pacemaker resources scheduled on this node. Each resource entry includes the resource name and its health conditions. For Two Node OpenShift with Fencing, we track Kubelet and Etcd resources per node. Both resources are required to be present, so the array must contain at least 2 items. Valid resource names are \"Kubelet\" and \"Etcd\". Fencing agents are tracked separately in the fencingAgents field.", + "fencingAgents": "fencingAgents contains the status of fencing agents that can fence this node. Unlike resources (which are scheduled to run on this node), fencing agents are mapped to the node they can fence (their target), not the node where monitoring operations run. Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. A node is considered fence-capable if at least one fencing agent is healthy. Expected to have 1 fencing agent per node, but up to 8 are supported for redundancy. 
Names must be unique within this array.", +} + +func (PacemakerClusterNodeStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterNodeStatus +} + +var map_PacemakerClusterResourceStatus = map[string]string{ + "": "PacemakerClusterResourceStatus represents the status of a pacemaker resource scheduled on a node. A pacemaker resource is a unit of work managed by pacemaker. In pacemaker terminology, resources are services or applications that pacemaker monitors, starts, stops, and moves between nodes to maintain high availability. For Two Node OpenShift with Fencing, we track two resources per node:\n - Kubelet (the Kubernetes node agent and a prerequisite for etcd)\n - Etcd (the distributed key-value store)\n\nFencing agents are tracked separately in the fencingAgents field because they are mapped to their target node (the node they can fence), not the node where monitoring operations are scheduled.", + "conditions": "conditions represent the observations of the resource's current state. Known condition types are: \"Healthy\", \"InService\", \"Managed\", \"Enabled\", \"Operational\", \"Active\", \"Started\", \"Schedulable\". The \"Healthy\" condition is an aggregate that tracks the overall health of the resource. The \"InService\" condition tracks whether the resource is in service (not in maintenance mode). The \"Managed\" condition tracks whether the resource is managed by pacemaker. The \"Enabled\" condition tracks whether the resource is enabled. The \"Operational\" condition tracks whether the resource is operational (not failed). The \"Active\" condition tracks whether the resource is active (available to be used). The \"Started\" condition tracks whether the resource is started. The \"Schedulable\" condition tracks whether the resource is schedulable (not blocked). Each of these conditions is required, so the array must contain at least 8 items.", + "name": "name is the name of the pacemaker resource. Valid values are \"Kubelet\" and \"Etcd\". The Kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. The Etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. Fencing agents are tracked separately in the node's fencingAgents field.", +} + +func (PacemakerClusterResourceStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterResourceStatus +} + +var map_PacemakerClusterStatus = map[string]string{ + "": "PacemakerClusterStatus contains the actual pacemaker cluster status information. As part of validating the status object, we need to ensure that the lastUpdated timestamp may not be set to an earlier timestamp than the current value. The validation rule checks if oldSelf has lastUpdated before comparing, to handle the initial status creation case.", + "conditions": "conditions represent the observations of the pacemaker cluster's current state. Known condition types are: \"Healthy\", \"InService\", \"NodeCountAsExpected\". The \"Healthy\" condition is an aggregate that tracks the overall health of the cluster. The \"InService\" condition tracks whether the cluster is in service (not in maintenance mode). The \"NodeCountAsExpected\" condition tracks whether the expected number of nodes are present. Each of these conditions is required, so the array must contain at least 3 items.", + "lastUpdated": "lastUpdated is the timestamp when this status was last updated. This is useful for identifying stale status reports. It must be a valid timestamp in RFC3339 format. 
Once set, this field cannot be removed and cannot be set to an earlier timestamp than the current value.", + "nodes": "nodes provides detailed status for each control-plane node in the Pacemaker cluster. While Pacemaker supports up to 32 nodes, the limit is set to 5 (max OpenShift control-plane nodes). For Two Node OpenShift with Fencing, exactly 2 nodes are expected in a healthy cluster. An empty list indicates a catastrophic failure where Pacemaker reports no nodes.", +} + +func (PacemakerClusterStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterStatus +} + +var map_PacemakerNodeAddress = map[string]string{ + "": "PacemakerNodeAddress contains information for a node's address. This is similar to corev1.NodeAddress but adds validation for IP addresses.", + "type": "type is the type of node address. Currently only \"InternalIP\" is supported.", + "address": "address is the node address. For InternalIP, this must be a valid global unicast IPv4 or IPv6 address in canonical form. Canonical form means the shortest standard representation (e.g., \"192.168.1.1\" not \"192.168.001.001\", or \"2001:db8::1\" not \"2001:0db8::1\"). Maximum length is 39 characters (full IPv6 address). Global unicast includes private/RFC1918 addresses but excludes loopback, link-local, and multicast.", +} + +func (PacemakerNodeAddress) SwaggerDoc() map[string]string { + return map_PacemakerNodeAddress +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index fd8d9fa77..adfd5a7e7 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -5,13 +5,15 @@ | EventedPLEG| | | | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | | | MultiArchInstallAzure| | | | | | | | | -| NewOLMBoxCutterRuntime| | | | | | | | | | ShortCertRotation| | | | | | | | | +| ClusterAPIComputeInstall| | | Enabled | Enabled | | | | | +| ClusterAPIControlPlaneInstall| | | Enabled | Enabled | | | | | | ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | | | | Example2| | | Enabled | Enabled | | | | | | ExternalSnapshotMetadata| | | Enabled | Enabled | | | | | -| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | | | +| KMSEncryptionProvider| | | Enabled | Enabled | | | | | | NetworkConnect| | | Enabled | Enabled | | | | | +| NewOLMBoxCutterRuntime| | | | Enabled | | | | Enabled | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | | | Enabled | | NewOLMPreflightPermissionChecks| | | | Enabled | | | | Enabled | | NoRegistryClusterInstall| | | | Enabled | | | | Enabled | @@ -30,6 +32,7 @@ | BootcNodeManagement| | | Enabled | Enabled | | | Enabled | Enabled | | CBORServingAndStorage| | | Enabled | Enabled | | | Enabled | Enabled | | CRDCompatibilityRequirementOperator| | | Enabled | Enabled | | | Enabled | Enabled | +| CRIOCredentialProviderConfig| | | Enabled | Enabled | | | Enabled | Enabled | | ClientsPreferCBOR| | | Enabled | Enabled | | | Enabled | Enabled | | ClusterAPIInstallIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | | ClusterAPIMachineManagement| | | Enabled | Enabled | | | Enabled | Enabled | @@ -38,8 +41,10 @@ | ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | Enabled | Enabled | | ConfigurablePKI| | | Enabled | Enabled | | | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | | | Enabled | Enabled | +| DRAPartitionableDevices| | | Enabled | Enabled | | | Enabled | Enabled | | 
DualReplica| | | Enabled | Enabled | | | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | +| EVPN| | | Enabled | Enabled | | | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | | | Enabled | Enabled | | EventTTL| | | Enabled | Enabled | | | Enabled | Enabled | | Example| | | Enabled | Enabled | | | Enabled | Enabled | @@ -48,19 +53,19 @@ | GCPCustomAPIEndpoints| | | Enabled | Enabled | | | Enabled | Enabled | | GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | | | Enabled | Enabled | | GCPDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| GatewayAPIWithoutOLM| | | Enabled | Enabled | | | Enabled | Enabled | | HyperShiftOnlyDynamicResourceAllocation| Enabled | | Enabled | | Enabled | | Enabled | | | ImageModeStatusReporting| | | Enabled | Enabled | | | Enabled | Enabled | +| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | | | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | | | Enabled | Enabled | | IrreconcilableMachineConfig| | | Enabled | Enabled | | | Enabled | Enabled | -| KMSEncryptionProvider| | | Enabled | Enabled | | | Enabled | Enabled | +| KMSEncryption| | | Enabled | Enabled | | | Enabled | Enabled | | MachineAPIMigration| | | Enabled | Enabled | | | Enabled | Enabled | -| ManagedBootImagesCPMS| | | Enabled | Enabled | | | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | | | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | | | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | | | Enabled | Enabled | | MultiDiskSetup| | | Enabled | Enabled | | | Enabled | Enabled | -| MutableCSINodeAllocatableCount| | | Enabled | Enabled | | | Enabled | Enabled | | MutatingAdmissionPolicy| | | Enabled | Enabled | | | Enabled | Enabled | | NewOLM| | Enabled | | Enabled | | Enabled | | Enabled | | NewOLMOwnSingleNamespace| | Enabled | | Enabled | | Enabled | | Enabled | @@ -75,12 +80,8 @@ | VSphereHostVMGroupZonal| | | Enabled | Enabled | | | Enabled | Enabled | | VSphereMixedNodeEnv| | | Enabled | Enabled | | | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | | | Enabled | Enabled | -| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ExternalOIDCWithUIDAndExtraClaimMappings| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -95,16 +96,12 @@ | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAzure| Enabled | Enabled | Enabled | Enabled | Enabled 
| Enabled | Enabled | Enabled | +| ManagedBootImagesCPMS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesvSphere| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MutableCSINodeAllocatableCount| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| PreconfiguredUDNAddresses| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -115,4 +112,3 @@ | UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VolumeAttributesClass| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 9159b43d3..4504ddb8c 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -131,13 +131,13 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + FeatureGateCRIOCredentialProviderConfig = newFeatureGate("CRIOCredentialProviderConfig"). + reportProblemsToJiraComponent("node"). + contactPerson("QiWang"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1861"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). reportProblemsToJiraComponent("splat"). @@ -163,30 +163,6 @@ var ( enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix"). 
- reportProblemsToJiraComponent("Cloud Compute / ControlPlaneMachineSet"). - contactPerson("chiragkyal"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1714"). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("tssurya"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkSegmentation = newFeatureGate("NetworkSegmentation"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("tssurya"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1623"). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateNetworkConnect = newFeatureGate("NetworkConnect"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). @@ -195,37 +171,13 @@ var ( enableIn(configv1.DevPreviewNoUpgrade). mustRegister() - FeatureGateAdditionalRoutingCapabilities = newFeatureGate("AdditionalRoutingCapabilities"). - reportProblemsToJiraComponent("Networking/cluster-network-operator"). - contactPerson("jcaamano"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateRouteAdvertisements = newFeatureGate("RouteAdvertisements"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("jcaamano"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("pliu"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). - reportProblemsToJiraComponent("Networking/cluster-network-operator"). - contactPerson("kyrtapz"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + FeatureGateEVPN = newFeatureGate("EVPN"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("jcaamano"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1862"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() FeatureGateOVNObservability = newFeatureGate("OVNObservability"). reportProblemsToJiraComponent("Networking"). @@ -358,7 +310,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1818"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateBootImageSkewEnforcement = newFeatureGate("BootImageSkewEnforcement"). @@ -409,14 +361,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateVolumeAttributesClass = newFeatureGate("VolumeAttributesClass"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("dfajmon"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/3751"). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("fbertina"). @@ -518,6 +462,7 @@ var ( contactPerson("pegoncal"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1890"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). @@ -618,14 +563,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateProcMountType = newFeatureGate("ProcMountType"). - reportProblemsToJiraComponent("Node"). - contactPerson("haircommander"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). - mustRegister() - FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). reportProblemsToJiraComponent("SPLAT"). contactPerson("rvanderp"). @@ -638,8 +575,8 @@ var ( reportProblemsToJiraComponent("Networking/router"). contactPerson("miciah"). productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1687"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateMinimumKubeletVersion = newFeatureGate("MinimumKubeletVersion"). @@ -663,9 +600,17 @@ var ( contactPerson("swghosh"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1682"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade). mustRegister() + FeatureGateKMSEncryption = newFeatureGate("KMSEncryption"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("ardaguclu"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1900"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateHighlyAvailableArbiter = newFeatureGate("HighlyAvailableArbiter"). reportProblemsToJiraComponent("Two Node with Arbiter"). contactPerson("eggfoobar"). @@ -790,14 +735,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("kyrtapz"). - productScope(ocpSpecific). 
- enhancementPR("https://github.com/openshift/enhancements/pull/1793"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). - mustRegister() - FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager"). contactPerson("mtulio"). @@ -913,7 +850,7 @@ var ( contactPerson("jsafrane"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4876"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateOSStreams = newFeatureGate("OSStreams"). reportProblemsToJiraComponent("MachineConfigOperator"). @@ -954,6 +891,14 @@ var ( enableForClusterProfile(Hypershift, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() + FeatureGateDRAPartitionableDevices = newFeatureGate("DRAPartitionableDevices"). + reportProblemsToJiraComponent("Node"). + contactPerson("harche"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4815"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateConfigurablePKI = newFeatureGate("ConfigurablePKI"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("sanchezl"). @@ -961,4 +906,29 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1882"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + + FeatureGateClusterAPIControlPlaneInstall = newFeatureGate("ClusterAPIControlPlaneInstall"). + reportProblemsToJiraComponent("Installer / openshift-installer"). + contactPerson("patrickdillon"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1465"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + + FeatureGateClusterAPIComputeInstall = newFeatureGate("ClusterAPIComputeInstall"). + reportProblemsToJiraComponent("Installer / openshift-installer"). + contactPerson("patrickdillon"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1465"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + + FeatureGateGatewayAPIWithoutOLM = newFeatureGate("GatewayAPIWithoutOLM"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1933"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() ) + diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go index dd11fdf66..a92c0b9bb 100644 --- a/vendor/github.com/openshift/api/features/legacyfeaturegates.go +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -7,10 +7,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "AWSEFSDriverVolumeMetrics", // never add to this list, if you think you have an exception ask @deads2k - "AdditionalRoutingCapabilities", - // never add to this list, if you think you have an exception ask @deads2k - "AdminNetworkPolicy", - // never add to this list, if you think you have an exception ask @deads2k "AlibabaPlatform", // never add to this list, if you think you have an exception ask @deads2k "AutomatedEtcdBackup", @@ -79,12 +75,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "MultiArchInstallGCP", // never add to this list, if you think you have an exception ask @deads2k - "NetworkDiagnosticsConfig", - // never add to this list, if you think you have an exception ask @deads2k - "NetworkLiveMigration", - // never add to this list, if you think you have an exception ask @deads2k - "NetworkSegmentation", - // never add to this list, if you think you have an exception ask @deads2k "NewOLM", // never add to this list, if you think you have an exception ask @deads2k "OVNObservability", @@ -95,8 +85,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "PrivateHostedZoneAWS", // never add to this list, if you think you have an exception ask @deads2k - "RouteAdvertisements", - // never add to this list, if you think you have an exception ask @deads2k "RouteExternalCertificate", // never add to this list, if you think you have an exception ask @deads2k "SetEIPForNLBIngressController", diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index ea5f34970..e4574e7c4 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -55,6 +55,7 @@ import ( "github.com/openshift/api/cloudnetwork" "github.com/openshift/api/config" "github.com/openshift/api/console" + "github.com/openshift/api/etcd" "github.com/openshift/api/helm" "github.com/openshift/api/image" "github.com/openshift/api/imageregistry" @@ -91,6 +92,7 @@ var ( build.Install, config.Install, console.Install, + etcd.Install, helm.Install, image.Install, imageregistry.Install, diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index d7661cf38..25ffc9f46 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -53,10 +53,9 @@ type ControlPlaneMachineSetSpec struct { // For example, if machineNamePrefix is set to 'control-plane', // and three machines are created, their names might be: // control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2 - // +openshift:validation:FeatureGateAwareXValidation:featureGate=CPMSMachineNamePrefix,rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). 
Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted." + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lowercase alphanumeric characters, hyphens ('-'), and periods ('.'). Each block, separated by periods, must start and end with an alphanumeric character. Hyphens are not allowed at the start or end of a block, and consecutive periods are not permitted." // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=245 - // +openshift:enable:FeatureGate=CPMSMachineNamePrefix // +optional MachineNamePrefix string `json:"machineNamePrefix,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml index 7be04ec84..b001170fa 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml @@ -7,7 +7,6 @@ controlplanemachinesets.machine.openshift.io: Capability: MachineAPI Category: "" FeatureGates: - - CPMSMachineNamePrefix - MachineAPIMigration FilenameOperatorName: control-plane-machine-set FilenameOperatorOrdering: "01" diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index d1d5941fa..e3508d667 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -331,9 +331,16 @@ type Filter struct { // TagSpecification is the name/value pair for a tag type TagSpecification struct { - // name of the tag + // name of the tag. + // This field is required and must be a non-empty string. + // Must be between 1 and 128 characters in length. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required Name string `json:"name"` - // value of the tag + // value of the tag. + // When omitted, this creates a tag with an empty string as the value. + // +optional Value string `json:"value"` } @@ -407,6 +414,26 @@ type AWSMachineProviderStatus struct { // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` + // dedicatedHost tracks the dynamically allocated dedicated host. + // This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). + // When omitted, this indicates that the dedicated host has not yet been allocated, or allocation is in progress. + // +optional + DedicatedHost *DedicatedHostStatus `json:"dedicatedHost,omitempty"` +} + +// DedicatedHostStatus defines the observed state of a dynamically allocated dedicated host +// associated with an AWSMachine. This struct is used to track the ID of the dedicated host. +type DedicatedHostStatus struct { + // id tracks the dynamically allocated dedicated host ID. + // This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). + // The value must start with "h-" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). + // The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. 
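+ // For example, "h-0123456789abcdef0" (17 hexadecimal characters) is a current-format
+ // host ID, while "h-01234567" (8 hexadecimal characters) is a legacy-format host ID.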
+ // Must be either 10 or 19 characters in length. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="id must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" + // +kubebuilder:validation:MinLength=10 + // +kubebuilder:validation:MaxLength=19 + // +required + ID string `json:"id,omitempty"` } // MarketType describes the market type of an EC2 Instance @@ -454,21 +481,77 @@ type HostAffinity string const ( // HostAffinityAnyAvailable lets the platform select any available dedicated host. + HostAffinityAnyAvailable HostAffinity = "AnyAvailable" // HostAffinityDedicatedHost requires specifying a particular host via dedicatedHost.host.hostID. HostAffinityDedicatedHost HostAffinity = "DedicatedHost" ) +// AllocationStrategy selects how a dedicated host is provided to the system for assigning to the instance. +// +kubebuilder:validation:Enum:=UserProvided;Dynamic +// +enum +type AllocationStrategy string + +const ( + // AllocationStrategyUserProvided specifies that the system should assign instances to a user-provided dedicated host. + AllocationStrategyUserProvided AllocationStrategy = "UserProvided" + + // AllocationStrategyDynamic specifies that the system should dynamically allocate a dedicated host for instances. + AllocationStrategyDynamic AllocationStrategy = "Dynamic" +) + // DedicatedHost represents the configuration for the usage of dedicated host. +// +kubebuilder:validation:XValidation:rule="self.allocationStrategy == 'UserProvided' ? has(self.id) : !has(self.id)",message="id is required when allocationStrategy is UserProvided, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.dynamicHostAllocation) ? self.allocationStrategy == 'Dynamic' : true",message="dynamicHostAllocation is only allowed when allocationStrategy is Dynamic" +// +union type DedicatedHost struct { + // allocationStrategy specifies if the dedicated host will be provided by the admin through the id field or if the host will be dynamically allocated. + // Valid values are UserProvided and Dynamic. + // When omitted, the value defaults to "UserProvided", which requires the id field to be set. + // When allocationStrategy is set to UserProvided, an ID of the dedicated host to assign must be provided. + // When allocationStrategy is set to Dynamic, a dedicated host will be allocated and used to assign instances. + // When allocationStrategy is set to Dynamic, and dynamicHostAllocation is configured, a dedicated host will be allocated and the tags in dynamicHostAllocation will be assigned to that host. + // +optional + // +unionDiscriminator + // +default="UserProvided" + AllocationStrategy *AllocationStrategy `json:"allocationStrategy,omitempty"` + // id identifies the AWS Dedicated Host on which the instance must run. // The value must start with "h-" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). // The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. // Must be either 10 or 19 characters in length. - // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="hostID must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" + // This field is required when allocationStrategy is UserProvided, and forbidden otherwise. 
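+ // As an illustrative sketch (the tag name and value are placeholders), the
+ // two valid shapes of this union are:
+ //
+ //	allocationStrategy: UserProvided
+ //	id: h-0123456789abcdef0
+ //
+ // and:
+ //
+ //	allocationStrategy: Dynamic
+ //	dynamicHostAllocation:
+ //	  tags:
+ //	  - name: team
+ //	    value: etcd
+ //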
+ // When omitted with allocationStrategy set to Dynamic, the platform will dynamically allocate a dedicated host. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="id must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" // +kubebuilder:validation:MinLength=10 // +kubebuilder:validation:MaxLength=19 - // +required + // +optional + // +unionMember=UserProvided ID string `json:"id,omitempty"` + + // dynamicHostAllocation specifies tags to apply to a dynamically allocated dedicated host. + // This field is only allowed when allocationStrategy is Dynamic, and is mutually exclusive with id. + // When specified, a dedicated host will be allocated with the provided tags applied. + // When omitted (and allocationStrategy is Dynamic), a dedicated host will be allocated without any additional tags. + // +optional + // +unionMember=Dynamic + DynamicHostAllocation *DynamicHostAllocationSpec `json:"dynamicHostAllocation,omitempty"` +} + +// DynamicHostAllocationSpec defines the configuration for dynamic dedicated host allocation. +// This specification always allocates exactly one dedicated host per machine. +// At least one property must be specified when this struct is used. +// Currently only Tags are available for configuring, but in the future more configs may become available. +// +kubebuilder:validation:MinProperties=1 +type DynamicHostAllocationSpec struct { + // tags specifies a set of key-value pairs to apply to the allocated dedicated host. + // When omitted, no additional user-defined tags will be applied to the allocated host. + // A maximum of 50 tags can be specified. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=50 + // +listType=map + // +listMapKey=name + // +optional + Tags *[]TagSpecification `json:"tags,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index 9510b49fd..6bfe85081 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -185,6 +185,18 @@ const ( MachineAuthorityMigrating MachineAuthority = "Migrating" ) +// SynchronizedAPI holds the last stable value of authoritativeAPI. +// +kubebuilder:validation:Enum=MachineAPI;ClusterAPI +type SynchronizedAPI string + +const ( + // MachineAPISynchronized indicates that the Machine API is the last synchronized API. + MachineAPISynchronized SynchronizedAPI = "MachineAPI" + + // ClusterAPISynchronized indicates that the Cluster API is the last synchronized API. + ClusterAPISynchronized SynchronizedAPI = "ClusterAPI" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -407,6 +419,14 @@ type MachineStatus struct { // +optional AuthoritativeAPI MachineAuthority `json:"authoritativeAPI,omitempty"` + // synchronizedAPI holds the last stable value of authoritativeAPI. + // It is used to detect migration cancellation requests and to restore the resource to its previous state. + // Valid values are "MachineAPI" and "ClusterAPI". + // When omitted, the resource has not yet been reconciled by the migration controller. 
+ // +openshift:enable:FeatureGate=MachineAPIMigration + // +optional + SynchronizedAPI SynchronizedAPI `json:"synchronizedAPI,omitempty"` + // synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. // This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match. // +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index 80cb282b7..be5476344 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -169,6 +169,14 @@ type MachineSetStatus struct { // +optional AuthoritativeAPI MachineAuthority `json:"authoritativeAPI,omitempty"` + // synchronizedAPI holds the last stable value of authoritativeAPI. + // It is used to detect migration cancellation requests and to restore the resource to its previous state. + // Valid values are "MachineAPI" and "ClusterAPI". + // When omitted, the resource has not yet been reconciled by the migration controller. + // +openshift:enable:FeatureGate=MachineAPIMigration + // +optional + SynchronizedAPI SynchronizedAPI `json:"synchronizedAPI,omitempty"` + // synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. // This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match. // +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml index 3e68b107e..775b9ff34 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-CustomNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. 
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-DevPreviewNoUpgrade.crd.yaml index 9ece1ebed..e14ed6f87 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-DevPreviewNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-TechPreviewNoUpgrade.crd.yaml index b24670fd3..a91cadf51 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machines-TechPreviewNoUpgrade.crd.yaml @@ -567,6 +567,16 @@ spec: serialized/deserialized from this field. type: object x-kubernetes-preserve-unknown-fields: true + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml index db01cca4f..7c68b7b18 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. 
+ Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml index 1556758e6..37fb42ca9 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml index f9801ce02..d93d2c15f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml @@ -674,6 +674,16 @@ spec: description: replicas is the most recently observed number of replicas. format: int32 type: integer + synchronizedAPI: + description: |- + synchronizedAPI holds the last stable value of authoritativeAPI. + It is used to detect migration cancellation requests and to restore the resource to its previous state. + Valid values are "MachineAPI" and "ClusterAPI". + When omitted, the resource has not yet been reconciled by the migration controller. + enum: + - MachineAPI + - ClusterAPI + type: string synchronizedGeneration: description: |- synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. 
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index d08906c7d..63b9bb5ff 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -152,6 +152,11 @@ func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DedicatedHost != nil { + in, out := &in.DedicatedHost, &out.DedicatedHost + *out = new(DedicatedHostStatus) + **out = **in + } return } @@ -512,6 +517,16 @@ func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParamete // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) { *out = *in + if in.AllocationStrategy != nil { + in, out := &in.AllocationStrategy, &out.AllocationStrategy + *out = new(AllocationStrategy) + **out = **in + } + if in.DynamicHostAllocation != nil { + in, out := &in.DynamicHostAllocation, &out.DynamicHostAllocation + *out = new(DynamicHostAllocationSpec) + (*in).DeepCopyInto(*out) + } return } @@ -525,6 +540,22 @@ func (in *DedicatedHost) DeepCopy() *DedicatedHost { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHostStatus) DeepCopyInto(out *DedicatedHostStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHostStatus. +func (in *DedicatedHostStatus) DeepCopy() *DedicatedHostStatus { + if in == nil { + return nil + } + out := new(DedicatedHostStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { *out = *in @@ -557,6 +588,31 @@ func (in *DiskSettings) DeepCopy() *DiskSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicHostAllocationSpec) DeepCopyInto(out *DynamicHostAllocationSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new([]TagSpecification) + if **in != nil { + in, out := *in, *out + *out = make([]TagSpecification, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicHostAllocationSpec. +func (in *DynamicHostAllocationSpec) DeepCopy() *DynamicHostAllocationSpec { + if in == nil { + return nil + } + out := new(DynamicHostAllocationSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = *in @@ -935,7 +991,7 @@ func (in *HostPlacement) DeepCopyInto(out *HostPlacement) { if in.DedicatedHost != nil { in, out := &in.DedicatedHost, &out.DedicatedHost *out = new(DedicatedHost) - **out = **in + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 093a40076..2c4a9030c 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -54,6 +54,7 @@ var map_AWSMachineProviderStatus = map[string]string{ "instanceId": "instanceId is the instance ID of the machine created in AWS", "instanceState": "instanceState is the state of the AWS instance for this machine", "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "dedicatedHost": "dedicatedHost tracks the dynamically allocated dedicated host. This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). When omitted, this indicates that the dedicated host has not yet been allocated, or allocation is in progress.", } func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { @@ -93,14 +94,34 @@ func (CPUOptions) SwaggerDoc() map[string]string { } var map_DedicatedHost = map[string]string{ - "": "DedicatedHost represents the configuration for the usage of dedicated host.", - "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length.", + "": "DedicatedHost represents the configuration for the usage of dedicated host.", + "allocationStrategy": "allocationStrategy specifies if the dedicated host will be provided by the admin through the id field or if the host will be dynamically allocated. Valid values are UserProvided and Dynamic. When omitted, the value defaults to \"UserProvided\", which requires the id field to be set. When allocationStrategy is set to UserProvided, an ID of the dedicated host to assign must be provided. When allocationStrategy is set to Dynamic, a dedicated host will be allocated and used to assign instances. When allocationStrategy is set to Dynamic, and dynamicHostAllocation is configured, a dedicated host will be allocated and the tags in dynamicHostAllocation will be assigned to that host.", + "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length. This field is required when allocationStrategy is UserProvided, and forbidden otherwise. When omitted with allocationStrategy set to Dynamic, the platform will dynamically allocate a dedicated host.", + "dynamicHostAllocation": "dynamicHostAllocation specifies tags to apply to a dynamically allocated dedicated host. This field is only allowed when allocationStrategy is Dynamic, and is mutually exclusive with id. 
When specified, a dedicated host will be allocated with the provided tags applied. When omitted (and allocationStrategy is Dynamic), a dedicated host will be allocated without any additional tags.", } func (DedicatedHost) SwaggerDoc() map[string]string { return map_DedicatedHost } +var map_DedicatedHostStatus = map[string]string{ + "": "DedicatedHostStatus defines the observed state of a dynamically allocated dedicated host associated with an AWSMachine. This struct is used to track the ID of the dedicated host.", + "id": "id tracks the dynamically allocated dedicated host ID. This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length.", +} + +func (DedicatedHostStatus) SwaggerDoc() map[string]string { + return map_DedicatedHostStatus +} + +var map_DynamicHostAllocationSpec = map[string]string{ + "": "DynamicHostAllocationSpec defines the configuration for dynamic dedicated host allocation. This specification always allocates exactly one dedicated host per machine. At least one property must be specified when this struct is used. Currently only Tags are available for configuring, but in the future more configs may become available.", + "tags": "tags specifies a set of key-value pairs to apply to the allocated dedicated host. When omitted, no additional user-defined tags will be applied to the allocated host. A maximum of 50 tags can be specified.", +} + +func (DynamicHostAllocationSpec) SwaggerDoc() map[string]string { + return map_DynamicHostAllocationSpec +} + var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", @@ -176,8 +197,8 @@ func (SpotMarketOptions) SwaggerDoc() map[string]string { var map_TagSpecification = map[string]string{ "": "TagSpecification is the name/value pair for a tag", - "name": "name of the tag", - "value": "value of the tag", + "name": "name of the tag. This field is required and must be a non-empty string. Must be between 1 and 128 characters in length.", + "value": "value of the tag. When omitted, this creates a tag with an empty string as the value.", } func (TagSpecification) SwaggerDoc() map[string]string { @@ -626,6 +647,7 @@ var map_MachineStatus = map[string]string{ "phase": "phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", "conditions": "conditions defines the current state of the Machine", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", + "synchronizedAPI": "synchronizedAPI holds the last stable value of authoritativeAPI. 
It is used to detect migration cancellation requests and to restore the resource to its previous state. Valid values are \"MachineAPI\" and \"ClusterAPI\". When omitted, the resource has not yet been reconciled by the migration controller.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -729,6 +751,7 @@ var map_MachineSetStatus = map[string]string{ "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", "conditions": "conditions defines the current state of the MachineSet", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", + "synchronizedAPI": "synchronizedAPI holds the last stable value of authoritativeAPI. It is used to detect migration cancellation requests and to restore the resource to its previous state. Valid values are \"MachineAPI\" and \"ClusterAPI\". When omitted, the resource has not yet been reconciled by the migration controller.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index c6bcd22bc..f5836af0f 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -18,7 +18,8 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 // +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? 
self.?spec.managedBootImages.hasValue() || self.?status.managedBootImagesStatus.hasValue() : true",message="when skew enforcement is in Automatic mode, a boot image configuration is required" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || self.spec.managedBootImages.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io') : true",message="when skew enforcement is in Automatic mode, managedBootImages must contain a MachineManager opting in all MachineAPI MachineSets" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || size(self.spec.managedBootImages.machineManagers) > 0 : true",message="when skew enforcement is in Automatic mode, managedBootImages.machineManagers must not be an empty list" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || !self.spec.managedBootImages.machineManagers.exists(m, m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io') || self.spec.managedBootImages.machineManagers.exists(m, m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io' && m.selection.mode == 'All') : true",message="when skew enforcement is in Automatic mode, any MachineAPI MachineSet MachineManager must use selection mode 'All'" // +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?status.managedBootImagesStatus.machineManagers.hasValue()) || self.status.managedBootImagesStatus.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io'): true",message="when skew enforcement is in Automatic mode, managedBootImagesStatus must contain a MachineManager opting in all MachineAPI MachineSets" type MachineConfiguration struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 111240eec..1cf56f549 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -54,7 +54,7 @@ type NetworkList struct { // NetworkSpec is the top-level network configuration object. 
// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" +// +kubebuilder:validation:XValidation:rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" type NetworkSpec struct { OperatorSpec `json:",inline"` @@ -136,7 +136,6 @@ type NetworkSpec struct { // capabilities acquired through the enablement of these components but may // require specific configuration on their side to do so; refer to their // respective documentation and configuration options. - // +openshift:enable:FeatureGate=AdditionalRoutingCapabilities // +optional AdditionalRoutingCapabilities *AdditionalRoutingCapabilities `json:"additionalRoutingCapabilities,omitempty"` } @@ -157,7 +156,7 @@ const ( ) // NetworkMigration represents the cluster network migration configuration. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" +// +kubebuilder:validation:XValidation:rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" type NetworkMigration struct { // mtu contains the MTU migration configuration. Set this to allow changing // the MTU values for the default network. If unset, the operation of @@ -465,7 +464,6 @@ type OVNKubernetesConfig struct { // means the user has no opinion and the platform is left to choose // reasonable defaults. These defaults are subject to change over time. The // current default is "Disabled". 
- // +openshift:enable:FeatureGate=RouteAdvertisements // +optional RouteAdvertisements RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index e7c94e286..51a758804 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -327,10 +327,7 @@ networks.operator.openshift.io: CRDName: networks.operator.openshift.io Capability: "" Category: "" - FeatureGates: - - AdditionalRoutingCapabilities - - NetworkLiveMigration - - RouteAdvertisements + FeatureGates: [] FilenameOperatorName: network FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_70" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go index 3e9b09cce..ec19cba3a 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go @@ -41,6 +41,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &EtcdBackupList{}, &ClusterVersionOperator{}, &ClusterVersionOperatorList{}, + &ClusterAPI{}, + &ClusterAPIList{}, ) return nil diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go new file mode 100644 index 000000000..c38fbaf97 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go @@ -0,0 +1,229 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterapis,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2564 +// +openshift:file-pattern=cvoRunLevel=0000_30,operatorName=cluster-api,operatorOrdering=01 +// +openshift:enable:FeatureGate=ClusterAPIMachineManagement +// +kubebuilder:metadata:annotations="release.openshift.io/feature-gate=ClusterAPIMachineManagement" + +// ClusterAPI provides configuration for the capi-operator. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="clusterapi is a singleton, .metadata.name must be 'cluster'" +type ClusterAPI struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the capi-operator. + // +required + Spec *ClusterAPISpec `json:"spec,omitempty"` + + // status defines the observed status of the capi-operator. + // +optional + Status ClusterAPIStatus `json:"status,omitzero"` +} + +// ClusterAPISpec defines the desired configuration of the capi-operator. +// The spec is required but we deliberately allow it to be empty. 
+// +kubebuilder:validation:MinProperties=0 +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.unmanagedCustomResourceDefinitions) || has(self.unmanagedCustomResourceDefinitions)",message="unmanagedCustomResourceDefinitions cannot be unset once set" +type ClusterAPISpec struct { + // unmanagedCustomResourceDefinitions is a list of CustomResourceDefinition (CRD) + // names that should not be managed by the capi-operator installer + // controller. This allows external actors to own specific CRDs while + // capi-operator manages others. + // + // Each CRD name must be a valid DNS-1123 subdomain consisting of lowercase + // alphanumeric characters, '-' or '.', and must start and end with an + // alphanumeric character, with a maximum length of 253 characters. + // CRD names must contain at least two '.' characters. + // Example: "clusters.cluster.x-k8s.io" + // + // Items cannot be removed from this list once added. + // + // The maximum number of unmanagedCustomResourceDefinitions is 128. + // + // +optional + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=128 + // +kubebuilder:validation:XValidation:rule="oldSelf.all(item, item in self)",message="items cannot be removed from unmanagedCustomResourceDefinitions list" + // +kubebuilder:validation:items:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:items:XValidation:rule="self.split('.').size() > 2",message="CRD names must contain at least two '.' characters." + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=253 + UnmanagedCustomResourceDefinitions []string `json:"unmanagedCustomResourceDefinitions,omitempty"` +} + +// RevisionName represents the name of a revision. The name must be between 1 +// and 255 characters long. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=255 +type RevisionName string + +// ClusterAPIStatus describes the current state of the capi-operator. +// +kubebuilder:validation:XValidation:rule="self.revisions.exists(r, r.name == self.desiredRevision && self.revisions.all(s, s.revision <= r.revision))",message="desiredRevision must be the name of the revision with the highest revision number" +// +kubebuilder:validation:XValidation:rule="!has(self.currentRevision) || self.revisions.exists(r, r.name == self.currentRevision)",message="currentRevision must correspond to an entry in the revisions list" +type ClusterAPIStatus struct { + // currentRevision is the name of the most recently fully applied revision. + // It is written by the installer controller. If it is absent, it indicates + // that no revision has been fully applied yet. + // If set, currentRevision must correspond to an entry in the revisions list. + // +optional + CurrentRevision RevisionName `json:"currentRevision,omitempty"` + + // desiredRevision is the name of the desired revision. It is written by the + // revision controller. It must be set to the name of the entry in the + // revisions list with the highest revision number. + // +required + DesiredRevision RevisionName `json:"desiredRevision,omitempty"` + + // revisions is a list of all currently active revisions. A revision is + // active until the installer controller updates currentRevision to a later + // revision. It is written by the revision controller.
+ // + // The maximum number of revisions is 16. + // All revisions must have a unique name. + // All revisions must have a unique revision number. + // When adding a revision, the revision number must be greater than the highest revision number in the list. + // Revisions are immutable, although they can be deleted. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="each revision must have a unique name" + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.revision == y.revision))",message="each revision must have a unique revision number" + // +kubebuilder:validation:XValidation:rule="self.all(new, oldSelf.exists(old, old.name == new.name) || oldSelf.all(old, new.revision > old.revision))",message="new revisions must have a revision number greater than all existing revisions" + // +kubebuilder:validation:XValidation:rule="oldSelf.all(old, !self.exists(new, new.name == old.name) || self.exists(new, new == old))",message="existing revisions are immutable, but may be removed" + Revisions []ClusterAPIInstallerRevision `json:"revisions,omitempty"` +} + +// +structType=atomic +type ClusterAPIInstallerRevision struct { + // name is the name of a revision. + // +required + Name RevisionName `json:"name,omitempty"` + + // revision is a monotonically increasing number that is assigned to a revision. + // +required + // +kubebuilder:validation:Minimum=1 + Revision int64 `json:"revision,omitempty"` + + // contentID uniquely identifies the content of this revision. + // The contentID must be between 1 and 255 characters long. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + ContentID string `json:"contentID,omitempty"` + + // unmanagedCustomResourceDefinitions is a list of the names of + // CustomResourceDefinition (CRD) objects which are included in this + // revision, but which should not be installed or updated. If not set, all + // CRDs in the revision will be managed by the CAPI operator. + // +listType=atomic + // +kubebuilder:validation:items:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=253 + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=128 + // +optional + UnmanagedCustomResourceDefinitions []string `json:"unmanagedCustomResourceDefinitions,omitempty"` + + // components is a list of components which will be installed by this + // revision. Components will be installed in the order they are listed. + // + // The maximum number of components is 32. + // + // +required + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + Components []ClusterAPIInstallerComponent `json:"components,omitempty"` +} + +// InstallerComponentType is the type of component to install. +// +kubebuilder:validation:Enum=Image +// +enum +type InstallerComponentType string + +const ( + // InstallerComponentTypeImage is an image source for a component. + InstallerComponentTypeImage InstallerComponentType = "Image" +) + +// ClusterAPIInstallerComponent defines a component which will be installed by this revision.
+// +union +// +kubebuilder:validation:XValidation:rule="self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when type is Image, and forbidden otherwise" +type ClusterAPIInstallerComponent struct { + // type is the source type of the component. + // The only valid value is Image. + // When set to Image, the image field must be set and will define an image source for the component. + // +required + // +unionDiscriminator + Type InstallerComponentType `json:"type,omitempty"` + + // image defines an image source for a component. The image must contain a + // /capi-operator-installer directory containing the component manifests. + // +optional + Image ClusterAPIInstallerComponentImage `json:"image,omitzero"` +} + +// ImageDigestFormat is a type that conforms to the format host[:port][/namespace]/name@sha256:<digest>. +// The digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. +// The length of the field must be between 1 and 447 characters. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=447 +// +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:<digest>' suffix, where '<digest>' is 64 characters long" +// +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" +type ImageDigestFormat string + +// ClusterAPIInstallerComponentImage defines an image source for a component. +type ClusterAPIInstallerComponentImage struct { + // ref is an image reference to the image containing the component manifests. The reference + // must be a valid image digest reference in the format host[:port][/namespace]/name@sha256:<digest>. + // The digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the field must be between 1 and 447 characters. + // +required + Ref ImageDigestFormat `json:"ref,omitempty"` + + // profile is the name of a profile to use from the image. + // + // A profile name may be up to 255 characters long. It must consist of alphanumeric characters, '-', or '_'. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-zA-Z0-9-_]+$')",message="profile must consist of alphanumeric characters, '-', or '_'" + Profile string `json:"profile,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterAPIList contains a list of ClusterAPI configurations +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type ClusterAPIList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []ClusterAPI `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index de4c07128..1f3fd281e 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,174 @@ func (in *BackupJobReference) DeepCopy() *BackupJobReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPI) DeepCopyInto(out *ClusterAPI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(ClusterAPISpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPI. +func (in *ClusterAPI) DeepCopy() *ClusterAPI { + if in == nil { + return nil + } + out := new(ClusterAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterAPI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIInstallerComponent) DeepCopyInto(out *ClusterAPIInstallerComponent) { + *out = *in + out.Image = in.Image + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIInstallerComponent. +func (in *ClusterAPIInstallerComponent) DeepCopy() *ClusterAPIInstallerComponent { + if in == nil { + return nil + } + out := new(ClusterAPIInstallerComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIInstallerComponentImage) DeepCopyInto(out *ClusterAPIInstallerComponentImage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIInstallerComponentImage. +func (in *ClusterAPIInstallerComponentImage) DeepCopy() *ClusterAPIInstallerComponentImage { + if in == nil { + return nil + } + out := new(ClusterAPIInstallerComponentImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIInstallerRevision) DeepCopyInto(out *ClusterAPIInstallerRevision) { + *out = *in + if in.UnmanagedCustomResourceDefinitions != nil { + in, out := &in.UnmanagedCustomResourceDefinitions, &out.UnmanagedCustomResourceDefinitions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ClusterAPIInstallerComponent, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIInstallerRevision. 
+func (in *ClusterAPIInstallerRevision) DeepCopy() *ClusterAPIInstallerRevision { + if in == nil { + return nil + } + out := new(ClusterAPIInstallerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIList) DeepCopyInto(out *ClusterAPIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterAPI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIList. +func (in *ClusterAPIList) DeepCopy() *ClusterAPIList { + if in == nil { + return nil + } + out := new(ClusterAPIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterAPIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPISpec) DeepCopyInto(out *ClusterAPISpec) { + *out = *in + if in.UnmanagedCustomResourceDefinitions != nil { + in, out := &in.UnmanagedCustomResourceDefinitions, &out.UnmanagedCustomResourceDefinitions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPISpec. +func (in *ClusterAPISpec) DeepCopy() *ClusterAPISpec { + if in == nil { + return nil + } + out := new(ClusterAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIStatus) DeepCopyInto(out *ClusterAPIStatus) { + *out = *in + if in.Revisions != nil { + in, out := &in.Revisions, &out.Revisions + *out = make([]ClusterAPIInstallerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIStatus. +func (in *ClusterAPIStatus) DeepCopy() *ClusterAPIStatus { + if in == nil { + return nil + } + out := new(ClusterAPIStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterVersionOperator) DeepCopyInto(out *ClusterVersionOperator) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 0d595be80..3ad442d9d 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,3 +1,27 @@ +clusterapis.operator.openshift.io: + Annotations: + release.openshift.io/feature-gate: ClusterAPIMachineManagement + ApprovedPRNumber: https://github.com/openshift/api/pull/2564 + CRDName: clusterapis.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterAPIMachineManagement + FilenameOperatorName: cluster-api + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_30" + GroupName: operator.openshift.io + HasStatus: true + KindName: ClusterAPI + Labels: {} + PluralName: clusterapis + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ClusterAPIMachineManagement + Version: v1alpha1 + clusterversionoperators.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2044 diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index 9060bf998..94d48269d 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -135,6 +135,79 @@ func (VersionAvailability) SwaggerDoc() map[string]string { return map_VersionAvailability } +var map_ClusterAPI = map[string]string{ + "": "ClusterAPI provides configuration for the capi-operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the capi-operator.", + "status": "status defines the observed status of the capi-operator.", +} + +func (ClusterAPI) SwaggerDoc() map[string]string { + return map_ClusterAPI +} + +var map_ClusterAPIInstallerComponent = map[string]string{ + "": "ClusterAPIInstallerComponent defines a component which will be installed by this revision.", + "type": "type is the source type of the component. The only valid value is Image. When set to Image, the image field must be set and will define an image source for the component.", + "image": "image defines an image source for a component. The image must contain a /capi-operator-installer directory containing the component manifests.", +} + +func (ClusterAPIInstallerComponent) SwaggerDoc() map[string]string { + return map_ClusterAPIInstallerComponent +} + +var map_ClusterAPIInstallerComponentImage = map[string]string{ + "": "ClusterAPIInstallerComponentImage defines an image source for a component.", + "ref": "ref is an image reference to the image containing the component manifests. The reference must be a valid image digest reference in the format host[:port][/namespace]/name@sha256:<digest>.
The digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the field must be between 1 and 447 characters.", + "profile": "profile is the name of a profile to use from the image.\n\nA profile name may be up to 255 characters long. It must consist of alphanumeric characters, '-', or '_'.", +} + +func (ClusterAPIInstallerComponentImage) SwaggerDoc() map[string]string { + return map_ClusterAPIInstallerComponentImage +} + +var map_ClusterAPIInstallerRevision = map[string]string{ + "name": "name is the name of a revision.", + "revision": "revision is a monotonically increasing number that is assigned to a revision.", + "contentID": "contentID uniquely identifies the content of this revision. The contentID must be between 1 and 255 characters long.", + "unmanagedCustomResourceDefinitions": "unmanagedCustomResourceDefinitions is a list of the names of CustomResourceDefinition (CRD) objects which are included in this revision, but which should not be installed or updated. If not set, all CRDs in the revision will be managed by the CAPI operator.", + "components": "components is a list of components which will be installed by this revision. Components will be installed in the order they are listed.\n\nThe maximum number of components is 32.", +} + +func (ClusterAPIInstallerRevision) SwaggerDoc() map[string]string { + return map_ClusterAPIInstallerRevision +} + +var map_ClusterAPIList = map[string]string{ + "": "ClusterAPIList contains a list of ClusterAPI configurations\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (ClusterAPIList) SwaggerDoc() map[string]string { + return map_ClusterAPIList +} + +var map_ClusterAPISpec = map[string]string{ + "": "ClusterAPISpec defines the desired configuration of the capi-operator. The spec is required but we deliberately allow it to be empty.", + "unmanagedCustomResourceDefinitions": "unmanagedCustomResourceDefinitions is a list of CustomResourceDefinition (CRD) names that should not be managed by the capi-operator installer controller. This allows external actors to own specific CRDs while capi-operator manages others.\n\nEach CRD name must be a valid DNS-1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character, with a maximum length of 253 characters. CRD names must contain at least two '.' characters. Example: \"clusters.cluster.x-k8s.io\"\n\nItems cannot be removed from this list once added.\n\nThe maximum number of unmanagedCustomResourceDefinitions is 128.", +} + +func (ClusterAPISpec) SwaggerDoc() map[string]string { + return map_ClusterAPISpec +} + +var map_ClusterAPIStatus = map[string]string{ + "": "ClusterAPIStatus describes the current state of the capi-operator.", + "currentRevision": "currentRevision is the name of the most recently fully applied revision. It is written by the installer controller. If it is absent, it indicates that no revision has been fully applied yet. If set, currentRevision must correspond to an entry in the revisions list.", + "desiredRevision": "desiredRevision is the name of the desired revision. It is written by the revision controller.
It must be set to the name of the entry in the revisions list with the highest revision number.", + "revisions": "revisions is a list of all currently active revisions. A revision is active until the installer controller updates currentRevision to a later revision. It is written by the revision controller.\n\nThe maximum number of revisions is 16. All revisions must have a unique name. All revisions must have a unique revision number. When adding a revision, the revision number must be greater than the highest revision number in the list. Revisions are immutable, although they can be deleted.", +} + +func (ClusterAPIStatus) SwaggerDoc() map[string]string { + return map_ClusterAPIStatus +} + var map_ClusterVersionOperator = map[string]string{ "": "ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go index 6c86d66d4..8cee680f2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use // with apply. type ClusterImagePolicySpecApplyConfiguration struct { - Scopes []configv1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1 // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. -func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go new file mode 100644 index 000000000..77234d0df --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomApplyConfiguration represents a declarative configuration of the Custom type for use +// with apply. 
+type CustomApplyConfiguration struct { + Configs []GathererConfigApplyConfiguration `json:"configs,omitempty"` +} + +// CustomApplyConfiguration constructs a declarative configuration of the Custom type for use with +// apply. +func Custom() *CustomApplyConfiguration { + return &CustomApplyConfiguration{} +} + +// WithConfigs adds the given value to the Configs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Configs field. +func (b *CustomApplyConfiguration) WithConfigs(values ...*GathererConfigApplyConfiguration) *CustomApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfigs") + } + b.Configs = append(b.Configs, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go new file mode 100644 index 000000000..eaa796519 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GatherConfigApplyConfiguration represents a declarative configuration of the GatherConfig type for use +// with apply. +type GatherConfigApplyConfiguration struct { + DataPolicy []configv1.DataPolicyOption `json:"dataPolicy,omitempty"` + Gatherers *GatherersApplyConfiguration `json:"gatherers,omitempty"` + Storage *StorageApplyConfiguration `json:"storage,omitempty"` +} + +// GatherConfigApplyConfiguration constructs a declarative configuration of the GatherConfig type for use with +// apply. +func GatherConfig() *GatherConfigApplyConfiguration { + return &GatherConfigApplyConfiguration{} +} + +// WithDataPolicy adds the given value to the DataPolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DataPolicy field. +func (b *GatherConfigApplyConfiguration) WithDataPolicy(values ...configv1.DataPolicyOption) *GatherConfigApplyConfiguration { + for i := range values { + b.DataPolicy = append(b.DataPolicy, values[i]) + } + return b +} + +// WithGatherers sets the Gatherers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Gatherers field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithGatherers(value *GatherersApplyConfiguration) *GatherConfigApplyConfiguration { + b.Gatherers = value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. 
+func (b *GatherConfigApplyConfiguration) WithStorage(value *StorageApplyConfiguration) *GatherConfigApplyConfiguration { + b.Storage = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go new file mode 100644 index 000000000..caa8b79d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GathererConfigApplyConfiguration represents a declarative configuration of the GathererConfig type for use +// with apply. +type GathererConfigApplyConfiguration struct { + Name *string `json:"name,omitempty"` + State *configv1.GathererState `json:"state,omitempty"` +} + +// GathererConfigApplyConfiguration constructs a declarative configuration of the GathererConfig type for use with +// apply. +func GathererConfig() *GathererConfigApplyConfiguration { + return &GathererConfigApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GathererConfigApplyConfiguration) WithName(value string) *GathererConfigApplyConfiguration { + b.Name = &value + return b +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *GathererConfigApplyConfiguration) WithState(value configv1.GathererState) *GathererConfigApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go new file mode 100644 index 000000000..32469f512 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GatherersApplyConfiguration represents a declarative configuration of the Gatherers type for use +// with apply. +type GatherersApplyConfiguration struct { + Mode *configv1.GatheringMode `json:"mode,omitempty"` + Custom *CustomApplyConfiguration `json:"custom,omitempty"` +} + +// GatherersApplyConfiguration constructs a declarative configuration of the Gatherers type for use with +// apply. +func Gatherers() *GatherersApplyConfiguration { + return &GatherersApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. 
+func (b *GatherersApplyConfiguration) WithMode(value configv1.GatheringMode) *GatherersApplyConfiguration { + b.Mode = &value + return b +} + +// WithCustom sets the Custom field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Custom field is set to the value of the last call. +func (b *GatherersApplyConfiguration) WithCustom(value *CustomApplyConfiguration) *GatherersApplyConfiguration { + b.Custom = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go similarity index 57% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go index 48b553580..a4c831fca 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/fulciocawithrekor.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyfulciocawithrekorrootoftrust.go @@ -2,24 +2,24 @@ package v1 -// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use // with apply. -type FulcioCAWithRekorApplyConfiguration struct { +type ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration struct { FulcioCAData []byte `json:"fulcioCAData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` } -// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use with // apply. -func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { - return &FulcioCAWithRekorApplyConfiguration{} +func ImagePolicyFulcioCAWithRekorRootOfTrust() *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { + return &ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} } // WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the FulcioCAData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioCAData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.FulcioCAData = append(b.FulcioCAData, values[i]) } @@ -29,7 +29,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) * // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } @@ -39,7 +39,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) * // WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioSubject field is set to the value of the last call. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { b.FulcioSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go similarity index 65% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go index 65f27edf8..9a0c257b7 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/pki.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypkirootoftrust.go @@ -2,24 +2,24 @@ package v1 -// PKIApplyConfiguration represents a declarative configuration of the PKI type for use +// ImagePolicyPKIRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPKIRootOfTrust type for use // with apply. -type PKIApplyConfiguration struct { +type ImagePolicyPKIRootOfTrustApplyConfiguration struct { CertificateAuthorityRootsData []byte `json:"caRootsData,omitempty"` CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` PKICertificateSubject *PKICertificateSubjectApplyConfiguration `json:"pkiCertificateSubject,omitempty"` } -// PKIApplyConfiguration constructs a declarative configuration of the PKI type for use with +// ImagePolicyPKIRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPKIRootOfTrust type for use with // apply. -func PKI() *PKIApplyConfiguration { - return &PKIApplyConfiguration{} +func ImagePolicyPKIRootOfTrust() *ImagePolicyPKIRootOfTrustApplyConfiguration { + return &ImagePolicyPKIRootOfTrustApplyConfiguration{} } // WithCertificateAuthorityRootsData adds the given value to the CertificateAuthorityRootsData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityRootsData field. 
-func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityRootsData = append(b.CertificateAuthorityRootsData, values[i]) } @@ -29,7 +29,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte // WithCertificateAuthorityIntermediatesData adds the given value to the CertificateAuthorityIntermediatesData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityIntermediatesData field. -func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityIntermediatesData = append(b.CertificateAuthorityIntermediatesData, values[i]) } @@ -39,7 +39,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values // WithPKICertificateSubject sets the PKICertificateSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKICertificateSubject field is set to the value of the last call. -func (b *PKIApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *ImagePolicyPKIRootOfTrustApplyConfiguration { b.PKICertificateSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go similarity index 54% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go index c1073e882..a14457309 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/publickey.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicypublickeyrootoftrust.go @@ -2,23 +2,23 @@ package v1 -// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use // with apply. -type PublicKeyApplyConfiguration struct { +type ImagePolicyPublicKeyRootOfTrustApplyConfiguration struct { KeyData []byte `json:"keyData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use with // apply. 
-func PublicKey() *PublicKeyApplyConfiguration { - return &PublicKeyApplyConfiguration{} +func ImagePolicyPublicKeyRootOfTrust() *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { + return &ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} } // WithKeyData adds the given value to the KeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the KeyData field. -func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.KeyData = append(b.KeyData, values[i]) } @@ -28,7 +28,7 @@ func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyAppl // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go index b75165c8d..321196469 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use // with apply. type ImagePolicySpecApplyConfiguration struct { - Scopes []configv1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1.ImageS // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
-func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go similarity index 52% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go index 3e29510bf..6f0d5d2e7 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagesigstoreverificationpolicy.go @@ -2,23 +2,23 @@ package v1 -// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// ImageSigstoreVerificationPolicyApplyConfiguration represents a declarative configuration of the ImageSigstoreVerificationPolicy type for use // with apply. -type PolicyApplyConfiguration struct { +type ImageSigstoreVerificationPolicyApplyConfiguration struct { RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` } -// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// ImageSigstoreVerificationPolicyApplyConfiguration constructs a declarative configuration of the ImageSigstoreVerificationPolicy type for use with // apply. -func Policy() *PolicyApplyConfiguration { - return &PolicyApplyConfiguration{} +func ImageSigstoreVerificationPolicy() *ImageSigstoreVerificationPolicyApplyConfiguration { + return &ImageSigstoreVerificationPolicyApplyConfiguration{} } // WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RootOfTrust field is set to the value of the last call. -func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.RootOfTrust = value return b } @@ -26,7 +26,7 @@ func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApply // WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SignedIdentity field is set to the value of the last call. 
-func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.SignedIdentity = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go new file mode 100644 index 000000000..829a4071a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go @@ -0,0 +1,254 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsDataGatherApplyConfiguration represents a declarative configuration of the InsightsDataGather type for use +// with apply. +type InsightsDataGatherApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"` +} + +// InsightsDataGather constructs a declarative configuration of the InsightsDataGather type for use with +// apply. +func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration { + b := &InsightsDataGatherApplyConfiguration{} + b.WithName(name) + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from +// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, a +// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// insightsDataGather must be a unmodified InsightsDataGather API object that was retrieved from the Kubernetes API. +// ExtractInsightsDataGather provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractInsightsDataGather(insightsDataGather *configv1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "") +} + +// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractInsightsDataGatherStatus(insightsDataGather *configv1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "status") +} + +func extractInsightsDataGather(insightsDataGather *configv1.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) { + b := &InsightsDataGatherApplyConfiguration{} + err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1.InsightsDataGather"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(insightsDataGather.Name) + + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} +func (b InsightsDataGatherApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.Spec = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *InsightsDataGatherApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go new file mode 100644 index 000000000..4be6d441a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// InsightsDataGatherSpecApplyConfiguration represents a declarative configuration of the InsightsDataGatherSpec type for use +// with apply. +type InsightsDataGatherSpecApplyConfiguration struct { + GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"` +} + +// InsightsDataGatherSpecApplyConfiguration constructs a declarative configuration of the InsightsDataGatherSpec type for use with +// apply. +func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration { + return &InsightsDataGatherSpecApplyConfiguration{} +} + +// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherConfig field is set to the value of the last call. +func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration { + b.GatherConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go new file mode 100644 index 000000000..49daf4bc2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PersistentVolumeClaimReferenceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimReference type for use +// with apply. +type PersistentVolumeClaimReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PersistentVolumeClaimReferenceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimReference type for use with +// apply. 
+func PersistentVolumeClaimReference() *PersistentVolumeClaimReferenceApplyConfiguration { + return &PersistentVolumeClaimReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PersistentVolumeClaimReferenceApplyConfiguration) WithName(value string) *PersistentVolumeClaimReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go new file mode 100644 index 000000000..c62fdbcf9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PersistentVolumeConfigApplyConfiguration represents a declarative configuration of the PersistentVolumeConfig type for use +// with apply. +type PersistentVolumeConfigApplyConfiguration struct { + Claim *PersistentVolumeClaimReferenceApplyConfiguration `json:"claim,omitempty"` + MountPath *string `json:"mountPath,omitempty"` +} + +// PersistentVolumeConfigApplyConfiguration constructs a declarative configuration of the PersistentVolumeConfig type for use with +// apply. +func PersistentVolumeConfig() *PersistentVolumeConfigApplyConfiguration { + return &PersistentVolumeConfigApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. +func (b *PersistentVolumeConfigApplyConfiguration) WithClaim(value *PersistentVolumeClaimReferenceApplyConfiguration) *PersistentVolumeConfigApplyConfiguration { + b.Claim = value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *PersistentVolumeConfigApplyConfiguration) WithMountPath(value string) *PersistentVolumeConfigApplyConfiguration { + b.MountPath = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go index f1ff91ffb..6b3e46f47 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/policyrootoftrust.go @@ -9,10 +9,10 @@ import ( // PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use // with apply. 
type PolicyRootOfTrustApplyConfiguration struct { - PolicyType *configv1.PolicyType `json:"policyType,omitempty"` - PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` - FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` - PKI *PKIApplyConfiguration `json:"pki,omitempty"` + PolicyType *configv1.PolicyType `json:"policyType,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrustApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` + PKI *ImagePolicyPKIRootOfTrustApplyConfiguration `json:"pki,omitempty"` } // PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with @@ -32,7 +32,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1.Poli // WithPublicKey sets the PublicKey field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PublicKey field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PublicKey = value return b } @@ -40,7 +40,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyAppl // WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.FulcioCAWithRekor = value return b } @@ -48,7 +48,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *Fulci // WithPKI sets the PKI field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKI field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *PKIApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *ImagePolicyPKIRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PKI = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go new file mode 100644 index 000000000..405df6c13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// StorageApplyConfiguration represents a declarative configuration of the Storage type for use +// with apply. 
+type StorageApplyConfiguration struct { + Type *configv1.StorageType `json:"type,omitempty"` + PersistentVolume *PersistentVolumeConfigApplyConfiguration `json:"persistentVolume,omitempty"` +} + +// StorageApplyConfiguration constructs a declarative configuration of the Storage type for use with +// apply. +func Storage() *StorageApplyConfiguration { + return &StorageApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithType(value configv1.StorageType) *StorageApplyConfiguration { + b.Type = &value + return b +} + +// WithPersistentVolume sets the PersistentVolume field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolume field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithPersistentVolume(value *PersistentVolumeConfigApplyConfiguration) *StorageApplyConfiguration { + b.PersistentVolume = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go index e4a3470c4..e1c4c630e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ClusterImagePolicySpecApplyConfiguration represents a declarative configuration of the ClusterImagePolicySpec type for use // with apply. type ClusterImagePolicySpecApplyConfiguration struct { - Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ClusterImagePolicySpecApplyConfiguration constructs a declarative configuration of the ClusterImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...configv1 // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
-func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { +func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go similarity index 57% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go index 2a907a7e9..2fcaa3621 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyfulciocawithrekorrootoftrust.go @@ -2,24 +2,24 @@ package v1alpha1 -// FulcioCAWithRekorApplyConfiguration represents a declarative configuration of the FulcioCAWithRekor type for use +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use // with apply. -type FulcioCAWithRekorApplyConfiguration struct { +type ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration struct { FulcioCAData []byte `json:"fulcioCAData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"` } -// FulcioCAWithRekorApplyConfiguration constructs a declarative configuration of the FulcioCAWithRekor type for use with +// ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyFulcioCAWithRekorRootOfTrust type for use with // apply. -func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration { - return &FulcioCAWithRekorApplyConfiguration{} +func ImagePolicyFulcioCAWithRekorRootOfTrust() *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { + return &ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} } // WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the FulcioCAData field. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioCAData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.FulcioCAData = append(b.FulcioCAData, values[i]) } @@ -29,7 +29,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) * // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. 
-func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } @@ -39,7 +39,7 @@ func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) * // WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioSubject field is set to the value of the last call. -func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration { +func (b *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration { b.FulcioSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go similarity index 65% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go index 455abe02a..a218867ea 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/pki.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypkirootoftrust.go @@ -2,24 +2,24 @@ package v1alpha1 -// PKIApplyConfiguration represents a declarative configuration of the PKI type for use +// ImagePolicyPKIRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPKIRootOfTrust type for use // with apply. -type PKIApplyConfiguration struct { +type ImagePolicyPKIRootOfTrustApplyConfiguration struct { CertificateAuthorityRootsData []byte `json:"caRootsData,omitempty"` CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` PKICertificateSubject *PKICertificateSubjectApplyConfiguration `json:"pkiCertificateSubject,omitempty"` } -// PKIApplyConfiguration constructs a declarative configuration of the PKI type for use with +// ImagePolicyPKIRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPKIRootOfTrust type for use with // apply. -func PKI() *PKIApplyConfiguration { - return &PKIApplyConfiguration{} +func ImagePolicyPKIRootOfTrust() *ImagePolicyPKIRootOfTrustApplyConfiguration { + return &ImagePolicyPKIRootOfTrustApplyConfiguration{} } // WithCertificateAuthorityRootsData adds the given value to the CertificateAuthorityRootsData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityRootsData field. 
-func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityRootsData = append(b.CertificateAuthorityRootsData, values[i]) } @@ -29,7 +29,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityRootsData(values ...byte // WithCertificateAuthorityIntermediatesData adds the given value to the CertificateAuthorityIntermediatesData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the CertificateAuthorityIntermediatesData field. -func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithCertificateAuthorityIntermediatesData(values ...byte) *ImagePolicyPKIRootOfTrustApplyConfiguration { for i := range values { b.CertificateAuthorityIntermediatesData = append(b.CertificateAuthorityIntermediatesData, values[i]) } @@ -39,7 +39,7 @@ func (b *PKIApplyConfiguration) WithCertificateAuthorityIntermediatesData(values // WithPKICertificateSubject sets the PKICertificateSubject field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PKICertificateSubject field is set to the value of the last call. -func (b *PKIApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *PKIApplyConfiguration { +func (b *ImagePolicyPKIRootOfTrustApplyConfiguration) WithPKICertificateSubject(value *PKICertificateSubjectApplyConfiguration) *ImagePolicyPKIRootOfTrustApplyConfiguration { b.PKICertificateSubject = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go similarity index 54% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go index 91665a90b..22513de62 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicypublickeyrootoftrust.go @@ -2,23 +2,23 @@ package v1alpha1 -// PublicKeyApplyConfiguration represents a declarative configuration of the PublicKey type for use +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration represents a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use // with apply. -type PublicKeyApplyConfiguration struct { +type ImagePolicyPublicKeyRootOfTrustApplyConfiguration struct { KeyData []byte `json:"keyData,omitempty"` RekorKeyData []byte `json:"rekorKeyData,omitempty"` } -// PublicKeyApplyConfiguration constructs a declarative configuration of the PublicKey type for use with +// ImagePolicyPublicKeyRootOfTrustApplyConfiguration constructs a declarative configuration of the ImagePolicyPublicKeyRootOfTrust type for use with // apply. 
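The PKI builder follows the same pattern; a sketch under the same assumptions (the alias and PEM placeholders are illustrative):

package main

import (
	"fmt"

	applyv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	// ImagePolicyPKIRootOfTrust() replaces the old PKI() constructor. As the
	// generated comments note, the byte setters append on repeated calls.
	pki := applyv1alpha1.ImagePolicyPKIRootOfTrust().
		WithCertificateAuthorityRootsData([]byte("<CA roots PEM placeholder>")...).
		WithCertificateAuthorityIntermediatesData([]byte("<CA intermediates PEM placeholder>")...)
	fmt.Printf("caRootsData: %d bytes\n", len(pki.CertificateAuthorityRootsData))
}
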
-func PublicKey() *PublicKeyApplyConfiguration { - return &PublicKeyApplyConfiguration{} +func ImagePolicyPublicKeyRootOfTrust() *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { + return &ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} } // WithKeyData adds the given value to the KeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the KeyData field. -func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.KeyData = append(b.KeyData, values[i]) } @@ -28,7 +28,7 @@ func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyAppl // WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the RekorKeyData field. -func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration { +func (b *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) WithRekorKeyData(values ...byte) *ImagePolicyPublicKeyRootOfTrustApplyConfiguration { for i := range values { b.RekorKeyData = append(b.RekorKeyData, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go index ac08e9cf4..84969b600 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go @@ -9,8 +9,8 @@ import ( // ImagePolicySpecApplyConfiguration represents a declarative configuration of the ImagePolicySpec type for use // with apply. type ImagePolicySpecApplyConfiguration struct { - Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` - Policy *PolicyApplyConfiguration `json:"policy,omitempty"` + Scopes []configv1alpha1.ImageScope `json:"scopes,omitempty"` + Policy *ImageSigstoreVerificationPolicyApplyConfiguration `json:"policy,omitempty"` } // ImagePolicySpecApplyConfiguration constructs a declarative configuration of the ImagePolicySpec type for use with @@ -32,7 +32,7 @@ func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...configv1alpha1. // WithPolicy sets the Policy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Policy field is set to the value of the last call. 
-func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { +func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *ImageSigstoreVerificationPolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration { b.Policy = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go similarity index 52% rename from vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go rename to vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go index 61e485664..64f9760e8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagesigstoreverificationpolicy.go @@ -2,23 +2,23 @@ package v1alpha1 -// PolicyApplyConfiguration represents a declarative configuration of the Policy type for use +// ImageSigstoreVerificationPolicyApplyConfiguration represents a declarative configuration of the ImageSigstoreVerificationPolicy type for use // with apply. -type PolicyApplyConfiguration struct { +type ImageSigstoreVerificationPolicyApplyConfiguration struct { RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"` SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"` } -// PolicyApplyConfiguration constructs a declarative configuration of the Policy type for use with +// ImageSigstoreVerificationPolicyApplyConfiguration constructs a declarative configuration of the ImageSigstoreVerificationPolicy type for use with // apply. -func Policy() *PolicyApplyConfiguration { - return &PolicyApplyConfiguration{} +func ImageSigstoreVerificationPolicy() *ImageSigstoreVerificationPolicyApplyConfiguration { + return &ImageSigstoreVerificationPolicyApplyConfiguration{} } // WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RootOfTrust field is set to the value of the last call. -func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.RootOfTrust = value return b } @@ -26,7 +26,7 @@ func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApply // WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SignedIdentity field is set to the value of the last call. 
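Putting the renames together: a hedged end-to-end sketch of an ImagePolicySpec apply configuration wired through the new ImageSigstoreVerificationPolicy builder. The scope value and key bytes are placeholders; the "PublicKey" discriminator value matches the union declared in the managed-fields schema further down in this diff.

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	applyv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	// WithPolicy now takes *ImageSigstoreVerificationPolicyApplyConfiguration
	// instead of the old *PolicyApplyConfiguration.
	spec := applyv1alpha1.ImagePolicySpec().
		WithScopes(configv1alpha1.ImageScope("registry.example.com/myns")).
		WithPolicy(applyv1alpha1.ImageSigstoreVerificationPolicy().
			WithRootOfTrust(applyv1alpha1.PolicyRootOfTrust().
				WithPolicyType(configv1alpha1.PolicyType("PublicKey")).
				WithPublicKey(applyv1alpha1.ImagePolicyPublicKeyRootOfTrust().
					WithKeyData([]byte("<cosign public key PEM placeholder>")...))))
	fmt.Println(len(spec.Scopes), *spec.Policy.RootOfTrust.PolicyType)
}
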
-func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration { +func (b *ImageSigstoreVerificationPolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *ImageSigstoreVerificationPolicyApplyConfiguration { b.SignedIdentity = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go index 5de792be6..5122c82e0 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go @@ -9,10 +9,10 @@ import ( // PolicyRootOfTrustApplyConfiguration represents a declarative configuration of the PolicyRootOfTrust type for use // with apply. type PolicyRootOfTrustApplyConfiguration struct { - PolicyType *configv1alpha1.PolicyType `json:"policyType,omitempty"` - PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"` - FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` - PKI *PKIApplyConfiguration `json:"pki,omitempty"` + PolicyType *configv1alpha1.PolicyType `json:"policyType,omitempty"` + PublicKey *ImagePolicyPublicKeyRootOfTrustApplyConfiguration `json:"publicKey,omitempty"` + FulcioCAWithRekor *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration `json:"fulcioCAWithRekor,omitempty"` + PKI *ImagePolicyPKIRootOfTrustApplyConfiguration `json:"pki,omitempty"` } // PolicyRootOfTrustApplyConfiguration constructs a declarative configuration of the PolicyRootOfTrust type for use with @@ -32,7 +32,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value configv1alpha // WithPublicKey sets the PublicKey field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PublicKey field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *ImagePolicyPublicKeyRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PublicKey = value return b } @@ -40,7 +40,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyAppl // WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FulcioCAWithRekor field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.FulcioCAWithRekor = value return b } @@ -48,7 +48,7 @@ func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *Fulci // WithPKI sets the PKI field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the PKI field is set to the value of the last call. -func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *PKIApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { +func (b *PolicyRootOfTrustApplyConfiguration) WithPKI(value *ImagePolicyPKIRootOfTrustApplyConfiguration) *PolicyRootOfTrustApplyConfiguration { b.PKI = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 45d13f234..f00417a5c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -641,7 +641,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1.Policy + namedType: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -1065,6 +1065,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.Custom + map: + fields: + - name: configs + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.GathererConfig + elementRelationship: associative + keys: + - name - name: com.github.openshift.api.config.v1.CustomFeatureGates map: fields: @@ -1329,19 +1340,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - version -- name: com.github.openshift.api.config.v1.FulcioCAWithRekor - map: - fields: - - name: fulcioCAData - type: - scalar: string - - name: fulcioSubject - type: - namedType: com.github.openshift.api.config.v1.PolicyFulcioSubject - default: {} - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1.GCPPlatformSpec map: elementType: @@ -1386,14 +1384,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - key - - name: serviceEndpoints - type: - list: - elementType: - namedType: com.github.openshift.api.config.v1.GCPServiceEndpoint - elementRelationship: associative - keys: - - name - name: com.github.openshift.api.config.v1.GCPResourceLabel map: fields: @@ -1420,17 +1410,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: com.github.openshift.api.config.v1.GCPServiceEndpoint +- name: com.github.openshift.api.config.v1.GatherConfig + map: + fields: + - name: dataPolicy + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: gatherers + type: + namedType: com.github.openshift.api.config.v1.Gatherers + default: {} + - name: storage + type: + namedType: com.github.openshift.api.config.v1.Storage + default: {} +- name: com.github.openshift.api.config.v1.GathererConfig map: fields: - name: name type: scalar: string - default: "" - - name: url + - name: state type: scalar: string - default: "" +- name: com.github.openshift.api.config.v1.Gatherers + map: + fields: + - name: custom + type: + namedType: com.github.openshift.api.config.v1.Custom + default: {} + - name: mode + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: custom + discriminatorValue: Custom - name: com.github.openshift.api.config.v1.GitHubIdentityProvider map: fields: @@ -1750,12 +1770,47 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.ImagePolicyStatus default: {} +- name: 
com.github.openshift.api.config.v1.ImagePolicyFulcioCAWithRekorRootOfTrust + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1.ImagePolicyPKIRootOfTrust + map: + fields: + - name: caIntermediatesData + type: + scalar: string + - name: caRootsData + type: + scalar: string + - name: pkiCertificateSubject + type: + namedType: com.github.openshift.api.config.v1.PKICertificateSubject + default: {} +- name: com.github.openshift.api.config.v1.ImagePolicyPublicKeyRootOfTrust + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string - name: com.github.openshift.api.config.v1.ImagePolicySpec map: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1.Policy + namedType: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -1774,6 +1829,16 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: com.github.openshift.api.config.v1.ImageSigstoreVerificationPolicy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1.PolicyIdentity - name: com.github.openshift.api.config.v1.ImageSpec map: fields: @@ -2023,6 +2088,30 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.InsightsDataGather + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.InsightsDataGatherSpec + default: {} +- name: com.github.openshift.api.config.v1.InsightsDataGatherSpec + map: + fields: + - name: gatherConfig + type: + namedType: com.github.openshift.api.config.v1.GatherConfig + default: {} - name: com.github.openshift.api.config.v1.IntermediateTLSProfile map: elementType: @@ -2930,26 +3019,29 @@ var schemaYAML = typed.YAMLObject(`types: - name: nodeDNSIP type: scalar: string -- name: com.github.openshift.api.config.v1.PKI +- name: com.github.openshift.api.config.v1.PKICertificateSubject map: fields: - - name: caIntermediatesData + - name: email type: scalar: string - - name: caRootsData + - name: hostname type: scalar: string - - name: pkiCertificateSubject - type: - namedType: com.github.openshift.api.config.v1.PKICertificateSubject - default: {} -- name: com.github.openshift.api.config.v1.PKICertificateSubject +- name: com.github.openshift.api.config.v1.PersistentVolumeClaimReference map: fields: - - name: email + - name: name type: scalar: string - - name: hostname +- name: com.github.openshift.api.config.v1.PersistentVolumeConfig + map: + fields: + - name: claim + type: + namedType: com.github.openshift.api.config.v1.PersistentVolumeClaimReference + default: {} + - name: mountPath type: scalar: string - name: com.github.openshift.api.config.v1.PlatformSpec @@ -3050,16 +3142,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: vsphere type: namedType: com.github.openshift.api.config.v1.VSpherePlatformStatus -- name: com.github.openshift.api.config.v1.Policy - map: - fields: - - name: rootOfTrust - type: - 
namedType: com.github.openshift.api.config.v1.PolicyRootOfTrust - default: {} - - name: signedIdentity - type: - namedType: com.github.openshift.api.config.v1.PolicyIdentity - name: com.github.openshift.api.config.v1.PolicyFulcioSubject map: fields: @@ -3114,17 +3196,17 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: fulcioCAWithRekor type: - namedType: com.github.openshift.api.config.v1.FulcioCAWithRekor + namedType: com.github.openshift.api.config.v1.ImagePolicyFulcioCAWithRekorRootOfTrust - name: pki type: - namedType: com.github.openshift.api.config.v1.PKI + namedType: com.github.openshift.api.config.v1.ImagePolicyPKIRootOfTrust - name: policyType type: scalar: string default: "" - name: publicKey type: - namedType: com.github.openshift.api.config.v1.PublicKey + namedType: com.github.openshift.api.config.v1.ImagePolicyPublicKeyRootOfTrust unions: - discriminator: policyType fields: @@ -3309,15 +3391,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: noProxy type: scalar: string -- name: com.github.openshift.api.config.v1.PublicKey - map: - fields: - - name: keyData - type: - scalar: string - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1.RegistryLocation map: fields: @@ -3533,6 +3606,21 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.Storage + map: + fields: + - name: persistentVolume + type: + namedType: com.github.openshift.api.config.v1.PersistentVolumeConfig + default: {} + - name: type + type: + scalar: string + unions: + - discriminator: type + fields: + - fieldName: persistentVolume + discriminatorValue: PersistentVolume - name: com.github.openshift.api.config.v1.TLSSecurityProfile map: fields: @@ -4087,7 +4175,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1alpha1.Policy + namedType: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -4185,19 +4273,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor - map: - fields: - - name: fulcioCAData - type: - scalar: string - - name: fulcioSubject - type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject - default: {} - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1alpha1.GatherConfig map: fields: @@ -4234,12 +4309,47 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus default: {} +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrust + map: + fields: + - name: fulcioCAData + type: + scalar: string + - name: fulcioSubject + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject + default: {} + - name: rekorKeyData + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyPKIRootOfTrust + map: + fields: + - name: caIntermediatesData + type: + scalar: string + - name: caRootsData + type: + scalar: string + - name: pkiCertificateSubject + type: + namedType: com.github.openshift.api.config.v1alpha1.PKICertificateSubject + default: {} +- name: com.github.openshift.api.config.v1alpha1.ImagePolicyPublicKeyRootOfTrust + map: + fields: + - name: keyData + type: + scalar: string + - name: rekorKeyData + type: + scalar: string - name: 
com.github.openshift.api.config.v1alpha1.ImagePolicySpec map: fields: - name: policy type: - namedType: com.github.openshift.api.config.v1alpha1.Policy + namedType: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy default: {} - name: scopes type: @@ -4258,6 +4368,17 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: com.github.openshift.api.config.v1alpha1.ImageSigstoreVerificationPolicy + map: + fields: + - name: rootOfTrust + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust + default: {} + - name: signedIdentity + type: + namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity + default: {} - name: com.github.openshift.api.config.v1alpha1.InsightsDataGather map: fields: @@ -4336,19 +4457,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: verbosity type: scalar: string -- name: com.github.openshift.api.config.v1alpha1.PKI - map: - fields: - - name: caIntermediatesData - type: - scalar: string - - name: caRootsData - type: - scalar: string - - name: pkiCertificateSubject - type: - namedType: com.github.openshift.api.config.v1alpha1.PKICertificateSubject - default: {} - name: com.github.openshift.api.config.v1alpha1.PKICertificateSubject map: fields: @@ -4375,17 +4483,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: mountPath type: scalar: string -- name: com.github.openshift.api.config.v1alpha1.Policy - map: - fields: - - name: rootOfTrust - type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust - default: {} - - name: signedIdentity - type: - namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity - default: {} - name: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject map: fields: @@ -4440,17 +4537,17 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: fulcioCAWithRekor type: - namedType: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrust - name: pki type: - namedType: com.github.openshift.api.config.v1alpha1.PKI + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyPKIRootOfTrust - name: policyType type: scalar: string default: "" - name: publicKey type: - namedType: com.github.openshift.api.config.v1alpha1.PublicKey + namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyPublicKeyRootOfTrust unions: - discriminator: policyType fields: @@ -4460,15 +4557,6 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: PKI - fieldName: publicKey discriminatorValue: PublicKey -- name: com.github.openshift.api.config.v1alpha1.PublicKey - map: - fields: - - name: keyData - type: - scalar: string - - name: rekorKeyData - type: - scalar: string - name: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig map: fields: diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go index 048895c11..99cad8e20 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go @@ -132,6 +132,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.ConsoleSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ConsoleStatus"): return &configv1.ConsoleStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Custom"): + return &configv1.CustomApplyConfiguration{} 
case v1.SchemeGroupVersion.WithKind("CustomFeatureGates"): return &configv1.CustomFeatureGatesApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("CustomTLSProfile"): @@ -170,8 +172,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.FeatureGateSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("FeatureGateStatus"): return &configv1.FeatureGateStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("FulcioCAWithRekor"): - return &configv1.FulcioCAWithRekorApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GatherConfig"): + return &configv1.GatherConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("GathererConfig"): + return &configv1.GathererConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Gatherers"): + return &configv1.GatherersApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GCPPlatformStatus"): return &configv1.GCPPlatformStatusApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("GCPResourceLabel"): @@ -216,10 +222,18 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.ImageLabelApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicy"): return &configv1.ImagePolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyFulcioCAWithRekorRootOfTrust"): + return &configv1.ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyPKIRootOfTrust"): + return &configv1.ImagePolicyPKIRootOfTrustApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImagePolicyPublicKeyRootOfTrust"): + return &configv1.ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicySpec"): return &configv1.ImagePolicySpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImagePolicyStatus"): return &configv1.ImagePolicyStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageSigstoreVerificationPolicy"): + return &configv1.ImageSigstoreVerificationPolicyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImageSpec"): return &configv1.ImageSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImageStatus"): @@ -244,6 +258,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.IngressSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("IngressStatus"): return &configv1.IngressStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("InsightsDataGather"): + return &configv1.InsightsDataGatherApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("InsightsDataGatherSpec"): + return &configv1.InsightsDataGatherSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("KeystoneIdentityProvider"): return &configv1.KeystoneIdentityProviderApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("KMSConfig"): @@ -336,16 +354,16 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.OvirtPlatformLoadBalancerApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("OvirtPlatformStatus"): return &configv1.OvirtPlatformStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("PKI"): - return &configv1.PKIApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PersistentVolumeClaimReference"): + return &configv1.PersistentVolumeClaimReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("PersistentVolumeConfig"): + return &configv1.PersistentVolumeConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PKICertificateSubject"): return 
&configv1.PKICertificateSubjectApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PlatformSpec"): return &configv1.PlatformSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PlatformStatus"): return &configv1.PlatformStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("Policy"): - return &configv1.PolicyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PolicyFulcioSubject"): return &configv1.PolicyFulcioSubjectApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PolicyIdentity"): @@ -378,8 +396,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.ProxySpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ProxyStatus"): return &configv1.ProxyStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("PublicKey"): - return &configv1.PublicKeyApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("RegistryLocation"): return &configv1.RegistryLocationApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("RegistrySources"): @@ -400,6 +416,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.SecretNameReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("SignatureStore"): return &configv1.SignatureStoreApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Storage"): + return &configv1.StorageApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("TemplateReference"): return &configv1.TemplateReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("TLSProfileSpec"): @@ -478,16 +496,22 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.ContainerResourceApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("EtcdBackupSpec"): return &configv1alpha1.EtcdBackupSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("FulcioCAWithRekor"): - return &configv1alpha1.FulcioCAWithRekorApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("GatherConfig"): return &configv1alpha1.GatherConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicy"): return &configv1alpha1.ImagePolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyFulcioCAWithRekorRootOfTrust"): + return &configv1alpha1.ImagePolicyFulcioCAWithRekorRootOfTrustApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyPKIRootOfTrust"): + return &configv1alpha1.ImagePolicyPKIRootOfTrustApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyPublicKeyRootOfTrust"): + return &configv1alpha1.ImagePolicyPublicKeyRootOfTrustApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicySpec"): return &configv1alpha1.ImagePolicySpecApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("ImagePolicyStatus"): return &configv1alpha1.ImagePolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ImageSigstoreVerificationPolicy"): + return &configv1alpha1.ImageSigstoreVerificationPolicyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGather"): return &configv1alpha1.InsightsDataGatherApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("InsightsDataGatherSpec"): @@ -498,12 +522,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.PersistentVolumeClaimReferenceApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PersistentVolumeConfig"): return &configv1alpha1.PersistentVolumeConfigApplyConfiguration{} - case 
v1alpha1.SchemeGroupVersion.WithKind("PKI"): - return &configv1alpha1.PKIApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PKICertificateSubject"): return &configv1alpha1.PKICertificateSubjectApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Policy"): - return &configv1alpha1.PolicyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyFulcioSubject"): return &configv1alpha1.PolicyFulcioSubjectApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyIdentity"): @@ -514,8 +534,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.PolicyMatchRemapIdentityApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("PolicyRootOfTrust"): return &configv1alpha1.PolicyRootOfTrustApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PublicKey"): - return &configv1alpha1.PublicKeyApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("RetentionNumberConfig"): return &configv1alpha1.RetentionNumberConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("RetentionPolicy"): diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index 70957eee8..afce6aef5 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -28,6 +28,7 @@ type ConfigV1Interface interface { ImageTagMirrorSetsGetter InfrastructuresGetter IngressesGetter + InsightsDataGathersGetter NetworksGetter NodesGetter OAuthsGetter @@ -106,6 +107,10 @@ func (c *ConfigV1Client) Ingresses() IngressInterface { return newIngresses(c) } +func (c *ConfigV1Client) InsightsDataGathers() InsightsDataGatherInterface { + return newInsightsDataGathers(c) +} + func (c *ConfigV1Client) Networks() NetworkInterface { return newNetworks(c) } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go index 764c8912a..b5a1a5257 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -76,6 +76,10 @@ func (c *FakeConfigV1) Ingresses() v1.IngressInterface { return newFakeIngresses(c) } +func (c *FakeConfigV1) InsightsDataGathers() v1.InsightsDataGatherInterface { + return newFakeInsightsDataGathers(c) +} + func (c *FakeConfigV1) Networks() v1.NetworkInterface { return newFakeNetworks(c) } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_insightsdatagather.go new file mode 100644 index 000000000..1901b7db7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_insightsdatagather.go @@ -0,0 +1,37 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + typedconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + gentype "k8s.io/client-go/gentype" +) + +// fakeInsightsDataGathers implements InsightsDataGatherInterface +type fakeInsightsDataGathers struct { + *gentype.FakeClientWithListAndApply[*v1.InsightsDataGather, *v1.InsightsDataGatherList, *configv1.InsightsDataGatherApplyConfiguration] + Fake *FakeConfigV1 +} + +func newFakeInsightsDataGathers(fake *FakeConfigV1) typedconfigv1.InsightsDataGatherInterface { + return &fakeInsightsDataGathers{ + gentype.NewFakeClientWithListAndApply[*v1.InsightsDataGather, *v1.InsightsDataGatherList, *configv1.InsightsDataGatherApplyConfiguration]( + fake.Fake, + "", + v1.SchemeGroupVersion.WithResource("insightsdatagathers"), + v1.SchemeGroupVersion.WithKind("InsightsDataGather"), + func() *v1.InsightsDataGather { return &v1.InsightsDataGather{} }, + func() *v1.InsightsDataGatherList { return &v1.InsightsDataGatherList{} }, + func(dst, src *v1.InsightsDataGatherList) { dst.ListMeta = src.ListMeta }, + func(list *v1.InsightsDataGatherList) []*v1.InsightsDataGather { + return gentype.ToPointerSlice(list.Items) + }, + func(list *v1.InsightsDataGatherList, items []*v1.InsightsDataGather) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, + } +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index 44ad19dcb..27c5fd110 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -34,6 +34,8 @@ type InfrastructureExpansion interface{} type IngressExpansion interface{} +type InsightsDataGatherExpansion interface{} + type NetworkExpansion interface{} type NodeExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go new file mode 100644 index 000000000..43f662012 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface. +// A group's client should implement this interface. +type InsightsDataGathersGetter interface { + InsightsDataGathers() InsightsDataGatherInterface +} + +// InsightsDataGatherInterface has methods to work with InsightsDataGather resources. 
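Since the diff only shows the generated plumbing, a brief sketch of how the new fake client is typically exercised in a unit test may help. The object name "cluster" is an assumption here (config.openshift.io resources are conventionally singletons named cluster).

package main

import (
	"context"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Seed the fake clientset with a cluster-scoped InsightsDataGather; the fake
	// routes Get/List/Watch/Apply through the same gentype helpers as the real client.
	client := fakeconfig.NewSimpleClientset(&configv1.InsightsDataGather{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	})
	got, err := client.ConfigV1().InsightsDataGathers().Get(context.Background(), "cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", got.Name)
}
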
+type InsightsDataGatherInterface interface { + Create(ctx context.Context, insightsDataGather *configv1.InsightsDataGather, opts metav1.CreateOptions) (*configv1.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *configv1.InsightsDataGather, opts metav1.UpdateOptions) (*configv1.InsightsDataGather, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.InsightsDataGather, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.InsightsDataGatherList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.InsightsDataGather, err error) + InsightsDataGatherExpansion +} + +// insightsDataGathers implements InsightsDataGatherInterface +type insightsDataGathers struct { + *gentype.ClientWithListAndApply[*configv1.InsightsDataGather, *configv1.InsightsDataGatherList, *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration] +} + +// newInsightsDataGathers returns a InsightsDataGathers +func newInsightsDataGathers(c *ConfigV1Client) *insightsDataGathers { + return &insightsDataGathers{ + gentype.NewClientWithListAndApply[*configv1.InsightsDataGather, *configv1.InsightsDataGatherList, *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration]( + "insightsdatagathers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.InsightsDataGather { return &configv1.InsightsDataGather{} }, + func() *configv1.InsightsDataGatherList { return &configv1.InsightsDataGatherList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go new file mode 100644 index 000000000..53a173991 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go @@ -0,0 +1,85 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherInformer provides access to a shared informer and lister for +// InsightsDataGathers. +type InsightsDataGatherInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.InsightsDataGatherLister +} + +type insightsDataGatherInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. 
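And the matching sketch against a live cluster, assuming in-cluster credentials; error handling is deliberately minimal.

package main

import (
	"context"
	"fmt"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// InsightsDataGathers() is the new getter added to ConfigV1Interface.
	list, err := client.ConfigV1().InsightsDataGathers().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, idg := range list.Items {
		fmt.Println(idg.Name)
	}
}
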
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().Watch(ctx, options) + }, + }, + &apiconfigv1.InsightsDataGather{}, + resyncPeriod, + indexers, + ) +} + +func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.InsightsDataGather{}, f.defaultInformer) +} + +func (f *insightsDataGatherInformer) Lister() configv1.InsightsDataGatherLister { + return configv1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go index ff4c521b0..0ad1b98f3 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -40,6 +40,8 @@ type Interface interface { Infrastructures() InfrastructureInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer + // InsightsDataGathers returns a InsightsDataGatherInformer. + InsightsDataGathers() InsightsDataGatherInformer // Networks returns a NetworkInformer. Networks() NetworkInformer // Nodes returns a NodeInformer. 
@@ -147,6 +149,11 @@ func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// InsightsDataGathers returns a InsightsDataGatherInformer. +func (v *version) InsightsDataGathers() InsightsDataGatherInformer { + return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Networks returns a NetworkInformer. func (v *version) Networks() NetworkInformer { return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 59c98ea77..146e7e975 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -71,6 +71,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Infrastructures().Informer()}, nil case v1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Ingresses().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("insightsdatagathers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().InsightsDataGathers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("networks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Networks().Informer()}, nil case v1.SchemeGroupVersion.WithResource("nodes"): diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go index d4e79cd0e..ca93cb283 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -70,6 +70,10 @@ type InfrastructureListerExpansion interface{} // IngressLister. type IngressListerExpansion interface{} +// InsightsDataGatherListerExpansion allows custom methods to be added to +// InsightsDataGatherLister. +type InsightsDataGatherListerExpansion interface{} + // NetworkListerExpansion allows custom methods to be added to // NetworkLister. type NetworkListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go new file mode 100644 index 000000000..79da7823f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherLister helps list InsightsDataGathers. +// All objects returned here must be treated as read-only. +type InsightsDataGatherLister interface { + // List lists all InsightsDataGathers in the indexer. + // Objects returned here must be treated as read-only. 
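For completeness, a sketch of consuming the new informer and lister through the shared factory, backed by the fake client so the snippet is self-contained; the resync period and label selector are arbitrary.

package main

import (
	"fmt"
	"time"

	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	client := fakeconfig.NewSimpleClientset()
	factory := configinformers.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Config().V1().InsightsDataGathers()
	// Calling Informer() registers it with the factory, so it must happen
	// before Start for the factory to run it.
	informer.Informer()
	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	// The lister serves reads from the informer's local index.
	items, err := informer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached InsightsDataGathers:", len(items))
}
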
+ List(selector labels.Selector) (ret []*configv1.InsightsDataGather, err error) + // Get retrieves the InsightsDataGather from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.InsightsDataGather, error) + InsightsDataGatherListerExpansion +} + +// insightsDataGatherLister implements the InsightsDataGatherLister interface. +type insightsDataGatherLister struct { + listers.ResourceIndexer[*configv1.InsightsDataGather] +} + +// NewInsightsDataGatherLister returns a new InsightsDataGatherLister. +func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { + return &insightsDataGatherLister{listers.New[*configv1.InsightsDataGather](indexer, configv1.Resource("insightsdatagather"))} +} diff --git a/vendor/github.com/openshift/controller-runtime-common/LICENSE b/vendor/github.com/openshift/controller-runtime-common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go new file mode 100644 index 000000000..b7efbd93f --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go @@ -0,0 +1,144 @@ +/* +Copyright 2026 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tls + +import ( + "context" + "fmt" + "reflect" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// SecurityProfileWatcher watches the APIServer object for TLS profile changes +// and triggers a graceful shutdown when the profile changes. +type SecurityProfileWatcher struct { + client.Client + + // InitialTLSProfileSpec is the TLS profile spec that was configured when the operator started. + InitialTLSProfileSpec configv1.TLSProfileSpec + + // OnProfileChange is a function that will be called when the TLS profile changes. + // It receives the reconcile context, old and new TLS profile specs. + // This allows the caller to make decisions based on the actual profile changes. + // + // The most common use case for this callback is + // to trigger a graceful shutdown of the operator + // to make it pick up the new configuration. + // + // Example: + // + // // Create a context that can be cancelled when there is a need to shut down the manager. + // ctx, cancel := context.WithCancel(ctrl.SetupSignalHandler()) + // defer cancel() + // + // watcher := &SecurityProfileWatcher{ + // OnProfileChange: func(ctx context.Context, old, new configv1.TLSProfileSpec) { + // logger.Infof("TLS profile has changed, initiating a shutdown to reload it. %q: %+v, %q: %+v", + // "old profile", old, + // "new profile", new, + // ) + // // Cancel the outer context to trigger a graceful shutdown of the manager. + // cancel() + // }, + // } + OnProfileChange func(ctx context.Context, oldTLSProfileSpec, newTLSProfileSpec configv1.TLSProfileSpec) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *SecurityProfileWatcher) SetupWithManager(mgr ctrl.Manager) error { + if err := ctrl.NewControllerManagedBy(mgr). + Named("tlssecurityprofilewatcher"). + For(&configv1.APIServer{}, builder.WithPredicates( + predicate.Funcs{ + // Only watch the "cluster" APIServer object. + CreateFunc: func(e event.CreateEvent) bool { + return e.Object.GetName() == APIServerName + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectNew.GetName() == APIServerName + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return e.Object.GetName() == APIServerName + }, + GenericFunc: func(e event.GenericEvent) bool { + return e.Object.GetName() == APIServerName + }, + }, + )). + // Override the default log constructor as it makes the logs very chatty. + WithLogConstructor(func(_ *reconcile.Request) logr.Logger { + return mgr.GetLogger().WithValues( + "controller", "tlssecurityprofilewatcher", + ) + }). + Complete(r); err != nil { + return fmt.Errorf("could not set up controller for TLS security profile watcher: %w", err) + } + + return nil +} + +// Reconcile watches for changes to the APIServer TLS profile and triggers a shutdown +// when the profile changes from the initial configuration. 
+func (r *SecurityProfileWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx, "name", req.Name) + + logger.V(1).Info("Reconciling APIServer TLS profile") + defer logger.V(1).Info("Finished reconciling APIServer TLS profile") + + // Fetch the APIServer object. + apiServer := &configv1.APIServer{} + if err := r.Get(ctx, req.NamespacedName, apiServer); err != nil { + if apierrors.IsNotFound(err) { + // If the APIServer object is not found, we don't need to do anything. + // This could happen if the object was deleted. + return ctrl.Result{}, nil + } + + return ctrl.Result{}, fmt.Errorf("failed to get APIServer %s: %w", req.NamespacedName.String(), err) + } + + // Get the current TLS profile spec. + currentTLSProfileSpec, err := GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get TLS profile from APIServer %s: %w", req.NamespacedName.String(), err) + } + + // Compare the current TLS profile spec with the initial one. + if tlsProfileChanged := !reflect.DeepEqual(r.InitialTLSProfileSpec, currentTLSProfileSpec); tlsProfileChanged { + // TLS profile has changed, invoke the callback if it is set. + if r.OnProfileChange != nil { + r.OnProfileChange(ctx, r.InitialTLSProfileSpec, currentTLSProfileSpec) + } + + // Persist the new profile for future change detection. + r.InitialTLSProfileSpec = currentTLSProfileSpec + } + + // No need to requeue, as the callback will handle further actions. + return ctrl.Result{}, nil +} diff --git a/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go new file mode 100644 index 000000000..6b33bd147 --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go @@ -0,0 +1,155 @@ +/* +Copyright 2026 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package tls provides utilities for working with OpenShift TLS profiles. +package tls + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + libgocrypto "github.com/openshift/library-go/pkg/crypto" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // APIServerName is the name of the APIServer resource in the cluster. + APIServerName = "cluster" +) + +var ( + // ErrCustomProfileNil is returned when a custom TLS profile is specified but the Custom field is nil. + ErrCustomProfileNil = errors.New("custom TLS profile specified but Custom field is nil") + + // DefaultTLSCiphers are the default TLS ciphers for API servers. + DefaultTLSCiphers = configv1.TLSProfiles[configv1.TLSProfileIntermediateType].Ciphers //nolint:gochecknoglobals + // DefaultMinTLSVersion is the default minimum TLS version for API servers. + DefaultMinTLSVersion = configv1.TLSProfiles[configv1.TLSProfileIntermediateType].MinTLSVersion //nolint:gochecknoglobals +) + +// FetchAPIServerTLSProfile fetches the TLS profile spec configured in APIServer. 
+// If no profile is configured, the default profile is returned. +func FetchAPIServerTLSProfile(ctx context.Context, k8sClient client.Client) (configv1.TLSProfileSpec, error) { + apiServer := &configv1.APIServer{} + key := client.ObjectKey{Name: APIServerName} + + if err := k8sClient.Get(ctx, key, apiServer); err != nil { + return configv1.TLSProfileSpec{}, fmt.Errorf("failed to get APIServer %q: %w", key.String(), err) + } + + profile, err := GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return configv1.TLSProfileSpec{}, fmt.Errorf("failed to get TLS profile from APIServer %q: %w", key.String(), err) + } + + return profile, nil +} + +// GetTLSProfileSpec returns TLSProfileSpec for the given profile. +// If no profile is configured, the default profile is returned. +func GetTLSProfileSpec(profile *configv1.TLSSecurityProfile) (configv1.TLSProfileSpec, error) { + // Define the default profile (at the time of writing, this is the intermediate profile). + defaultProfile := *configv1.TLSProfiles[configv1.TLSProfileIntermediateType] + // If the profile is nil or the type is empty, return the default profile. + if profile == nil || profile.Type == "" { + return defaultProfile, nil + } + + // Get the profile type. + profileType := profile.Type + + // If the profile type is not custom, return the profile from the map. + if profileType != configv1.TLSProfileCustomType { + if tlsConfig, ok := configv1.TLSProfiles[profileType]; ok { + return *tlsConfig, nil + } + + // If the profile type is not found, return the default profile. + return defaultProfile, nil + } + + if profile.Custom == nil { + // If the custom profile is nil, return an error. + return configv1.TLSProfileSpec{}, ErrCustomProfileNil + } + + // Return the custom profile spec. + return profile.Custom.TLSProfileSpec, nil +} + +// NewTLSConfigFromProfile returns a function that configures a tls.Config based on the provided TLSProfileSpec, +// along with any cipher names from the profile that are not supported by the library-go crypto package. +// The returned function is intended to be used with controller-runtime's TLSOpts. +// +// Note: CipherSuites are only set when MinVersion is below TLS 1.3, as Go's TLS 1.3 implementation +// does not allow configuring cipher suites - all TLS 1.3 ciphers are always enabled. +// See: https://github.com/golang/go/issues/29349 +func NewTLSConfigFromProfile(profile configv1.TLSProfileSpec) (tlsConfig func(*tls.Config), unsupportedCiphers []string) { + minVersion := libgocrypto.TLSVersionOrDie(string(profile.MinTLSVersion)) + cipherSuites, unsupportedCiphers := cipherCodes(profile.Ciphers) + + return func(tlsConf *tls.Config) { + tlsConf.MinVersion = minVersion + // TODO: add curve preferences from profile once https://github.com/openshift/api/pull/2583 merges. + // tlsConf.CurvePreferences <<<<<< profile.Curves + + // TLS 1.3 cipher suites are not configurable in Go (https://github.com/golang/go/issues/29349), so only set CipherSuites accordingly. + // TODO: revisit this once we get an answer on the best way to handle this here: + // https://docs.google.com/document/d/1cMc9E8psHfnoK06ntR8kHSWB8d3rMtmldhnmM4nImjs/edit?disco=AAABu_nPcYg + if minVersion != tls.VersionTLS13 { + tlsConf.CipherSuites = cipherSuites + } + }, unsupportedCiphers +} + +// cipherCode returns the TLS cipher code for an OpenSSL or IANA cipher name. +// Returns 0 if the cipher is not supported. +func cipherCode(cipher string) uint16 { + // First try as IANA name directly. 
+ if code, err := libgocrypto.CipherSuite(cipher); err == nil { + return code + } + + // Try converting from OpenSSL name to IANA name. + ianaCiphers := libgocrypto.OpenSSLToIANACipherSuites([]string{cipher}) + if len(ianaCiphers) == 1 { + if code, err := libgocrypto.CipherSuite(ianaCiphers[0]); err == nil { + return code + } + } + + // Return 0 if the cipher is not supported. + return 0 +} + +// cipherCodes converts a list of cipher names (OpenSSL or IANA format) to their uint16 codes. +// Returns the converted codes and a list of any unsupported cipher names. +func cipherCodes(ciphers []string) (codes []uint16, unsupportedCiphers []string) { + for _, cipher := range ciphers { + code := cipherCode(cipher) + if code == 0 { + unsupportedCiphers = append(unsupportedCiphers, cipher) + continue + } + + codes = append(codes, code) + } + + return codes, unsupportedCiphers +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 33a09ae16..be0337b90 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -30,6 +30,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/util/cert" + + configv1 "github.com/openshift/api/config/v1" ) // TLS versions that are known to golang. Go 1.13 adds support for @@ -141,7 +143,11 @@ var ciphers = map[string]uint16{ } // openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names -// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +// Ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +// This must hold a 1:1 mapping for each OpenSSL cipher defined in openshift/api TLSSecurityProfiles, +// so it can be used to translate OpenSSL ciphers to IANA ciphers, which is what go's crypto/tls understands. +// Ciphers in this map must also be compatible with go's crypto/tls ciphers: +// https://github.com/golang/go/blob/d4febb45179fa99ee1d5783bcb693ed7ba14115c/src/crypto/tls/cipher_suites.go#L682-L724 var openSSLToIANACiphersMap = map[string]string{ // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 @@ -161,6 +167,21 @@ var openSSLToIANACiphersMap = map[string]string{ "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + // Go's crypto/tls does not support CBC mode and DHE ciphers, so we don't want to include them here. 
+ // See: + // - https://github.com/golang/go/issues/26652 + // - https://github.com/golang/go/issues/7758 + // - https://redhat-internal.slack.com/archives/C098FU5MRAB/p1770309657097269 + // + // "ECDHE-ECDSA-AES256-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x24 + // "ECDHE-RSA-AES256-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x28 + // "AES256-SHA256": "TLS_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x3D + // "DHE-RSA-AES128-GCM-SHA256": "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9E + // "DHE-RSA-AES256-GCM-SHA384": "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9F + // "DHE-RSA-CHACHA20-POLY1305": "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xAA + // "DHE-RSA-AES128-SHA256": "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x67 + // "DHE-RSA-AES256-SHA256": "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x6B + // TLS 1 "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 @@ -168,9 +189,10 @@ var openSSLToIANACiphersMap = map[string]string{ "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 // SSL 3 - "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F - "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 - "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A + "ECDHE-RSA-DES-CBC3-SHA": "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", // 0xC0,0x12 } // CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names @@ -242,35 +264,44 @@ func ValidCipherSuites() []string { sort.Strings(validCipherSuites) return validCipherSuites } + +// DefaultTLSProfileType is the intermediate profile type. +const DefaultTLSProfileType = configv1.TLSProfileIntermediateType + +// DefaultCiphers returns the default cipher suites for TLS connections. +// +// RECOMMENDATION: Instead of relying on this function directly, consumers should respect +// TLSSecurityProfile settings from one of the OpenShift API configuration resources: +// - For API servers: Use apiserver.config.openshift.io/cluster Spec.TLSSecurityProfile +// - For ingress controllers: Use operator.openshift.io/v1 IngressController Spec.TLSSecurityProfile +// - For kubelet: Use machineconfiguration.openshift.io/v1 KubeletConfig Spec.TLSSecurityProfile +// +// These API resources allow cluster administrators to choose between Old, Intermediate, +// Modern, or Custom TLS profiles. Components should observe these settings. func DefaultCiphers() []uint16 { - // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher - // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for - // perfect forward secrecy. Servers may provide additional cipher - // suites for backwards compatibility with HTTP/1.1 clients. - // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A - // (TLS 1.2 Cipher Suite Black List). + // Aligned with intermediate profile of the 5.7 version of the Mozilla Server + // Side TLS guidelines found at: https://ssl-config.mozilla.org/guidelines/5.7.json + // + // Latest guidelines: https://ssl-config.mozilla.org/guidelines/latest.json + // + // This profile provides strong security with wide compatibility. 
+ // It requires TLS 1.2+ and uses only AEAD cipher suites (GCM, ChaCha20-Poly1305) + // with ECDHE key exchange for perfect forward secrecy. + // + // All CBC-mode ciphers have been removed due to padding oracle vulnerabilities. + // All RSA key exchange ciphers have been removed due to lack of perfect forward secrecy. + // + // HTTP/2 compliance: All ciphers are compliant with RFC7540, section 9.2. return []uint16{ + // TLS 1.2 cipher suites with ECDHE + AEAD tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by HTTP/2 tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 - // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index - // because it comes after ciphers forbidden by the http/2 spec - // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack - // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack - tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 - tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + + // TLS 1.3 cipher suites (negotiated automatically, not configurable) tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384, tls.TLS_CHACHA20_POLY1305_SHA256, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go index 1cb4e5554..c2c8b8368 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go @@ -188,7 +188,7 @@ func getValidityFromAnnotations(annotations map[string]string) (notBefore time.T return notBefore, notAfter, fmt.Sprintf("bad expiry: %q", notAfterString) } notBeforeString := annotations[CertificateNotBeforeAnnotation] - if len(notAfterString) == 0 { + if len(notBeforeString) == 0 { return notBefore, notAfter, "missing notBefore" } notBefore, err = time.Parse(time.RFC3339, notBeforeString) diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 7dd2638e8..769af387e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -29,7 +29,7 @@ loop: MOVD $NUM_ROUNDS, R21 VLD1 (R11), [V30.S4, V31.S4] - // load contants + // load constants // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] WORD $0x4D60E940 diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index a035956fc..47a07539d 100644 --- 
a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -1490,6 +1490,7 @@ type openSSHEncryptedPrivateKey struct { NumKeys uint32 PubKey []byte PrivKeyBlock []byte + Rest []byte `ssh:"rest"` } type openSSHPrivateKey struct { diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index 251b9d06a..ab22c3d38 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte { return to[len(s):] } -var bigIntType = reflect.TypeOf((*big.Int)(nil)) +var bigIntType = reflect.TypeFor[*big.Int]() // Decode a packet into its corresponding message. func decode(packet []byte) (interface{}, error) { diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go index 24bd7c8e8..a6249a122 100644 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go @@ -106,6 +106,13 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { if !ok { return nil, errors.New("parse uint32 failed") } + // Each ASN.1 encoded OID must have a minimum + // of 2 bytes; 64 maximum mechanisms is an + // arbitrary, but reasonable ceiling. + const maxMechs = 64 + if n > maxMechs || int(n)*2 > len(rest) { + return nil, errors.New("invalid mechanism count") + } s := &userAuthRequestGSSAPI{ N: n, OIDS: make([]asn1.ObjectIdentifier, n), @@ -122,7 +129,6 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { return nil, err } - } return s, nil } diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go index b171b330b..152470fcb 100644 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -44,7 +44,7 @@ func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { if !ok { return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + ch := c.forwards.add("unix", socketPath) return &unixListener{socketPath, c, ch}, nil } @@ -96,7 +96,7 @@ func (l *unixListener) Accept() (net.Conn, error) { // Close closes the listener. func (l *unixListener) Close() error { // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + l.conn.forwards.remove("unix", l.socketPath) m := streamLocalChannelForwardMsg{ l.socketPath, } diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index 93d844f03..78c41fe5a 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -11,6 +11,7 @@ import ( "io" "math/rand" "net" + "net/netip" "strconv" "strings" "sync" @@ -22,14 +23,21 @@ import ( // the returned net.Listener. The listener must be serviced, or the // SSH connection may hang. // N must be "tcp", "tcp4", "tcp6", or "unix". +// +// If the address is a hostname, it is sent to the remote peer as-is, without +// being resolved locally, and the Listener Addr method will return a zero IP. 
func (c *Client) Listen(n, addr string) (net.Listener, error) { switch n { case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseInt(portStr, 10, 32) if err != nil { return nil, err } - return c.ListenTCP(laddr) + return c.listenTCPInternal(host, int(port)) case "unix": return c.ListenUnix(addr) default: @@ -102,15 +110,24 @@ func (c *Client) handleForwards() { // ListenTCP requests the remote peer open a listening socket // on laddr. Incoming connections will be available by calling // Accept on the returned net.Listener. +// +// ListenTCP accepts an IP address, to provide a hostname use [Client.Listen] +// with "tcp", "tcp4", or "tcp6" network instead. func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { c.handleForwardsOnce.Do(c.handleForwards) if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { return c.autoPortListenWorkaround(laddr) } + return c.listenTCPInternal(laddr.IP.String(), laddr.Port) +} + +func (c *Client) listenTCPInternal(host string, port int) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), + host, + uint32(port), } // send message ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) @@ -123,20 +140,33 @@ func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { // If the original port was 0, then the remote side will // supply a real port number in the response. - if laddr.Port == 0 { + if port == 0 { var p struct { Port uint32 } if err := Unmarshal(resp, &p); err != nil { return nil, err } - laddr.Port = int(p.Port) + port = int(p.Port) } + // Construct a local address placeholder for the remote listener. If the + // original host is an IP address, preserve it so that Listener.Addr() + // reports the same IP. If the host is a hostname or cannot be parsed as an + // IP, fall back to IPv4zero. The port field is always set, even if the + // original port was 0, because in that case the remote server will assign + // one, allowing callers to determine which port was selected. + ip := net.IPv4zero + if parsed, err := netip.ParseAddr(host); err == nil { + ip = net.IP(parsed.AsSlice()) + } + laddr := &net.TCPAddr{ + IP: ip, + Port: port, + } + addr := net.JoinHostPort(host, strconv.FormatInt(int64(port), 10)) + ch := c.forwards.add("tcp", addr) - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil + return &tcpListener{laddr, addr, c, ch}, nil } // forwardList stores a mapping between remote @@ -149,8 +179,9 @@ type forwardList struct { // forwardEntry represents an established mapping of a laddr on a // remote ssh server to a channel connected to a tcpListener. type forwardEntry struct { - laddr net.Addr - c chan forward + addr string // host:port or socket path + network string // tcp or unix + c chan forward } // forward represents an incoming forwarded tcpip connection. 
The @@ -161,12 +192,13 @@ type forward struct { raddr net.Addr // the raddr of the incoming connection } -func (l *forwardList) add(addr net.Addr) chan forward { +func (l *forwardList) add(n, addr string) chan forward { l.Lock() defer l.Unlock() f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), + addr: addr, + network: n, + c: make(chan forward, 1), } l.entries = append(l.entries, f) return f.c @@ -185,19 +217,20 @@ func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { if port == 0 || port > 65535 { return nil, fmt.Errorf("ssh: port number out of range: %d", port) } - ip := net.ParseIP(string(addr)) - if ip == nil { + ip, err := netip.ParseAddr(addr) + if err != nil { return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil + return &net.TCPAddr{IP: net.IP(ip.AsSlice()), Port: int(port)}, nil } func (l *forwardList) handleChannels(in <-chan NewChannel) { for ch := range in { var ( - laddr net.Addr - raddr net.Addr - err error + addr string + network string + raddr net.Addr + err error ) switch channelType := ch.ChannelType(); channelType { case "forwarded-tcpip": @@ -207,40 +240,34 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) { continue } - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } + // RFC 4254 section 7.2 specifies that incoming addresses should + // list the address that was connected, in string format. It is the + // same address used in the tcpip-forward request. The originator + // address is an IP address instead. + addr = net.JoinHostPort(payload.Addr, strconv.FormatUint(uint64(payload.Port), 10)) + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) if err != nil { ch.Reject(ConnectionFailed, err.Error()) continue } - + network = "tcp" case "forwarded-streamlocal@openssh.com": var payload forwardedStreamLocalPayload if err = Unmarshal(ch.ExtraData(), &payload); err != nil { ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) continue } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } + addr = payload.SocketPath raddr = &net.UnixAddr{ Name: "@", Net: "unix", } + network = "unix" default: panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) } - if ok := l.forward(laddr, raddr, ch); !ok { + if ok := l.forward(network, addr, raddr, ch); !ok { // Section 7.2, implementations MUST reject spurious incoming // connections. ch.Reject(Prohibited, "no forward for address") @@ -252,11 +279,11 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) { // remove removes the forward entry, and the channel feeding its // listener. -func (l *forwardList) remove(addr net.Addr) { +func (l *forwardList) remove(n, addr string) { l.Lock() defer l.Unlock() for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + if n == f.network && addr == f.addr { l.entries = append(l.entries[:i], l.entries[i+1:]...) 
close(f.c) return @@ -274,11 +301,11 @@ func (l *forwardList) closeAll() { l.entries = nil } -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { +func (l *forwardList) forward(n, addr string, raddr net.Addr, ch NewChannel) bool { l.Lock() defer l.Unlock() for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + if n == f.network && addr == f.addr { f.c <- forward{newCh: ch, raddr: raddr} return true } @@ -288,6 +315,7 @@ func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { type tcpListener struct { laddr *net.TCPAddr + addr string conn *Client in <-chan forward @@ -314,13 +342,21 @@ func (l *tcpListener) Accept() (net.Conn, error) { // Close closes the listener. func (l *tcpListener) Close() error { + host, port, err := net.SplitHostPort(l.addr) + if err != nil { + return err + } + rport, err := strconv.ParseUint(port, 10, 32) + if err != nil { + return err + } m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), + host, + uint32(rport), } // this also closes the listener. - l.conn.forwards.remove(l.laddr) + l.conn.forwards.remove("tcp", l.addr) ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) if err == nil && !ok { err = errors.New("ssh: cancel-tcpip-forward failed") diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go index 2a0123d4b..48dbd82ae 100644 --- a/vendor/golang.org/x/mod/modfile/print.go +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -33,7 +33,7 @@ type printer struct { } // printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { +func (p *printer) printf(format string, args ...any) { fmt.Fprintf(p, format, args...) } diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index 2d7486804..504a2f1df 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -94,7 +94,7 @@ func (x *FileSyntax) Span() (start, end Position) { // line, the new line is added at the end of the block containing hint, // extracting hint into a new block if it is not yet in one. // -// If the hint is non-nil buts its first token does not match, +// If the hint is non-nil but its first token does not match, // the new line is added after the block containing hint // (or hint itself, if not in a block). // @@ -600,7 +600,7 @@ func (in *input) readToken() { // Checked all punctuation. Must be identifier token. if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) + in.Error(fmt.Sprintf("unexpected input character %#q", rune(c))) } // Scan over identifier. 
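A brief aside on the x/crypto/ssh tcpip.go refactor above: keying the forward list by network and address strings (instead of net.Addr) is what lets Client.Listen request a remote forward for a hostname that is never resolved on the client side. A minimal usage sketch of that public API follows; the user, password, and host names are hypothetical placeholders, and the insecure host key callback is for illustration only.

package main

import (
	"io"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "demo",                                   // hypothetical
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // hypothetical
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // do not use in production
	}
	client, err := ssh.Dial("tcp", "bastion.example:22", cfg) // hypothetical host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The bind address is sent to the server as-is; "internal.example" is
	// never resolved on the client, and ln.Addr() reports a zero IP.
	ln, err := client.Listen("tcp", "internal.example:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			io.WriteString(c, "hello from the ssh client\n")
		}(conn)
	}
}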
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index a86ee4fd8..c5b8305de 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -368,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -574,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V Err: err, } } - errorf := func(format string, args ...interface{}) *Error { + errorf := func(format string, args ...any) *Error { return wrapError(fmt.Errorf(format, args...)) } @@ -685,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -1594,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error { r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") } if rationale != "" { - for _, line := range strings.Split(rationale, "\n") { + for line := range strings.SplitSeq(rationale, "\n") { com := Comment{Token: "// " + line} r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 16e1aa7ab..739c13f48 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -261,7 +261,7 @@ func modPathOK(r rune) bool { // importPathOK reports whether r can appear in a package import path element. // -// Import paths are intermediate between module paths and file paths: we allow +// Import paths are intermediate between module paths and file paths: we // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some @@ -802,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool { for globs != "" { // Extract next non-empty glob in comma-separated list. var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] + if before, after, ok := strings.Cut(globs, ","); ok { + glob, globs = before, after } else { glob, globs = globs, "" } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 628f8fd68..824b282c8 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. // The canonical invalid semantic version is the empty string. 
func Canonical(v string) string { p, ok := parse(v) diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index d3cb95175..24cea6882 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -2,42 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package context defines the Context type, which carries deadlines, -// cancellation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name [context]. -// -// Incoming requests to a server should create a [Context], and outgoing -// calls to servers should accept a Context. The chain of function -// calls between them must propagate the Context, optionally replacing -// it with a derived Context created using [WithCancel], [WithDeadline], -// [WithTimeout], or [WithValue]. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. This is discussed further in -// https://go.dev/blog/context-and-structs. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. +// Package context has been superseded by the standard library [context] package. // -// See https://go.dev/blog/context for example code for a server that uses -// Contexts. +// Deprecated: Use the standard library context package instead. package context import ( diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 93bcaab03..9a4bd123c 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. 
+// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. +func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { @@ -527,9 +540,7 @@ } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -539,6 +550,24 @@ return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason so the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1161,7 +1189,7 @@ var defaultRFC9218Priority = PriorityParam{ // PriorityParam struct below is a superset of both schemes. The exported // symbols are from RFC 7540 and the non-exported ones are from RFC 9218. -// PriorityParam are the stream prioritzation parameters. +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index be759b606..ccb87e6da 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -375,11 +376,24 @@ type ClientConn struct { // completely unresponsive connection. pendingResets int + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
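The readBeforeStreamID field added above encodes a simple liveness rule: a request stream has seen some sign of life if and only if a frame arrived from the peer after the stream was opened. A toy model of that bookkeeping, using hypothetical standalone types rather than the vendored ClientConn, might look like this:

package main

import "fmt"

type conn struct {
	nextStreamID       uint32 // next stream ID to hand out (odd, client-initiated)
	readBeforeStreamID uint32 // smallest stream ID not yet followed by a read
}

// openStream models sending a request: it consumes the next odd stream ID.
func (c *conn) openStream() uint32 {
	id := c.nextStreamID
	c.nextStreamID += 2
	return id
}

// onFrameRead models the read loop: every stream opened so far has now been
// followed by at least one frame from the peer.
func (c *conn) onFrameRead() {
	c.readBeforeStreamID = c.nextStreamID
}

// readSinceStream mirrors the cleanupWriteRequest check: if the stream's ID is
// below readBeforeStreamID, the connection showed life after the request.
func (c *conn) readSinceStream(id uint32) bool {
	return c.readBeforeStreamID > id
}

func main() {
	c := &conn{nextStreamID: 1}
	a := c.openStream() // stream 1
	c.onFrameRead()     // peer sends some frame
	b := c.openStream() // stream 3, nothing read since
	fmt.Println(c.readSinceStream(a), c.readSinceStream(b)) // true false
}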
@@ -709,7 +723,7 @@ func canRetryError(err error) bool { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse) + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -719,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -771,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { conf := configFromTransport(t) cc := &ClientConn{ t: t, @@ -796,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), lastActive: time.Now(), + internalStateHook: internalStateHook, } if t.transportTestHooks != nil { t.transportTestHooks.newclientconn(cc) @@ -1036,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - !cc.doNotReuse && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). @@ -1055,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. +// +// This follows slightly different rules than clientConnIdleState.canTakeNewRequest. +// We only permit reservations up to the conn's concurrency limit. +// This differs from ClientConn.ReserveNewRequest, which permits reservations +// past the limit when StrictMaxConcurrentStreams is set. +func (cc *ClientConn) canReserveLocked() bool { + if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) { + return false + } + if !cc.isUsableLocked() { + return false + } + return true +} + // currentRequestCountLocked reports the number of concurrency slots currently in use, // including active streams, reserved slots, and reset streams waiting for acknowledgement. func (cc *ClientConn) currentRequestCountLocked() int { @@ -1066,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } +// availableLocked reports the number of concurrency slots available. 
+func (cc *ClientConn) availableLocked() int { + if !cc.canTakeNewRequestLocked() { + return 0 + } + return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked()) +} + // tooIdleLocked reports whether this connection has been sitting idle // for too much wall time. func (cc *ClientConn) tooIdleLocked() bool { @@ -1090,6 +1135,7 @@ func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() cc.tconn.Close() + cc.maybeCallStateHook() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -1615,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } bodyClosed := cs.reqBodyClosed closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + // Have we read any frames from the connection since sending this request? + readSinceStream := cc.readBeforeStreamID > cs.ID cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1646,8 +1694,10 @@ // // This could be due to the server becoming unresponsive. // To avoid sending too many requests on a dead connection, - // we let the request continue to consume a concurrency slot - // until we can confirm the server is still responding. + // if we haven't read any frames from the connection since + // sending this request, we let it continue to consume + // a concurrency slot until we can confirm the server is + // still responding. // We do this by sending a PING frame along with the RST_STREAM // (unless a ping is already in flight). // @@ -1658,7 +1708,7 @@ // because it's short lived and will probably be closed before // we get the ping response. ping := false - if !closeOnIdle { + if !closeOnIdle && !readSinceStream { cc.mu.Lock() // rstStreamPingsBlocked works around a gRPC behavior: // see comment on the field for details. @@ -1692,6 +1742,7 @@ } close(cs.donec) + cc.maybeCallStateHook() } // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. @@ -2744,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt // See comment on ClientConn.rstStreamPingsBlocked for details. rl.cc.rstStreamPingsBlocked = false } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2794,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -2974,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3076,35 +3130,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read.
+// After Close is called, it returns the gzip.Reader to the pool immediately +// if no Read is in progress, or later when that Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading the response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. +func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3130,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. // (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it.
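The gzipReader rework above replaces a per-response gzip.NewReader allocation with a sync.Pool of reusable readers. For reference, a minimal standalone sketch of that recycling pattern (illustrative only, not the patch's code; the patch additionally resets the pooled reader to an empty source before pooling it, so it holds no reference to the response body):

    package main

    import (
    	"bytes"
    	"compress/gzip"
    	"fmt"
    	"io"
    	"sync"
    )

    // readerPool recycles gzip.Readers; the zero value of gzip.Reader
    // becomes usable once Reset is called on it.
    var readerPool = sync.Pool{New: func() any { return new(gzip.Reader) }}

    func gunzip(compressed []byte) (string, error) {
    	zr := readerPool.Get().(*gzip.Reader)
    	// The patch Resets zr to an empty reader before Put, to drop references;
    	// this sketch keeps it simple.
    	defer readerPool.Put(zr)
    	if err := zr.Reset(bytes.NewReader(compressed)); err != nil {
    		return "", err
    	}
    	out, err := io.ReadAll(zr)
    	return string(out), err
    }

    func main() {
    	var buf bytes.Buffer
    	zw := gzip.NewWriter(&buf)
    	zw.Write([]byte("hello"))
    	zw.Close()
    	fmt.Println(gunzip(buf.Bytes())) // hello <nil>
    }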
+// This lets us expose, for example, NewClientConn to net/http. type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3143,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists. + if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4d3890f99..7de27be52 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -185,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. 
+// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next; these can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -232,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 6d24d6a1b..4e33c29a2 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -214,8 +214,8 @@ func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4.
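As background on the writeQueue change above: the currQueue/nextQueue/currPos scheme makes shift amortized O(1) instead of the old copy-per-shift. A self-contained sketch of the same idea, generalized over the element type (illustrative, not part of the patch):

    package main

    import "fmt"

    // twoStageQueue mirrors the currQueue/nextQueue/currPos scheme above:
    // pops read from curr[pos:], pushes append to next, and when the
    // current stage runs dry the two slices are swapped in O(1).
    type twoStageQueue[T any] struct {
    	curr []T
    	next []T
    	pos  int
    }

    func (q *twoStageQueue[T]) empty() bool { return len(q.curr)-q.pos+len(q.next) == 0 }

    func (q *twoStageQueue[T]) push(v T) { q.next = append(q.next, v) }

    func (q *twoStageQueue[T]) shift() T {
    	if q.empty() {
    		panic("shift on empty queue")
    	}
    	if q.pos >= len(q.curr) {
    		// Swap stages: the drained slice becomes the next stage's
    		// backing array, so steady-state traffic stops allocating.
    		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
    	}
    	v := q.curr[q.pos]
    	var zero T
    	q.curr[q.pos] = zero // drop the reference so it can be collected
    	q.pos++
    	return v
    }

    func main() {
    	var q twoStageQueue[int]
    	for i := 1; i <= 3; i++ {
    		q.push(i)
    	}
    	fmt.Println(q.shift(), q.shift()) // 1 2
    	q.push(4)
    	fmt.Println(q.shift(), q.shift()) // 3 4
    }

Like the patch, the swap recycles the drained slice rather than reversing it, which is where this departs from Okasaki's purely functional two-list queue.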
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -302,7 +302,6 @@ func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go similarity index 89% rename from vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go index 9b5b8808e..dfbfc1eb3 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -37,9 +37,18 @@ type priorityWriteSchedulerRFC9218 struct { // incremental streams or not, when urgency is the same in a given Pop() // call. prioritizeIncremental bool + + // priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we + // receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame. + priorityUpdateBuf struct { + // streamID being 0 means that the buffer is empty. This is a safe + // assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR. + streamID uint32 + priority PriorityParam + } } -func newPriorityWriteSchedulerRFC9128() WriteScheduler { +func newPriorityWriteSchedulerRFC9218() WriteScheduler { ws := &priorityWriteSchedulerRFC9218{ streams: make(map[uint32]streamMetadata), } @@ -50,6 +59,10 @@ func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStr if ws.streams[streamID].location != nil { panic(fmt.Errorf("stream %d already opened", streamID)) } + if streamID == ws.priorityUpdateBuf.streamID { + ws.priorityUpdateBuf.streamID = 0 + opt.priority = ws.priorityUpdateBuf.priority + } q := ws.queuePool.get() ws.streams[streamID] = streamMetadata{ location: q, @@ -95,6 +108,8 @@ func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority metadata := ws.streams[streamID] q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental if q == nil { + ws.priorityUpdateBuf.streamID = streamID + ws.priorityUpdateBuf.priority = priority return } diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index 3aaffdd1f..c2b3c0098 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { Buckets: buckets, } - data.Families = make([]string, 0, len(families)) famMu.RLock() + data.Families = make([]string, 0, len(families)) for name := range families { data.Families = append(data.Families, name) } diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go index dda743466..c7e76cd91 100644 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -440,6 +440,7 @@ func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (er if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != 101 { return ErrBadStatus } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go 
b/vendor/golang.org/x/sync/errgroup/errgroup.go index 1d8cffae8..f69fd7546 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. @@ -144,8 +144,8 @@ func (g *Group) SetLimit(n int) { g.sem = nil return } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) } g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index 22cc99844..3b0450a06 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -9,31 +9,27 @@ // func getisar0() uint64 TEXT ·getisar0(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 + MRS ID_AA64ISAR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getisar1() uint64 TEXT ·getisar1(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 + MRS ID_AA64ISAR1_EL1, R0 MOVD R0, ret+0(FP) RET // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 + MRS ID_AA64PFR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getzfr0() uint64 TEXT ·getzfr0(SB),NOSPLIT,$0-8 // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 + MRS ID_AA64ZFR0_EL1, R0 MOVD R0, ret+0(FP) RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 1e642f330..f5723d4f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -64,6 +64,80 @@ func initOptions() { func archInit() { + // From internal/cpu + const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + + // ecx bits + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VAES = 1 << 9 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 + + // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0 + cpuid_BMI1 = 1 << 3 + cpuid_AVX2 = 1 << 5 + cpuid_BMI2 = 1 << 8 + cpuid_ERMS = 1 << 9 + cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 + cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 + cpuid_SHA = 1 << 29 + cpuid_AVX512BW = 1 << 30 + cpuid_AVX512VL = 1 << 31 + + // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0 + cpuid_AVX512_VBMI = 1 << 1 + cpuid_AVX512_VBMI2 = 1 << 6 + cpuid_GFNI = 1 << 8 + cpuid_AVX512VPCLMULQDQ = 1 << 10 + cpuid_AVX512_BITALG = 1 << 12 + + // edx bits + cpuid_FSRM = 1 << 4 + // edx bits for CPUID 0x80000001 + cpuid_RDTSCP = 1 << 27 + ) + // Additional constants not 
in internal/cpu + const ( + // eax=1: edx + cpuid_SSE2 = 1 << 26 + // eax=1: ecx + cpuid_CX16 = 1 << 13 + cpuid_RDRAND = 1 << 30 + // eax=7,ecx=0: ebx + cpuid_RDSEED = 1 << 18 + cpuid_AVX512IFMA = 1 << 21 + cpuid_AVX512PF = 1 << 26 + cpuid_AVX512ER = 1 << 27 + // eax=7,ecx=0: edx + cpuid_AVX5124VNNIW = 1 << 2 + cpuid_AVX5124FMAPS = 1 << 3 + cpuid_AMXBF16 = 1 << 22 + cpuid_AMXTile = 1 << 24 + cpuid_AMXInt8 = 1 << 25 + // eax=7,ecx=1: eax + cpuid_AVX512BF16 = 1 << 5 + cpuid_AVXIFMA = 1 << 23 + // eax=7,ecx=1: edx + cpuid_AVXVNNIInt8 = 1 << 4 + ) + Initialized = true maxID, _, _, _ := cpuid(0, 0) @@ -73,90 +147,90 @@ func archInit() { } _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) + X86.HasSSE2 = isSet(edx1, cpuid_SSE2) + + X86.HasSSE3 = isSet(ecx1, cpuid_SSE3) + X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ) + X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3) + X86.HasFMA = isSet(ecx1, cpuid_FMA) + X86.HasCX16 = isSet(ecx1, cpuid_CX16) + X86.HasSSE41 = isSet(ecx1, cpuid_SSE41) + X86.HasSSE42 = isSet(ecx1, cpuid_SSE42) + X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT) + X86.HasAES = isSet(ecx1, cpuid_AES) + X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE) + X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND) var osSupportsAVX, osSupportsAVX512 bool // For XGETBV, OSXSAVE bit is required and sufficient. if X86.HasOSXSAVE { eax, _ := xgetbv() // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) + osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2) if runtime.GOOS == "darwin" { // Darwin requires special AVX512 checks, see cpu_darwin_x86.go osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. 
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7) } } - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX if maxID < 7 { return } eax7, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) - - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) + X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX + X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) + X86.HasERMS = isSet(ebx7, cpuid_ERMS) + X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED) + X86.HasADX = isSet(ebx7, cpuid_ADX) + + X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension if X86.HasAVX512 { X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) + X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER) + X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF) + X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) + X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI) + X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW) + X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) } - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) + X86.HasAMXTile = isSet(edx7, cpuid_AMXTile) + X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8) + X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16) // These features depend on the second level of extended features. 
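The archInit changes above are mechanical: isSet now takes the register value and a named bitmask rather than a bit position. A tiny before/after illustration (the register value here is made up for the example):

    package main

    import "fmt"

    func isSetOld(bitpos uint, value uint32) bool { return value&(1<<bitpos) != 0 }

    func isSet(value, bitmask uint32) bool { return value&bitmask != 0 }

    func main() {
    	const cpuid_SSE2 = 1 << 26           // named bit, as in the patch
    	var edx1 uint32 = 0x04000000         // hypothetical EDX from CPUID leaf 1
    	fmt.Println(isSetOld(26, edx1))      // old style: magic bit position
    	fmt.Println(isSet(edx1, cpuid_SSE2)) // new style: named constant
    }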
if eax7 >= 1 { eax71, _, _, edx71 := cpuid(7, 1) if X86.HasAVX512 { - X86.HasAVX512BF16 = isSet(5, eax71) + X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16) } if X86.HasAVX { - X86.HasAVXIFMA = isSet(23, eax71) - X86.HasAVXVNNI = isSet(4, eax71) - X86.HasAVXVNNIInt8 = isSet(4, edx71) + X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA) + X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI) + X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8) } } } -func isSet(bitpos uint, value uint32) bool { - return value&(1<<bitpos) != 0 +func isSet(value, bitmask uint32) bool { + return value&bitmask != 0 } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ ... @@ #include <...> #include <...> +#include <linux/elf.h> #include <...> #include <...> #include <...> @@ -255,6 +256,7 @@ struct ltchars { #include <...> #include <...> #include <...> +#include <linux/mei.h> #include <...> #include <...> #include <...> @@ -529,6 +531,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || @@ -611,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 9439af961..06c0eea6f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b6db27d93..120a7b35d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -853,20 +853,86 @@ const ( DM_VERSION_MAJOR = 0x4 DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE
= 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -1152,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1539,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c @@ -2276,7 +2354,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS 
= 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2463,6 +2701,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2758,6 +3049,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -3091,6 +3399,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + 
SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3317,6 +3666,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3553,6 +3912,8 @@ const ( UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1c37f9fbc..97a61fc5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34ae..a0d6d498c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c12..dd9c903f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba1..384c61ca3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 
607e611c0..6384c9831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3c..553c1c6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a63..b3339f209 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a3033..177091d2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c87..c5abf156d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb0..f1f3fadf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce2409..203ad9c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 
IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb5..4b9abcb21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e9526..f87983037 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7c..64347eb35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e6..7d7191171 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb2..8935d10a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 944e75a11..c1a467017 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3590,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3597,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -6332,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + 
MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9..50e8e6449 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index bd5133730..69439df2a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -916,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. 
+type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 358be3c7f..6e4f50eb4 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2320,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_PROTOCOL enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +}
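Taken together, the new iphlpapi bindings let a caller walk the route table. A sketch of intended usage against this vendored x/sys/windows (error handling is minimal, and the printed fields are chosen only for illustration; AF_UNSPEC, AF_INET, AF_INET6 and the RawSockaddrInet4/RawSockaddrInet6 types come from the same package):

    //go:build windows

    package main

    import (
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	var table *windows.MibIpForwardTable2
    	// AF_UNSPEC requests both IPv4 and IPv6 routes.
    	if err := windows.GetIpForwardTable2(windows.AF_UNSPEC, &table); err != nil {
    		panic(err)
    	}
    	// The table is allocated by iphlpapi; it must be freed with FreeMibTable.
    	defer windows.FreeMibTable(unsafe.Pointer(table))

    	for _, row := range table.Rows() {
    		nh := &row.NextHop
    		// RawSockaddrInet is a union: convert based on the address
    		// family, as its doc comment describes.
    		switch nh.Family {
    		case windows.AF_INET:
    			sa := (*windows.RawSockaddrInet4)(unsafe.Pointer(nh))
    			fmt.Printf("if %d -> %v\n", row.InterfaceIndex, sa.Addr)
    		case windows.AF_INET6:
    			sa := (*windows.RawSockaddrInet6)(unsafe.Pointer(nh))
    			fmt.Printf("if %d -> %v\n", row.InterfaceIndex, sa.Addr)
    		}
    	}
    }

// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.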
type MibUnicastIpAddressRow struct { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 426151a01..f25b7308a 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { @@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { return } +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { @@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa return } +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, 
notificationHandle *Handle) (errcode error) { var _p0 uint32 if initialNotification { diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index bddb2e2ae..6ec537cdc 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -160,7 +160,9 @@ const ( keyEnd keyDeleteWord keyDeleteLine + keyDelete keyClearScreen + keyTranspose keyPasteStart keyPasteEnd ) @@ -194,6 +196,8 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { return keyDeleteLine, b[1:] case 12: // ^L return keyClearScreen, b[1:] + case 20: // ^T + return keyTranspose, b[1:] case 23: // ^W return keyDeleteWord, b[1:] case 14: // ^N @@ -228,6 +232,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { } } + if !pasteActive && len(b) >= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' { + return keyDelete, b[4:] + } + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { switch b[5] { case 'C': @@ -413,7 +421,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -438,7 +446,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -478,7 +486,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. @@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { } t.line = t.line[:t.pos] t.moveCursorToPos(t.pos) - case keyCtrlD: + case keyCtrlD, keyDelete: // Erase the character under the current position. // The EOF case when the line is empty is handled in // readLine(). @@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { } case keyCtrlU: t.eraseNPreviousChars(t.pos) + case keyTranspose: + // This transposes the two characters around the cursor and advances the cursor. Best-effort. + if len(t.line) < 2 || t.pos < 1 { + return + } + swap := t.pos + if swap == len(t.line) { + swap-- // special: at end of line, swap previous two chars + } + t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1] + if t.pos < len(t.line) { + t.pos++ + } + if t.echo { + t.moveCursorToPos(swap - 1) + t.writeLine(t.line[swap-1:]) + t.moveCursorToPos(t.pos) + } case keyClearScreen: // Erases the screen and moves the cursor to the home position. 
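The new keyTranspose case implements the usual readline/Emacs ^T behavior. A standalone sketch of the same cursor rules (illustrative; the real code also redraws the line when echo is on):

    package main

    import "fmt"

    // transpose swaps the two characters around the cursor and advances it,
    // following the same rules as the keyTranspose case above: at the end of
    // the line it swaps the last two characters instead.
    func transpose(line []rune, pos int) ([]rune, int) {
    	if len(line) < 2 || pos < 1 {
    		return line, pos
    	}
    	swap := pos
    	if swap == len(line) {
    		swap-- // at end of line: swap the previous two characters
    	}
    	line[swap-1], line[swap] = line[swap], line[swap-1]
    	if pos < len(line) {
    		pos++
    	}
    	return line, pos
    }

    func main() {
    	line, pos := transpose([]rune("ab"), 1)
    	fmt.Println(string(line), pos) // ba 2
    	line, pos = transpose([]rune("abc"), 3)
    	fmt.Println(string(line), pos) // acb 3
    }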
t.queue([]rune("\x1b[2J\x1b[H")) diff --git a/vendor/golang.org/x/text/encoding/japanese/eucjp.go b/vendor/golang.org/x/text/encoding/japanese/eucjp.go index 79313fa58..6fce8c5f5 100644 --- a/vendor/golang.org/x/text/encoding/japanese/eucjp.go +++ b/vendor/golang.org/x/text/encoding/japanese/eucjp.go @@ -17,9 +17,9 @@ import ( var EUCJP encoding.Encoding = &eucJP var eucJP = internal.Encoding{ - &internal.SimpleEncoding{eucJPDecoder{}, eucJPEncoder{}}, - "EUC-JP", - identifier.EUCPkdFmtJapanese, + Encoding: &internal.SimpleEncoding{Decoder: eucJPDecoder{}, Encoder: eucJPEncoder{}}, + Name: "EUC-JP", + MIB: identifier.EUCPkdFmtJapanese, } type eucJPDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go b/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go index 613226df5..6f7bd460a 100644 --- a/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go +++ b/vendor/golang.org/x/text/encoding/japanese/iso2022jp.go @@ -17,9 +17,9 @@ import ( var ISO2022JP encoding.Encoding = &iso2022JP var iso2022JP = internal.Encoding{ - internal.FuncEncoding{iso2022JPNewDecoder, iso2022JPNewEncoder}, - "ISO-2022-JP", - identifier.ISO2022JP, + Encoding: internal.FuncEncoding{Decoder: iso2022JPNewDecoder, Encoder: iso2022JPNewEncoder}, + Name: "ISO-2022-JP", + MIB: identifier.ISO2022JP, } func iso2022JPNewDecoder() transform.Transformer { diff --git a/vendor/golang.org/x/text/encoding/japanese/shiftjis.go b/vendor/golang.org/x/text/encoding/japanese/shiftjis.go index 16fd8a6e3..af65d43d9 100644 --- a/vendor/golang.org/x/text/encoding/japanese/shiftjis.go +++ b/vendor/golang.org/x/text/encoding/japanese/shiftjis.go @@ -18,9 +18,9 @@ import ( var ShiftJIS encoding.Encoding = &shiftJIS var shiftJIS = internal.Encoding{ - &internal.SimpleEncoding{shiftJISDecoder{}, shiftJISEncoder{}}, - "Shift JIS", - identifier.ShiftJIS, + Encoding: &internal.SimpleEncoding{Decoder: shiftJISDecoder{}, Encoder: shiftJISEncoder{}}, + Name: "Shift JIS", + MIB: identifier.ShiftJIS, } type shiftJISDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/korean/euckr.go b/vendor/golang.org/x/text/encoding/korean/euckr.go index 034337f5d..81c834730 100644 --- a/vendor/golang.org/x/text/encoding/korean/euckr.go +++ b/vendor/golang.org/x/text/encoding/korean/euckr.go @@ -20,9 +20,9 @@ var All = []encoding.Encoding{EUCKR} var EUCKR encoding.Encoding = &eucKR var eucKR = internal.Encoding{ - &internal.SimpleEncoding{eucKRDecoder{}, eucKREncoder{}}, - "EUC-KR", - identifier.EUCKR, + Encoding: &internal.SimpleEncoding{Decoder: eucKRDecoder{}, Encoder: eucKREncoder{}}, + Name: "EUC-KR", + MIB: identifier.EUCKR, } type eucKRDecoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go index 0e0fabfd6..2f2fd5d44 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go @@ -22,21 +22,21 @@ var ( ) var gbk = internal.Encoding{ - &internal.SimpleEncoding{ - gbkDecoder{gb18030: false}, - gbkEncoder{gb18030: false}, + Encoding: &internal.SimpleEncoding{ + Decoder: gbkDecoder{gb18030: false}, + Encoder: gbkEncoder{gb18030: false}, }, - "GBK", - identifier.GBK, + Name: "GBK", + MIB: identifier.GBK, } var gbk18030 = internal.Encoding{ - &internal.SimpleEncoding{ - gbkDecoder{gb18030: true}, - gbkEncoder{gb18030: true}, + Encoding: &internal.SimpleEncoding{ + Decoder: 
gbkDecoder{gb18030: true}, + Encoder: gbkEncoder{gb18030: true}, }, - "GB18030", - identifier.GB18030, + Name: "GB18030", + MIB: identifier.GB18030, } type gbkDecoder struct { diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go index e15b7bf6a..351750e60 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go @@ -17,9 +17,9 @@ import ( var HZGB2312 encoding.Encoding = &hzGB2312 var hzGB2312 = internal.Encoding{ - internal.FuncEncoding{hzGB2312NewDecoder, hzGB2312NewEncoder}, - "HZ-GB2312", - identifier.HZGB2312, + Encoding: internal.FuncEncoding{Decoder: hzGB2312NewDecoder, Encoder: hzGB2312NewEncoder}, + Name: "HZ-GB2312", + MIB: identifier.HZGB2312, } func hzGB2312NewDecoder() transform.Transformer { diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go b/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go index 1fcddde08..5046920ee 100644 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go +++ b/vendor/golang.org/x/text/encoding/traditionalchinese/big5.go @@ -20,9 +20,9 @@ var All = []encoding.Encoding{Big5} var Big5 encoding.Encoding = &big5 var big5 = internal.Encoding{ - &internal.SimpleEncoding{big5Decoder{}, big5Encoder{}}, - "Big5", - identifier.Big5, + Encoding: &internal.SimpleEncoding{Decoder: big5Decoder{}, Encoder: big5Encoder{}}, + Name: "Big5", + MIB: identifier.Big5, } type big5Decoder struct{ transform.NopResetter } diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go index dd99ad14d..ce28c9062 100644 --- a/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -60,9 +60,9 @@ func (utf8bomEncoding) NewDecoder() *encoding.Decoder { } var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, + Encoding: &internal.SimpleEncoding{Decoder: utf8Decoder{}, Encoder: runes.ReplaceIllFormed()}, + Name: "UTF-8", + MIB: identifier.UTF8, } type utf8bomDecoder struct { diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index f6118bec6..527540c62 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -33,8 +33,9 @@ type Diagnostic struct { URL string // SuggestedFixes is an optional list of fixes to address the - // problem described by the diagnostic. Each one represents - // an alternative strategy; at most one may be applied. + // problem described by the diagnostic. Each one represents an + // alternative strategy, and should have a distinct and + // descriptive message; at most one may be applied. 
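// Editor's note: an illustrative sketch, not part of the vendored diff.
// The x/text hunks above convert positional (unkeyed) struct literals to
// keyed ones. Keyed literals stay correct if fields of internal.Encoding
// are ever added or reordered, and they name intent at the call site.
// The Encoding type below is a stand-in, not the real internal.Encoding,
// and the MIB value is a placeholder.
package main

import "fmt"

type Encoding struct {
    Name string
    MIB  int
}

func main() {
    a := Encoding{"EUC-JP", 18}            // positional: breaks silently if field order changes
    b := Encoding{Name: "EUC-JP", MIB: 18} // keyed: order-independent and self-describing
    fmt.Println(a == b)                    // true
}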
// // Fixes for different diagnostics should be treated as // independent changes to the same baseline file state, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index e554c3cc9..8ccf982d2 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "appends", - Doc: analysisutil.MustExtractDoc(doc, "appends"), + Doc: analyzerutil.MustExtractDoc(doc, "appends"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index 1aa7afb9c..ba9ca38a8 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -19,7 +19,7 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report mismatches between assembly files and Go declarations" @@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) { Files: for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } @@ -211,7 +211,7 @@ Files: resultStr = "result register" } for _, line := range retLine { - pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) + pass.Reportf(tf.LineStart(line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) } } retLine = nil @@ -227,7 +227,7 @@ Files: lineno++ badf := func(format string, args ...any) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) + pass.Reportf(tf.LineStart(lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) } if arch == "" { @@ -237,7 +237,7 @@ Files: // so accumulate them all and then prefer the one that // matches build.Default.GOARCH. 
var archCandidates []*asmArch - for _, fld := range strings.Fields(m[1]) { + for fld := range strings.FieldsSeq(m[1]) { for _, a := range arches { if a.name == fld { archCandidates = append(archCandidates, a) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 1914bb476..69734df82 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -17,9 +17,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -27,26 +29,26 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "assign", - Doc: analysisutil.MustExtractDoc(doc, "assign"), + Doc: analyzerutil.MustExtractDoc(doc, "assign"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.AssignStmt)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - stmt := n.(*ast.AssignStmt) + for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) { + stmt := curAssign.Node().(*ast.AssignStmt) if stmt.Tok != token.ASSIGN { - return // ignore := + continue // ignore := } if len(stmt.Lhs) != len(stmt.Rhs) { // If LHS and RHS have different cardinality, they can't be the same. - return + continue } // Delete redundant LHS, RHS pairs, taking care @@ -61,13 +63,13 @@ func run(pass *analysis.Pass) (any, error) { isSelfAssign := false var le string - if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) && - !analysisutil.HasSideEffects(pass.TypesInfo, rhs) && - !isMapIndex(pass.TypesInfo, lhs) && + if typesinternal.NoEffects(info, lhs) && + typesinternal.NoEffects(info, rhs) && + !isMapIndex(info, lhs) && reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check - le = analysisinternal.Format(pass.Fset, lhs) - re := analysisinternal.Format(pass.Fset, rhs) + le = astutil.Format(pass.Fset, lhs) + re := astutil.Format(pass.Fset, rhs) if le == re { isSelfAssign = true } @@ -109,13 +111,14 @@ func run(pass *analysis.Pass) (any, error) { } if len(exprs) == 0 { - return + continue } if len(exprs) == len(stmt.Lhs) { // If every part of the statement is a self-assignment, // remove the whole statement. 
- edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}} + tokFile := pass.Fset.File(stmt.Pos()) + edits = refactor.DeleteStmt(tokFile, curAssign) } pass.Report(analysis.Diagnostic{ @@ -126,7 +129,7 @@ func run(pass *analysis.Pass) (any, error) { TextEdits: edits, }}, }) - }) + } return nil, nil } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 82d5439ce..c6ab7ff7a 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -11,10 +11,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -22,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: analysisutil.MustExtractDoc(doc, "atomic"), + Doc: analyzerutil.MustExtractDoc(doc, "atomic"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -30,7 +31,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync/atomic") { + if !typesinternal.Imports(pass.Pkg, "sync/atomic") { return nil, nil // doesn't directly import sync/atomic } @@ -54,7 +55,7 @@ func run(pass *analysis.Pass) (any, error) { continue } obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { + if typesinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { checkAtomicAddAssignment(pass, n.Lhs[i], call) } } @@ -72,7 +73,7 @@ func checkAtomicAddAssignment(pass *analysis.Pass, left ast.Expr, call *ast.Call arg := call.Args[0] broken := false - gofmt := func(e ast.Expr) string { return analysisinternal.Format(pass.Fset, e) } + gofmt := func(e ast.Expr) string { return astutil.Format(pass.Fset, e) } if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND { broken = gofmt(left) == gofmt(uarg.X) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go index 2508b41f6..84699dd03 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go @@ -18,7 +18,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" @@ -35,7 +35,7 @@ func run(pass *analysis.Pass) (any, error) { if 8*pass.TypesSizes.Sizeof(types.Typ[types.Uintptr]) == 64 { return nil, nil // 64-bit platform } - if !analysisinternal.Imports(pass.Pkg, "sync/atomic") { + if !typesinternal.Imports(pass.Pkg, "sync/atomic") { return nil, nil // doesn't directly import sync/atomic } @@ -54,7 +54,7 @@ func run(pass *analysis.Pass) (any, 
error) { inspect.Preorder(nodeFilter, func(node ast.Node) { call := node.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsFunctionNamed(obj, "sync/atomic", funcNames...) { + if typesinternal.IsFunctionNamed(obj, "sync/atomic", funcNames...) { // For all the listed functions, the expression to check is always the first function argument. check64BitAlignment(pass, obj.Name(), call.Args[0]) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index e1cf9f9b7..574fafaa9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) const Doc = "check for common mistakes involving boolean operators" @@ -84,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { + if j == len(exprs) || !typesinternal.NoEffects(info, exprs[j]) { if i < j { sets = append(sets, exprs[i:j]) } @@ -104,7 +104,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) { seen := make(map[string]bool) for _, e := range exprs { - efmt := analysisinternal.Format(pass.Fset, e) + efmt := astutil.Format(pass.Fset, e) if seen[efmt] { pass.ReportRangef(e, "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt) } else { @@ -150,8 +150,8 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { } // e is of the form 'x != c' or 'x == c'. - xfmt := analysisinternal.Format(pass.Fset, x) - efmt := analysisinternal.Format(pass.Fset, e) + xfmt := astutil.Format(pass.Fset, x) + efmt := astutil.Format(pass.Fset, e) if prev, found := seen[xfmt]; found { // checkRedundant handles the case in which efmt == prev. if efmt != prev { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go index f49fea517..017415f91 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -11,9 +11,11 @@ package buildssa import ( "go/ast" "go/types" + "iter" "reflect" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/ssa" ) @@ -22,7 +24,13 @@ var Analyzer = &analysis.Analyzer{ Doc: "build SSA-form IR for later passes", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildssa", Run: run, - ResultType: reflect.TypeOf(new(SSA)), + Requires: []*analysis.Analyzer{ctrlflow.Analyzer}, + ResultType: reflect.TypeFor[*SSA](), + // Do not add FactTypes here: SSA construction of P must not + // require SSA construction of all of P's dependencies. + // (That's why we enlist the cheaper ctrlflow pass to compute + // noreturn instead of having go/ssa + buildssa do it.) 
+ FactTypes: nil, } // SSA provides SSA-form intermediate representation for all the @@ -33,6 +41,8 @@ type SSA struct { } func run(pass *analysis.Pass) (any, error) { + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + // We must create a new Program for each Package because the // analysis API provides no place to hang a Program shared by // all Packages. Consequently, SSA Packages and Functions do not @@ -49,6 +59,9 @@ func run(pass *analysis.Pass) (any, error) { prog := ssa.NewProgram(pass.Fset, mode) + // Use the result of the ctrlflow analysis to improve the SSA CFG. + prog.SetNoReturn(cfgs.NoReturn) + // Create SSA packages for direct imports. for _, p := range pass.Pkg.Imports() { prog.CreatePackage(p, nil, nil, true) @@ -61,34 +74,41 @@ func run(pass *analysis.Pass) (any, error) { // Compute list of source functions, including literals, // in source order. var funcs []*ssa.Function - for _, f := range pass.Files { - for _, decl := range f.Decls { - if fdecl, ok := decl.(*ast.FuncDecl); ok { - // (init functions have distinct Func - // objects named "init" and distinct - // ssa.Functions named "init#1", ...) - - fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) - if fn == nil { - panic(fn) - } + for _, fn := range allFunctions(pass) { + // (init functions have distinct Func + // objects named "init" and distinct + // ssa.Functions named "init#1", ...) - f := ssapkg.Prog.FuncValue(fn) - if f == nil { - panic(fn) - } + f := ssapkg.Prog.FuncValue(fn) + if f == nil { + panic(fn) + } - var addAnons func(f *ssa.Function) - addAnons = func(f *ssa.Function) { - funcs = append(funcs, f) - for _, anon := range f.AnonFuncs { - addAnons(anon) - } - } - addAnons(f) + var addAnons func(f *ssa.Function) + addAnons = func(f *ssa.Function) { + funcs = append(funcs, f) + for _, anon := range f.AnonFuncs { + addAnons(anon) } } + addAnons(f) } return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil } + +// allFunctions returns an iterator over all named functions. +func allFunctions(pass *analysis.Pass) iter.Seq2[*ast.FuncDecl, *types.Func] { + return func(yield func(*ast.FuncDecl, *types.Func) bool) { + for _, file := range pass.Files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + fn := pass.TypesInfo.Defs[decl.Name].(*types.Func) + if !yield(decl, fn) { + return + } + } + } + } + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 6c7a0df58..d0b28e5b8 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -14,7 +14,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "check //go:build and // +build directives" @@ -86,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since this may not be a Go source file. // Read the raw bytes instead. 
- content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analyzerutil.ReadFile(pass, filename) if err != nil { return err } @@ -298,7 +298,7 @@ func (check *checker) plusBuildLine(pos token.Pos, line string) { fields := strings.Fields(line[len("//"):]) // IsPlusBuildConstraint check above implies fields[0] == "+build" for _, arg := range fields[1:] { - for _, elem := range strings.Split(arg, ",") { + for elem := range strings.SplitSeq(arg, ",") { if strings.HasPrefix(elem, "!!") { check.pass.Reportf(pos, "invalid double negative in build constraint: %s", arg) check.crossCheck = false diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index d9189b5b6..54b8062cc 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -18,7 +18,7 @@ import ( "strconv" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const debug = false @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "runtime/cgo") { + if !typesinternal.Imports(pass.Pkg, "runtime/cgo") { return nil, nil // doesn't use cgo } @@ -350,8 +350,8 @@ func typeOKForCgoCall(t types.Type, m map[types.Type]bool) bool { case *types.Array: return typeOKForCgoCall(t.Elem(), m) case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - if !typeOKForCgoCall(t.Field(i).Type(), m) { + for field := range t.Fields() { + if !typeOKForCgoCall(field.Type(), m) { return false } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index a4e455d9b..208602f48 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,8 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -86,7 +87,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion lhs := assign.Lhs for i, x := range assign.Rhs { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisinternal.Format(pass.Fset, assign.Lhs[i]), path) + pass.ReportRangef(x, "assignment copies lock value to %v: %v", astutil.Format(pass.Fset, assign.Lhs[i]), path) lhs = nil // An lhs has been reported. We prefer the assignment warning and do not report twice. 
} } @@ -100,7 +101,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion if id, ok := l.(*ast.Ident); ok && id.Name != "_" { if obj := pass.TypesInfo.Defs[id]; obj != nil && obj.Type() != nil { if path := lockPath(pass.Pkg, obj.Type(), nil); path != nil { - pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", analysisinternal.Format(pass.Fset, l), path) + pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", astutil.Format(pass.Fset, l), path) } } } @@ -132,7 +133,7 @@ func checkCopyLocksCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) { x = node.Value } if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "literal copies lock value from %v: %v", analysisinternal.Format(pass.Fset, x), path) + pass.ReportRangef(x, "literal copies lock value from %v: %v", astutil.Format(pass.Fset, x), path) } } } @@ -157,13 +158,16 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) { } if fun, ok := pass.TypesInfo.Uses[id].(*types.Builtin); ok { switch fun.Name() { - case "new", "len", "cap", "Sizeof", "Offsetof", "Alignof": + case "len", "cap", "Sizeof", "Offsetof", "Alignof": + // The argument of this operation is used only + // for its type (e.g. len(array)), or the operation + // does not copy a lock (e.g. len(slice)). return } } for _, x := range ce.Args { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "call of %s copies lock value: %v", analysisinternal.Format(pass.Fset, ce.Fun), path) + pass.ReportRangef(x, "call of %s copies lock value: %v", astutil.Format(pass.Fset, ce.Fun), path) } } } @@ -230,7 +234,7 @@ func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) { return } if path := lockPath(pass.Pkg, typ, nil); path != nil { - pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisinternal.Format(pass.Fset, e), path) + pass.Reportf(e.Pos(), "range var %s copies lock: %v", astutil.Format(pass.Fset, e), path) } } @@ -324,8 +328,8 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ ttyp, ok := typ.Underlying().(*types.Tuple) if ok { - for i := 0; i < ttyp.Len(); i++ { - subpath := lockPath(tpkg, ttyp.At(i).Type(), seen) + for v := range ttyp.Variables() { + subpath := lockPath(tpkg, v.Type(), seen) if subpath != nil { return append(subpath, typ.String()) } @@ -350,7 +354,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ // In go1.10, sync.noCopy did not implement Locker. // (The Unlock method was added only in CL 121876.) // TODO(adonovan): remove workaround when we drop go1.10. 
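// Editor's note: an illustrative sketch, not part of the vendored diff.
// Several hunks above replace index loops with the iterator methods added
// to go/types in Go 1.24 (Struct.Fields, Tuple.Variables) and with the
// allocation-free sequence variants in strings (SplitSeq, FieldsSeq).
// The two loops below are equivalent; the second never builds a slice.
package main

import (
    "fmt"
    "strings"
)

func main() {
    s := "a,b,c"
    for _, elem := range strings.Split(s, ",") { // before: allocates a []string
        fmt.Println(elem)
    }
    for elem := range strings.SplitSeq(s, ",") { // after: yields elements lazily
        fmt.Println(elem)
    }
}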
- if analysisinternal.IsTypeNamed(typ, "sync", "noCopy") { + if typesinternal.IsTypeNamed(typ, "sync", "noCopy") { return []string{typ.String()} } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index 951aaed00..4e6ea9d67 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -19,6 +19,7 @@ import ( "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typesinternal" ) var Analyzer = &analysis.Analyzer{ @@ -26,7 +27,7 @@ var Analyzer = &analysis.Analyzer{ Doc: "build a control-flow graph", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ctrlflow", Run: run, - ResultType: reflect.TypeOf(new(CFGs)), + ResultType: reflect.TypeFor[*CFGs](), FactTypes: []analysis.Fact{new(noReturn)}, Requires: []*analysis.Analyzer{inspect.Analyzer}, } @@ -44,7 +45,21 @@ type CFGs struct { defs map[*ast.Ident]types.Object // from Pass.TypesInfo.Defs funcDecls map[*types.Func]*declInfo funcLits map[*ast.FuncLit]*litInfo - pass *analysis.Pass // transient; nil after construction + noReturn map[*types.Func]bool // functions lacking a reachable return statement + pass *analysis.Pass // transient; nil after construction +} + +// NoReturn reports whether the specified control-flow graph cannot return normally. +// +// It is defined for at least all function symbols that appear as the static callee of a +// CallExpr in the current package, even if the callee was imported from a dependency. +// +// The result may incorporate interprocedural information based on induction of +// the "no return" property over the static call graph within the package. +// For example, if f simply calls g and g always calls os.Exit, then both f and g may +// be deemed never to return. +func (c *CFGs) NoReturn(fn *types.Func) bool { + return c.noReturn[fn] } // CFGs has two maps: funcDecls for named functions and funcLits for @@ -54,15 +69,14 @@ type CFGs struct { // *types.Func but not the other way. type declInfo struct { - decl *ast.FuncDecl - cfg *cfg.CFG // iff decl.Body != nil - started bool // to break cycles - noReturn bool + decl *ast.FuncDecl + cfg *cfg.CFG // iff decl.Body != nil + started bool // to break cycles } type litInfo struct { cfg *cfg.CFG - noReturn bool + noReturn bool // (currently unused) } // FuncDecl returns the control-flow graph for a named function. @@ -118,6 +132,7 @@ func run(pass *analysis.Pass) (any, error) { defs: pass.TypesInfo.Defs, funcDecls: funcDecls, funcLits: funcLits, + noReturn: make(map[*types.Func]bool), pass: pass, } @@ -138,7 +153,7 @@ func run(pass *analysis.Pass) (any, error) { li := funcLits[lit] if li.cfg == nil { li.cfg = cfg.New(lit.Body, c.callMayReturn) - if !hasReachableReturn(li.cfg) { + if li.cfg.NoReturn() { li.noReturn = true } } @@ -158,26 +173,28 @@ func (c *CFGs) buildDecl(fn *types.Func, di *declInfo) { // The buildDecl call tree thus resembles the static call graph. // We mark each node when we start working on it to break cycles. 
- if !di.started { // break cycle - di.started = true + if di.started { + return // break cycle + } + di.started = true - if isIntrinsicNoReturn(fn) { - di.noReturn = true - } + noreturn, known := knownIntrinsic(fn) + if !known { if di.decl.Body != nil { di.cfg = cfg.New(di.decl.Body, c.callMayReturn) - if !hasReachableReturn(di.cfg) { - di.noReturn = true + if di.cfg.NoReturn() { + noreturn = true } } - if di.noReturn { - c.pass.ExportObjectFact(fn, new(noReturn)) - } + } + if noreturn { + c.pass.ExportObjectFact(fn, new(noReturn)) + c.noReturn[fn] = true + } - // debugging - if false { - log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), di.noReturn) - } + // debugging + if false { + log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), noreturn) } } @@ -201,31 +218,61 @@ func (c *CFGs) callMayReturn(call *ast.CallExpr) (r bool) { // Function or method declared in this package? if di, ok := c.funcDecls[fn]; ok { c.buildDecl(fn, di) - return !di.noReturn + return !c.noReturn[fn] } // Not declared in this package. // Is there a fact from another package? - return !c.pass.ImportObjectFact(fn, new(noReturn)) + if c.pass.ImportObjectFact(fn, new(noReturn)) { + c.noReturn[fn] = true + return false + } + + return true } var panicBuiltin = types.Universe.Lookup("panic").(*types.Builtin) -func hasReachableReturn(g *cfg.CFG) bool { - for _, b := range g.Blocks { - if b.Live && b.Return() != nil { - return true - } - } - return false -} - -// isIntrinsicNoReturn reports whether a function intrinsically never -// returns because it stops execution of the calling thread. +// knownIntrinsic reports whether a function intrinsically never +// returns because it stops execution of the calling thread, or does +// in fact return, contrary to its apparent body, because it is +// handled specially by the compiler. +// // It is the base case in the recursion. -func isIntrinsicNoReturn(fn *types.Func) bool { +func knownIntrinsic(fn *types.Func) (noreturn, known bool) { // Add functions here as the need arises, but don't allocate memory. - path, name := fn.Pkg().Path(), fn.Name() - return path == "syscall" && (name == "Exit" || name == "ExitProcess" || name == "ExitThread") || - path == "runtime" && name == "Goexit" + + // Functions known intrinsically never to return. + if typesinternal.IsFunctionNamed(fn, "syscall", "Exit", "ExitProcess", "ExitThread") || + typesinternal.IsFunctionNamed(fn, "runtime", "Goexit", "fatalthrow", "fatalpanic", "exit") || + // Following staticcheck (see go/ir/exits.go) we include functions + // in several popular logging packages whose no-return status is + // beyond the analysis to infer. + // TODO(adonovan): make this list extensible. 
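// Editor's note: an illustrative sketch, not part of the vendored diff.
// The NoReturn documentation above describes induction over the static
// call graph: the no-return property starts at known intrinsics (such as
// syscall.Exit and runtime.Goexit) and propagates through any function
// whose body must reach one, with exported facts carrying it across
// packages. In the package below, ctrlflow would deem both g and f
// no-return, so the statement after f() is unreachable in the CFG.
package main

import "os"

func g() {
    os.Exit(1) // os.Exit carries a noReturn fact imported from package os
}

func f() {
    g() // every path through f reaches g, so f never returns either
}

func main() {
    f()
    println("unreachable") // dead in the CFG once f is known no-return
}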
+ typesinternal.IsMethodNamed(fn, "go.uber.org/zap", "Logger", "Fatal", "Panic") || + typesinternal.IsMethodNamed(fn, "go.uber.org/zap", "SugaredLogger", "Fatal", "Fatalw", "Fatalf", "Panic", "Panicw", "Panicf") || + typesinternal.IsMethodNamed(fn, "github.com/sirupsen/logrus", "Logger", "Exit", "Panic", "Panicf", "Panicln") || + typesinternal.IsMethodNamed(fn, "github.com/sirupsen/logrus", "Entry", "Panicf", "Panicln") || + typesinternal.IsFunctionNamed(fn, "k8s.io/klog", "Exit", "ExitDepth", "Exitf", "Exitln", "Fatal", "FatalDepth", "Fatalf", "Fatalln") || + typesinternal.IsFunctionNamed(fn, "k8s.io/klog/v2", "Exit", "ExitDepth", "Exitf", "Exitln", "Fatal", "FatalDepth", "Fatalf", "Fatalln") { + return true, true + } + + // Compiler intrinsics known to return, contrary to + // what analysis of the function body would conclude. + // + // Not all such intrinsics must be listed here: ctrlflow + // considers any function called for its value--such as + // crypto/internal/constanttime.bool2Uint8--to potentially + // return; only functions called as a statement, for effects, + // are no-return candidates. + // + // Unfortunately this does sometimes mean peering into internals. + // Where possible, use the nearest enclosing public API function. + if typesinternal.IsFunctionNamed(fn, "internal/abi", "EscapeNonString") || + typesinternal.IsFunctionNamed(fn, "hash/maphash", "Comparable") { + return false, true + } + + return // unknown } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go index d15e3bc59..32087cd71 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go @@ -14,7 +14,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const Doc = `check for calls of reflect.DeepEqual on error values @@ -35,7 +35,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "reflect") { + if !typesinternal.Imports(pass.Pkg, "reflect") { return nil, nil // doesn't directly import reflect } @@ -47,7 +47,7 @@ func run(pass *analysis.Pass) (any, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsFunctionNamed(obj, "reflect", "DeepEqual") && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { + if typesinternal.IsFunctionNamed(obj, "reflect", "DeepEqual") && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { pass.ReportRangef(call, "avoid using reflect.DeepEqual with errors") } }) @@ -96,8 +96,8 @@ func containsError(typ types.Type) bool { case *types.Map: return check(t.Key()) || check(t.Elem()) case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - if check(t.Field(i).Type()) { + for field := range t.Fields() { + if check(field.Type()) { return true } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go index e11957f2d..af93407ca 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go @@ -10,10 +10,10 @@ import ( 
"golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -23,20 +23,20 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "defers", Requires: []*analysis.Analyzer{inspect.Analyzer}, + Doc: analyzerutil.MustExtractDoc(doc, "defers"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers", - Doc: analysisutil.MustExtractDoc(doc, "defers"), Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "time") { + if !typesinternal.Imports(pass.Pkg, "time") { return nil, nil } checkDeferCall := func(node ast.Node) bool { switch v := node.(type) { case *ast.CallExpr: - if analysisinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") { + if typesinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") { pass.Reportf(v.Pos(), "call to time.Since is not deferred") } case *ast.FuncLit: diff --git a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go index bebec8914..5fa28861e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -14,7 +14,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = `check Go toolchain directives such as //go:debug @@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) { func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since is not a Go source file. // Read the raw bytes instead. 
- content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analyzerutil.ReadFile(pass, filename) if err != nil { return err } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index b8d29d019..f1465f734 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -12,22 +12,20 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" + "golang.org/x/tools/internal/typesinternal/typeindex" ) const Doc = `report passing non-pointer or non-error values to errors.As -The errorsas analysis reports calls to errors.As where the type +The errorsas analyzer reports calls to errors.As where the type of the second argument is not a pointer to a type implementing error.` var Analyzer = &analysis.Analyzer{ Name: "errorsas", Doc: Doc, URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas", - Requires: []*analysis.Analyzer{inspect.Analyzer}, + Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer}, Run: run, } @@ -39,38 +37,31 @@ func run(pass *analysis.Pass) (any, error) { return nil, nil } - if !analysisinternal.Imports(pass.Pkg, "errors") { - return nil, nil // doesn't directly import errors - } - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.CallExpr)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - call := n.(*ast.CallExpr) - obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsFunctionNamed(obj, "errors", "As") { - return - } + for curCall := range index.Calls(index.Object("errors", "As")) { + call := curCall.Node().(*ast.CallExpr) if len(call.Args) < 2 { - return // not enough arguments, e.g. called with return values of another function + continue // spread call: errors.As(pair()) } - if err := checkAsTarget(pass, call.Args[1]); err != nil { + + // Check for incorrect arguments. + if err := checkAsTarget(info, call.Args[1]); err != nil { pass.ReportRangef(call, "%v", err) + continue } - }) + } return nil, nil } -var errorType = types.Universe.Lookup("error").Type() - // checkAsTarget reports an error if the second argument to errors.As is invalid. -func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { - t := pass.TypesInfo.Types[e].Type - if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 { - // A target of interface{} is always allowed, since it often indicates +func checkAsTarget(info *types.Info, e ast.Expr) error { + t := info.Types[e].Type + if types.Identical(t.Underlying(), anyType) { + // A target of any is always allowed, since it often indicates // a value forwarded from another source. 
return nil } @@ -78,12 +69,16 @@ func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { if !ok { return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - if pt.Elem() == errorType { + if types.Identical(pt.Elem(), errorType) { return errors.New("second argument to errors.As should not be *error") } - _, ok = pt.Elem().Underlying().(*types.Interface) - if ok || types.Implements(pt.Elem(), errorType.Underlying().(*types.Interface)) { - return nil + if !types.IsInterface(pt.Elem()) && !types.AssignableTo(pt.Elem(), errorType) { + return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") + return nil } + +var ( + anyType = types.Universe.Lookup("any").Type() + errorType = types.Universe.Lookup("error").Type() +) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go index 4987ec5af..235fa4f01 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -18,6 +18,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/astutil" ) const Doc = `find structs that would use less memory if their fields were sorted @@ -103,6 +104,11 @@ func fieldalignment(pass *analysis.Pass, node *ast.StructType, typ *types.Struct return } + // Analyzers borrow syntax trees; they do not own them and must not modify them. + // This Clone operation is a quick fix to the data race introduced + // in CL 278872 by the clearing of the Comment and Doc fields below. + node = astutil.CloneNode(node) + // Flatten the ast node since it could have multiple field names per list item while // *types.Struct only have one item per field. // TODO: Preserve multi-named fields instead of flattening.
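// Editor's note: an illustrative sketch, not part of the vendored diff.
// The CloneNode call above exists because analysis passes share a single
// syntax tree, so mutating it (here: clearing Doc and Comment before
// printing) races with concurrent readers. The general pattern is to copy
// what you intend to mutate; this sketch shallow-copies field nodes, while
// the vendored code uses the internal astutil.CloneNode for a deep copy.
package main

import (
    "fmt"
    "go/ast"
)

// stripDocs returns copies of fields with Doc and Comment cleared,
// leaving the shared originals untouched.
func stripDocs(fields []*ast.Field) []*ast.Field {
    out := make([]*ast.Field, len(fields))
    for i, f := range fields {
        c := *f // shallow copy of the Field node
        c.Doc, c.Comment = nil, nil
        out[i] = &c
    }
    return out
}

func main() {
    orig := &ast.Field{Doc: &ast.CommentGroup{}}
    stripped := stripDocs([]*ast.Field{orig})
    fmt.Println(orig.Doc != nil, stripped[0].Doc == nil) // true true
}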
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index ff9c8b4f8..a7d558103 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -13,7 +13,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report assembly that clobbers the frame pointer before saving it" @@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) { } for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } @@ -127,7 +127,7 @@ func run(pass *analysis.Pass) (any, error) { } if arch.isFPWrite(line) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving") + pass.Reportf(tf.LineStart(lineno), "frame pointer is clobbered before saving") active = false continue } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index e9acd9654..37ecb6523 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -13,7 +13,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -46,7 +45,7 @@ func run(pass *analysis.Pass) (any, error) { // Fast path: if the package doesn't import net/http, // skip the traversal. - if !analysisinternal.Imports(pass.Pkg, "net/http") { + if !typesinternal.Imports(pass.Pkg, "net/http") { return nil, nil } @@ -118,7 +117,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return false // the function called does not return two values. } isPtr, named := typesinternal.ReceiverNamed(res.At(0)) - if !isPtr || named == nil || !analysisinternal.IsTypeNamed(named, "net/http", "Response") { + if !isPtr || named == nil || !typesinternal.IsTypeNamed(named, "net/http", "Response") { return false // the first return type is not *http.Response. } @@ -133,11 +132,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return ok && id.Name == "http" // function in net/http package. } - if analysisinternal.IsTypeNamed(typ, "net/http", "Client") { + if typesinternal.IsTypeNamed(typ, "net/http", "Client") { return true // method on http.Client. } ptr, ok := types.Unalias(typ).(*types.Pointer) - return ok && analysisinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. + return ok && typesinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. 
} // restOfBlock, given a traversal stack, finds the innermost containing diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 4022dbe7c..da0acbd8e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -11,8 +11,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + Doc: analyzerutil.MustExtractDoc(doc, "ifaceassert"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index ee1972f56..aae5d255f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, - ResultType: reflect.TypeOf(new(inspector.Inspector)), + ResultType: reflect.TypeFor[*inspector.Inspector](), } func run(pass *analysis.Pass) (any, error) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go deleted file mode 100644 index d3df898d3..000000000 --- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisutil defines various helper functions -// used by two or more packages beneath go/analysis. -package analysisutil - -import ( - "go/ast" - "go/token" - "go/types" - "os" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" -) - -// HasSideEffects reports whether evaluation of e has side effects. -func HasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. - safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - -// ReadFile reads a file and adds it to the FileSet -// so that we can report errors against it using lineStart. 
-func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { - readFile := pass.ReadFile - if readFile == nil { - readFile = os.ReadFile - } - content, err := readFile(filename) - if err != nil { - return nil, nil, err - } - tf := pass.Fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - return content, tf, nil -} - -// LineStart returns the position of the start of the specified line -// within file f, or NoPos if there is no line of that number. -func LineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - // - // TODO(adonovan): eventually replace this function with the - // simpler and more efficient (*go/token.File).LineStart, added - // in go1.12. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} - -var MustExtractDoc = analysisinternal.MustExtractDoc diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go index 2580a0ac2..41b19d793 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go @@ -11,10 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -24,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "loopclosure", - Doc: analysisutil.MustExtractDoc(doc, "loopclosure"), + Doc: analyzerutil.MustExtractDoc(doc, "loopclosure"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -56,8 +55,8 @@ func run(pass *analysis.Pass) (any, error) { switch n := n.(type) { case *ast.File: // Only traverse the file if its goversion is strictly before go1.22. - goversion := versions.FileVersion(pass.TypesInfo, n) - return versions.Before(goversion, versions.Go1_22) + return !analyzerutil.FileUsesGoVersion(pass, n, versions.Go1_22) + case *ast.RangeStmt: body = n.Body addVar(n.Key) @@ -309,12 +308,11 @@ func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt { if !ok { continue } - expr := exprStmt.X - if isMethodCall(info, expr, "testing", "T", "Parallel") { - call, _ := expr.(*ast.CallExpr) - if call == nil { - continue - } + call, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + continue + } + if isMethodCall(info, call, "testing", "T", "Parallel") { x, _ := call.Fun.(*ast.SelectorExpr) if x == nil { continue @@ -348,26 +346,6 @@ func unlabel(stmt ast.Stmt) (ast.Stmt, bool) { } } -// isMethodCall reports whether expr is a method call of -// ... 
-func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool { - call, ok := expr.(*ast.CallExpr) - if !ok { - return false - } - - // Check that we are calling a method - f := typeutil.StaticCallee(info, call) - if f == nil || f.Name() != method { - return false - } - recv := f.Type().(*types.Signature).Recv() - if recv == nil { - return false - } - - // Check that the receiver is a <pkgPath>.<typeName> or - // *<pkgPath>.<typeName>. - _, named := typesinternal.ReceiverNamed(recv) - return analysisinternal.IsTypeNamed(named, pkgPath, typeName) +func isMethodCall(info *types.Info, call *ast.CallExpr, pkgPath, typeName, method string) bool { + return typesinternal.IsMethodNamed(typeutil.Callee(info, call), pkgPath, typeName, method) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index c0746789e..28a5f6cd9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -13,11 +13,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: analysisutil.MustExtractDoc(doc, "lostcancel"), + Doc: analyzerutil.MustExtractDoc(doc, "lostcancel"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ @@ -50,7 +50,7 @@ var contextPackage = "context" // checkLostCancel analyzes a single named or literal function. func run(pass *analysis.Pass) (any, error) { // Fast path: bypass check if file doesn't use context.WithCancel.
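// Editor's note: an illustrative sketch, not part of the vendored diff.
// The fast path above recurs throughout these passes: skip the whole
// traversal unless the package directly imports the package of interest.
// typesinternal.Imports is internal to x/tools, but the check it performs
// is expressible with the public go/types API, roughly as below.
package main

import "go/types"

// imports reports whether pkg directly imports the package named by path.
func imports(pkg *types.Package, path string) bool {
    for _, imp := range pkg.Imports() {
        if imp.Path() == path {
            return true
        }
    }
    return false
}

func main() {
    pkg := types.NewPackage("example.com/p", "p") // no imports recorded
    println(imports(pkg, "context"))              // false
}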
- if !analysisinternal.Imports(pass.Pkg, contextPackage) { + if !typesinternal.Imports(pass.Pkg, contextPackage) { return nil, nil } @@ -316,8 +316,8 @@ outer: } func tupleContains(tuple *types.Tuple, v *types.Var) bool { - for i := 0; i < tuple.Len(); i++ { - if tuple.At(i) == v { + for v0 := range tuple.Variables() { + if v0 == v { return true } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index fa1883b0c..6b3729518 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -14,8 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: analysisutil.MustExtractDoc(doc, "nilfunc"), + Doc: analyzerutil.MustExtractDoc(doc, "nilfunc"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index af61ae608..6f353968f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilness", - Doc: analysisutil.MustExtractDoc(doc, "nilness"), + Doc: analyzerutil.MustExtractDoc(doc, "nilness"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilness", Run: run, Requires: []*analysis.Analyzer{buildssa.Analyzer}, @@ -52,7 +52,7 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { // notNil reports an error if v is provably nil. notNil := func(stack []fact, instr ssa.Instruction, v ssa.Value, descr string) { if nilnessOf(stack, v) == isnil { - reportf("nilderef", instr.Pos(), descr) + reportf("nilderef", instr.Pos(), "%s", descr) } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go index 31748795d..2b8add301 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/pkgfact", Run: run, FactTypes: []analysis.Fact{new(pairsFact)}, - ResultType: reflect.TypeOf(map[string]string{}), + ResultType: reflect.TypeFor[map[string]string](), } // A pairsFact is a package-level fact that records diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go index eebf40208..a09bfd1c6 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go @@ -82,6 +82,46 @@ // ... 
// } // +// A local function may also be inferred as a printf wrapper. If it +// is assigned to a variable, each call made through that variable will +// be checked just like a call to a function: +// +// logf := func(format string, args ...any) { +// message := fmt.Sprintf(format, args...) +// log.Printf("%s: %s", prefix, message) +// } +// logf("%s", 123) // logf format %s has arg 123 of wrong type int +// +// Interface methods may also be analyzed as printf wrappers, if +// within the interface's package there is an assignment from an +// implementation type whose corresponding method is a printf wrapper. +// +// For example, the var declaration below causes a *myLoggerImpl value +// to be assigned to a Logger variable: +// +// type Logger interface { +// Logf(format string, args ...any) +// } +// +// type myLoggerImpl struct{ ... } +// +// var _ Logger = (*myLoggerImpl)(nil) +// +// func (*myLoggerImpl) Logf(format string, args ...any) { +// println(fmt.Sprintf(format, args...)) +// } +// +// Since myLoggerImpl's Logf method is a printf wrapper, this +// establishes that Logger.Logf is a printf wrapper too, causing +// dynamic calls through the interface to be checked: +// +// func f(log Logger) { +// log.Logf("%s", 123) // Logger.Logf format %s has arg 123 of wrong type int +// } +// +// This feature applies only to interface methods declared in files +// using at least Go 1.26. +// // # Specifying printf wrappers by flag // // The -funcs flag specifies a comma-separated list of names of diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index 9ad18a041..1afb07c45 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -18,14 +18,16 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/fmtstr" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" + "golang.org/x/tools/refactor/satisfy" ) func init() { @@ -37,11 +39,11 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: analysisutil.MustExtractDoc(doc, "printf"), + Doc: analyzerutil.MustExtractDoc(doc, "printf"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, - ResultType: reflect.TypeOf((*Result)(nil)), + ResultType: reflect.TypeFor[*Result](), FactTypes: []analysis.Fact{new(isWrapper)}, } @@ -64,13 +66,13 @@ func (kind Kind) String() string { case KindErrorf: return "errorf" } - return "" + return "(none)" } // Result is the printf analyzer's result type. Clients may query the result // to learn whether a function behaves like fmt.Print or fmt.Printf. type Result struct { - funcs map[*types.Func]Kind + funcs map[types.Object]Kind } // Kind reports whether fn behaves like fmt.Print or fmt.Printf.
@@ -111,149 +113,301 @@ func (f *isWrapper) String() string { func run(pass *analysis.Pass) (any, error) { res := &Result{ - funcs: make(map[*types.Func]Kind), + funcs: make(map[types.Object]Kind), } - findPrintfLike(pass, res) - checkCalls(pass) + findPrintLike(pass, res) + checkCalls(pass, res) return res, nil } -type printfWrapper struct { - obj *types.Func - fdecl *ast.FuncDecl - format *types.Var - args *types.Var +// A wrapper is a candidate print/printf wrapper function. +// +// We represent functions generally as types.Object, not *Func, so +// that we can analyze anonymous functions such as +// +// printf := func(format string, args ...any) {...}, +// +// representing them by the *types.Var symbol for the local variable +// 'printf'. +type wrapper struct { + obj types.Object // *Func or *Var + curBody inspector.Cursor // for *ast.BlockStmt + format *types.Var // optional "format string" parameter in the Func{Decl,Lit} + args *types.Var // "args ...any" parameter in the Func{Decl,Lit} callers []printfCaller - failed bool // if true, not a printf wrapper } +// printfCaller is a candidate print{,f} forwarding call from candidate wrapper w. type printfCaller struct { - w *printfWrapper - call *ast.CallExpr -} - -// maybePrintfWrapper decides whether decl (a declared function) may be a wrapper -// around a fmt.Printf or fmt.Print function. If so it returns a printfWrapper -// function describing the declaration. Later processing will analyze the -// graph of potential printf wrappers to pick out the ones that are true wrappers. -// A function may be a Printf or Print wrapper if its last argument is ...interface{}. -// If the next-to-last argument is a string, then this may be a Printf wrapper. -// Otherwise it may be a Print wrapper. -func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper { - // Look for functions with final argument type ...interface{}. - fdecl, ok := decl.(*ast.FuncDecl) - if !ok || fdecl.Body == nil { - return nil - } - fn, ok := info.Defs[fdecl.Name].(*types.Func) - // Type information may be incomplete. - if !ok { - return nil - } + w *wrapper + call *ast.CallExpr // forwarding call (nil for implicit interface method -> impl calls) +} - sig := fn.Type().(*types.Signature) +// formatArgsParams returns the "format string" and "args ...any" +// parameters of a potential print or printf wrapper function. +// (The format is nil in the print-like case.) +func formatArgsParams(sig *types.Signature) (format, args *types.Var) { if !sig.Variadic() { - return nil // not variadic + return nil, nil // not variadic } params := sig.Params() nparams := params.Len() // variadic => nonzero - // Check final parameter is "args ...interface{}". - args := params.At(nparams - 1) - iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) - if !ok || !iface.Empty() { - return nil - } - // Is second last param 'format string'? - var format *types.Var if nparams >= 2 { if p := params.At(nparams - 2); p.Type() == types.Typ[types.String] { format = p } } - return &printfWrapper{ - obj: fn, - fdecl: fdecl, - format: format, - args: args, + // Check final parameter is "args ...any". + // (variadic => slice) + args = params.At(nparams - 1) + iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) + if !ok || !iface.Empty() { + return nil, nil } + + return format, args } -// findPrintfLike scans the entire package to find printf-like functions. 
-func findPrintfLike(pass *analysis.Pass, res *Result) (any, error) { - // Gather potential wrappers and call graph between them. - byObj := make(map[*types.Func]*printfWrapper) - var wrappers []*printfWrapper - for _, file := range pass.Files { - for _, decl := range file.Decls { - w := maybePrintfWrapper(pass.TypesInfo, decl) - if w == nil { - continue +// findPrintLike scans the entire package to find print or printf-like functions. +// When it returns, all such functions have been identified. +func findPrintLike(pass *analysis.Pass, res *Result) { + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) + + // Pass 1: gather candidate wrapper functions (and populate wrappers). + var ( + wrappers []*wrapper + byObj = make(map[types.Object]*wrapper) + ) + for cur := range inspect.Root().Preorder((*ast.FuncDecl)(nil), (*ast.FuncLit)(nil), (*ast.InterfaceType)(nil)) { + + // addWrapper records that a func (or var representing + // a FuncLit) is a potential print{,f} wrapper. + // curBody is its *ast.BlockStmt, if any. + addWrapper := func(obj types.Object, sig *types.Signature, curBody inspector.Cursor) *wrapper { + format, args := formatArgsParams(sig) + if args != nil { + // obj (the symbol for a function/method, or variable + // assigned to an anonymous function) is a potential + // print or printf wrapper. + // + // Later processing will analyze the graph of potential + // wrappers and their function bodies to pick out the + // ones that are true wrappers. + w := &wrapper{ + obj: obj, + curBody: curBody, + format: format, // non-nil => printf + args: args, + } + byObj[w.obj] = w + wrappers = append(wrappers, w) + return w } - byObj[w.obj] = w - wrappers = append(wrappers, w) + return nil + } + + switch f := cur.Node().(type) { + case *ast.FuncDecl: + // named function or method: + // + // func wrapf(format string, args ...any) {...} + if f.Body != nil { + fn := info.Defs[f.Name].(*types.Func) + addWrapper(fn, fn.Signature(), cur.ChildAt(edge.FuncDecl_Body, -1)) + } + + case *ast.FuncLit: + // anonymous function directly assigned to a variable: + // + // var wrapf = func(format string, args ...any) {...} + // wrapf := func(format string, args ...any) {...} + // wrapf = func(format string, args ...any) {...} + // + // The LHS may also be a struct field x.wrapf or + // an imported var pkg.Wrapf. + // + var lhs ast.Expr + switch ek, idx := cur.ParentEdge(); ek { + case edge.ValueSpec_Values: + curName := cur.Parent().ChildAt(edge.ValueSpec_Names, idx) + lhs = curName.Node().(*ast.Ident) + case edge.AssignStmt_Rhs: + curLhs := cur.Parent().ChildAt(edge.AssignStmt_Lhs, idx) + lhs = curLhs.Node().(ast.Expr) + } + + var v *types.Var + switch lhs := lhs.(type) { + case *ast.Ident: + // variable: wrapf = func(...) + v, _ = info.ObjectOf(lhs).(*types.Var) + case *ast.SelectorExpr: + if sel, ok := info.Selections[lhs]; ok { + // struct field: x.wrapf = func(...) + v = sel.Obj().(*types.Var) + } else { + // imported var: pkg.Wrapf = func(...) + v = info.Uses[lhs.Sel].(*types.Var) + } + } + if v != nil { + sig := info.TypeOf(f).(*types.Signature) + curBody := cur.ChildAt(edge.FuncLit_Body, -1) + addWrapper(v, sig, curBody) + } + + case *ast.InterfaceType: + // Induction through interface methods is gated as + // if it were a go1.26 language feature, to avoid + // surprises when go test's vet suite gets stricter. 
+ if analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(cur), versions.Go1_26) { + for imeth := range info.TypeOf(f).(*types.Interface).Methods() { + addWrapper(imeth, imeth.Signature(), inspector.Cursor{}) + } + } + } + } + + // impls maps abstract methods to implementations. + // + // Interface methods are modelled as if they have a body + // that calls each implementing method. + // + // In the code below, impls maps Logger.Logf to + // [myLogger.Logf], and if myLogger.Logf is discovered to be + // printf-like, then so will be Logger.Logf. + // + // type Logger interface { + // Logf(format string, args ...any) + // } + // type myLogger struct{ ... } + // func (myLogger) Logf(format string, args ...any) {...} + // var _ Logger = myLogger{} + impls := methodImplementations(pass) + + // doCall records a call from one wrapper to another. + doCall := func(w *wrapper, callee types.Object, call *ast.CallExpr) { + // Call from one wrapper candidate to another? + // Record the edge so that if callee is found to be + // a true wrapper, w will be too. + if w2, ok := byObj[callee]; ok { + w2.callers = append(w2.callers, printfCaller{w, call}) + } + + // Is the candidate a true wrapper, because it calls + // a known print{,f}-like function from the allowlist + // or an imported fact, or another wrapper found + // to be a true wrapper? + // If so, convert all w's callers to kind. + kind := callKind(pass, callee, res) + if kind != KindNone { + propagate(pass, w, call, kind, res) } } - // Walk the graph to figure out which are really printf wrappers. + // Pass 2: scan the body of each wrapper function + // for calls to other printf-like functions. for _, w := range wrappers { - // Scan function for calls that could be to other printf-like functions. - ast.Inspect(w.fdecl.Body, func(n ast.Node) bool { - if w.failed { - return false + + // An interface method has no body, but acts + // like an implicit call to each implementing method. + if w.curBody.Inspector() == nil { + for impl := range impls[w.obj.(*types.Func)] { + doCall(w, impl, nil) } + continue // (no body) + } + // Process all calls in the wrapper function's body. + scan: + for cur := range w.curBody.Preorder( + (*ast.AssignStmt)(nil), + (*ast.UnaryExpr)(nil), + (*ast.CallExpr)(nil), + ) { + switch n := cur.Node().(type) { + + // Reject tricky cases where the parameters + // are potentially mutated by AssignStmt or UnaryExpr. + // (This logic checks for mutation only before the call.) // TODO: Relax these checks; issue 26555. - if assign, ok := n.(*ast.AssignStmt); ok { - for _, lhs := range assign.Lhs { - if match(pass.TypesInfo, lhs, w.format) || - match(pass.TypesInfo, lhs, w.args) { - // Modifies the format - // string or args in - // some way, so not a - // simple wrapper. - w.failed = true - return false + + case *ast.AssignStmt: + // If the wrapper updates format or args + // it is not a simple wrapper. + for _, lhs := range n.Lhs { + if w.format != nil && match(info, lhs, w.format) || + match(info, lhs, w.args) { + break scan } } - } - if un, ok := n.(*ast.UnaryExpr); ok && un.Op == token.AND { - if match(pass.TypesInfo, un.X, w.format) || - match(pass.TypesInfo, un.X, w.args) { - // Taking the address of the - // format string or args, - // so not a simple wrapper. - w.failed = true - return false + + case *ast.UnaryExpr: + // If the wrapper computes &format or &args, + // it is not a simple wrapper. 
+ if n.Op == token.AND && + (w.format != nil && match(info, n.X, w.format) || + match(info, n.X, w.args)) { + break scan } - } - call, ok := n.(*ast.CallExpr) - if !ok || len(call.Args) == 0 || !match(pass.TypesInfo, call.Args[len(call.Args)-1], w.args) { - return true + case *ast.CallExpr: + if len(n.Args) > 0 && match(info, n.Args[len(n.Args)-1], w.args) { + if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil { + doCall(w, callee, n) + } + } } + } + } +} - fn, kind := printfNameAndKind(pass, call) - if kind != 0 { - checkPrintfFwd(pass, w, call, kind, res) - return true +// methodImplementations returns the mapping from interface methods +// declared in this package to their corresponding implementing +// methods (which may also be interface methods), according to the set +// of assignments to interface types that appear within this package. +func methodImplementations(pass *analysis.Pass) map[*types.Func]map[*types.Func]bool { + impls := make(map[*types.Func]map[*types.Func]bool) + + // To find interface/implementation relations, + // we use the 'satisfy' pass, but proposal #70638 + // provides a better way. + // + // This pass over the syntax could be factored out as + // a separate analysis pass if it is needed by other + // analyzers. + var f satisfy.Finder + f.Find(pass.TypesInfo, pass.Files) + for assign := range f.Result { + // Have: LHS = RHS, where LHS is an interface type. + for imeth := range assign.LHS.Underlying().(*types.Interface).Methods() { + // Limit to interface methods of current package. + if imeth.Pkg() != pass.Pkg { + continue } - // If the call is to another function in this package, - // maybe we will find out it is printf-like later. - // Remember this call for later checking. - if fn != nil && fn.Pkg() == pass.Pkg && byObj[fn] != nil { - callee := byObj[fn] - callee.callers = append(callee.callers, printfCaller{w, call}) + if _, args := formatArgsParams(imeth.Signature()); args == nil { + continue // not print{,f}-like } - return true - }) + // Add implementing method to the set. + impl, _, _ := types.LookupFieldOrMethod(assign.RHS, false, pass.Pkg, imeth.Name()) // can't fail + set, ok := impls[imeth] + if !ok { + set = make(map[*types.Func]bool) + impls[imeth] = set + } + set[impl.(*types.Func)] = true + } } - return nil, nil + return impls } func match(info *types.Info, arg ast.Expr, param *types.Var) bool { @@ -261,19 +415,60 @@ func match(info *types.Info, arg ast.Expr, param *types.Var) bool { return ok && info.ObjectOf(id) == param } -// checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly. -// It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...). -func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind Kind, res *Result) { - matched := kind == KindPrint || - kind != KindNone && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) - if !matched { +// propagate propagates changes in wrapper (non-None) kind information backwards +// through the wrapper.callers graph of well-formed forwarding calls. +func propagate(pass *analysis.Pass, w *wrapper, call *ast.CallExpr, kind Kind, res *Result) { + // Check correct call forwarding. + // + // Interface methods (call==nil) forward + // correctly by construction. + if call != nil && !checkForward(pass, w, call, kind) { return } + // If the candidate's print{,f} status becomes known, + // propagate it back to all its so-far known callers.
+ if res.funcs[w.obj] != kind { + res.funcs[w.obj] = kind + + // Export a fact. + // (This is a no-op for local symbols.) + // We can't export facts on a symbol of another package, + // but we can treat the symbol as a wrapper within + // the current analysis unit. + if w.obj.Pkg() == pass.Pkg { + // Facts are associated with origins. + pass.ExportObjectFact(origin(w.obj), &isWrapper{Kind: kind}) + } + + // Propagate kind back to known callers. + for _, caller := range w.callers { + propagate(pass, caller.w, caller.call, kind, res) + } + } +} + +// checkForward checks whether a call from wrapper w is a well-formed +// forwarding call of the specified (non-None) kind. +// +// If not, it reports a diagnostic that the user wrote +// fmt.Printf(format, args) instead of fmt.Printf(format, args...). +func checkForward(pass *analysis.Pass, w *wrapper, call *ast.CallExpr, kind Kind) bool { + // Printf/Errorf calls must delegate the format string. + switch kind { + case KindPrintf, KindErrorf: + if len(call.Args) < 2 || !match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) { + return false + } + } + + // The args... delegation must be variadic. + // (That args is actually delegated was + // established before the root call to doCall.) if !call.Ellipsis.IsValid() { typ, ok := pass.TypesInfo.Types[call.Fun].Type.(*types.Signature) if !ok { - return + return false } if len(call.Args) > typ.Params().Len() { // If we're passing more arguments than what the @@ -283,25 +478,23 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k // func foo(arg1 string, arg2 ...interface{}) { // fmt.Printf("%s %v", arg1, arg2) // } - return - } - desc := "printf" - if kind == KindPrint { - desc = "print" + return false } - pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", desc) - return + pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", kind) + return false } - fn := w.obj - var fact isWrapper - if !pass.ImportObjectFact(fn, &fact) { - fact.Kind = kind - pass.ExportObjectFact(fn, &fact) - res.funcs[fn] = kind - for _, caller := range w.callers { - checkPrintfFwd(pass, caller.w, caller.call, kind, res) - } + + return true +} + +func origin(obj types.Object) types.Object { + switch obj := obj.(type) { + case *types.Func: + return obj.Origin() + case *types.Var: + return obj.Origin() } + return obj } // isPrint records the print functions. @@ -361,16 +554,14 @@ var isPrint = stringSet{ "(*testing.common).Logf": true, "(*testing.common).Skip": true, "(*testing.common).Skipf": true, - // *testing.T and B are detected by induction, but testing.TB is - // an interface and the inference can't follow dynamic calls. - "(testing.TB).Error": true, - "(testing.TB).Errorf": true, - "(testing.TB).Fatal": true, - "(testing.TB).Fatalf": true, - "(testing.TB).Log": true, - "(testing.TB).Logf": true, - "(testing.TB).Skip": true, - "(testing.TB).Skipf": true, + "(testing.TB).Error": true, + "(testing.TB).Errorf": true, + "(testing.TB).Fatal": true, + "(testing.TB).Fatalf": true, + "(testing.TB).Log": true, + "(testing.TB).Logf": true, + "(testing.TB).Skip": true, + "(testing.TB).Skipf": true, } // formatStringIndex returns the index of the format string (the last @@ -412,7 +603,7 @@ func stringConstantExpr(pass *analysis.Pass, expr ast.Expr) (string, bool) { // checkCalls triggers the print-specific checks for calls that invoke a print // function. 
-func checkCalls(pass *analysis.Pass) { +func checkCalls(pass *analysis.Pass, res *Result) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.File)(nil), @@ -426,48 +617,60 @@ func checkCalls(pass *analysis.Pass) { fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n)) case *ast.CallExpr: - fn, kind := printfNameAndKind(pass, n) - switch kind { - case KindPrintf, KindErrorf: - checkPrintf(pass, fileVersion, kind, n, fn.FullName()) - case KindPrint: - checkPrint(pass, n, fn.FullName()) + if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil { + kind := callKind(pass, callee, res) + switch kind { + case KindPrintf, KindErrorf: + checkPrintf(pass, fileVersion, kind, n, fullname(callee)) + case KindPrint: + checkPrint(pass, n, fullname(callee)) + } } } }) } -func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind Kind) { - fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func) - if fn == nil { - return nil, 0 +func fullname(obj types.Object) string { + if fn, ok := obj.(*types.Func); ok { + return fn.FullName() } + return obj.Name() +} - // Facts are associated with generic declarations, not instantiations. - fn = fn.Origin() - - _, ok := isPrint[fn.FullName()] +// callKind returns the print/printf kind of the called function obj, +// if any. +// (The symbol obj may be a var in the case of an anonymous function.) +// The result is memoized in res.funcs. +func callKind(pass *analysis.Pass, obj types.Object, res *Result) Kind { + kind, ok := res.funcs[obj] if !ok { - // Next look up just "printf", for use with -printf.funcs. - _, ok = isPrint[strings.ToLower(fn.Name())] - } - if ok { - if fn.FullName() == "fmt.Errorf" { - kind = KindErrorf - } else if strings.HasSuffix(fn.Name(), "f") { - kind = KindPrintf + // cache miss + _, ok := isPrint[fullname(obj)] + if !ok { + // Next look up just "printf", for use with -printf.funcs. + _, ok = isPrint[strings.ToLower(obj.Name())] + } + if ok { + // well-known printf functions + if fullname(obj) == "fmt.Errorf" { + kind = KindErrorf + } else if strings.HasSuffix(obj.Name(), "f") { + kind = KindPrintf + } else { + kind = KindPrint + } } else { - kind = KindPrint + // imported wrappers + // Facts are associated with generic declarations, not instantiations. + obj = origin(obj) + var fact isWrapper + if pass.ImportObjectFact(obj, &fact) { + kind = fact.Kind + } } - return fn, kind - } - - var fact isWrapper - if pass.ImportObjectFact(fn, &fact) { - return fn, fact.Kind + res.funcs[obj] = kind // cache } - - return fn, KindNone + return kind } // isFormatter reports whether t could satisfy fmt.Formatter. @@ -490,7 +693,7 @@ func isFormatter(typ types.Type) bool { sig := fn.Type().(*types.Signature) return sig.Params().Len() == 2 && sig.Results().Len() == 0 && - analysisinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && + typesinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune]) } @@ -517,7 +720,7 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C // breaking existing tests and CI scripts.
if idx == len(call.Args)-1 && fileVersion != "" && // fail open - versions.AtLeast(fileVersion, "go1.24") { + versions.AtLeast(fileVersion, versions.Go1_24) { pass.Report(analysis.Diagnostic{ Pos: formatArg.Pos(), @@ -567,7 +770,7 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C anyIndex = true } rng := opRange(formatArg, op) - if !okPrintfArg(pass, call, rng, &maxArgIndex, firstArg, name, op) { + if !okPrintfArg(pass, fileVersion, call, rng, &maxArgIndex, firstArg, name, op) { // One error per format is enough. return } @@ -599,9 +802,9 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C // such as the position of the %v substring of "...%v...". func opRange(formatArg ast.Expr, op *fmtstr.Operation) analysis.Range { if lit, ok := formatArg.(*ast.BasicLit); ok { - start, end, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) + rng, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) if err == nil { - return analysisinternal.Range(start, end) // position of "%v" + return rng // position of "%v" } } return formatArg // entire format string @@ -612,6 +815,7 @@ type printfArgType int const ( argBool printfArgType = 1 << iota + argByte argInt argRune argString @@ -656,7 +860,7 @@ var printVerbs = []printVerb{ {'o', sharpNumFlag, argInt | argPointer}, {'O', sharpNumFlag, argInt | argPointer}, {'p', "-#", argPointer}, - {'q', " -+.0#", argRune | argInt | argString}, + {'q', " -+.0#", argRune | argInt | argString}, // note: when analyzing go1.26 code, argInt => argByte {'s', " -+.0", argString}, {'t', "-", argBool}, {'T', "-", anyType}, @@ -670,7 +874,7 @@ var printVerbs = []printVerb{ // okPrintfArg compares the operation to the arguments actually present, // reporting any discrepancies it can discern, maxArgIndex was the index of the highest used index. // If the final argument is ellipsissed, there's little it can do for that. -func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, maxArgIndex *int, firstArg int, name string, operation *fmtstr.Operation) (ok bool) { +func okPrintfArg(pass *analysis.Pass, fileVersion string, call *ast.CallExpr, rng analysis.Range, maxArgIndex *int, firstArg int, name string, operation *fmtstr.Operation) (ok bool) { verb := operation.Verb.Verb var v printVerb found := false @@ -682,6 +886,13 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma } } + // When analyzing go1.26 code, rune and byte are the only %q integers (#72850). + if verb == 'q' && + fileVersion != "" && // fail open + versions.AtLeast(fileVersion, versions.Go1_26) { + v.typ = argRune | argByte | argString + } + // Could verb's arg implement fmt.Formatter? // Skip check for the %w verb, which requires an error. 
formatter := false @@ -729,7 +940,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, analysisinternal.Format(pass.Fset, arg), details) + pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, astutil.Format(pass.Fset, arg), details) return false } } @@ -756,7 +967,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma } arg := call.Args[verbArgIndex] if isFunctionValue(pass, arg) && verb != 'p' && verb != 'T' { - pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, astutil.Format(pass.Fset, arg)) return false } if reason, ok := matchArgType(pass, v.typ, arg); !ok { @@ -768,14 +979,14 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, analysisinternal.Format(pass.Fset, arg), typeString, details) + pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, astutil.Format(pass.Fset, arg), typeString, details) return false } // Detect recursive formatting via value's String/Error methods. // The '#' flag suppresses the methods, except with %x, %X, and %q. if v.typ&argString != 0 && v.verb != 'T' && (!strings.Contains(operation.Flags, "#") || strings.ContainsRune("qxX", v.verb)) { if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, astutil.Format(pass.Fset, arg), methodName) return false } } @@ -927,7 +1138,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { if sel, ok := call.Args[0].(*ast.SelectorExpr); ok { if x, ok := sel.X.(*ast.Ident); ok { if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") { - pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, analysisinternal.Format(pass.Fset, call.Args[0])) + pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, astutil.Format(pass.Fset, call.Args[0])) } } } @@ -961,10 +1172,10 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { } for _, arg := range args { if isFunctionValue(pass, arg) { - pass.ReportRangef(call, "%s arg %s is a func value, not called", name, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(call, "%s arg %s is a func value, not called", name, astutil.Format(pass.Fset, arg)) } if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, astutil.Format(pass.Fset, arg), methodName) } } } @@ -992,7 +1203,7 @@ func (ss stringSet) String() string { } func (ss stringSet) Set(flag string) error { - for _, name := range strings.Split(flag, ",") { + for name := range strings.SplitSeq(flag, ",") { if len(name) == 0 { 
return fmt.Errorf("empty string") } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index f7e50f98a..2cc5c23f1 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -204,8 +204,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { case *types.Struct: // report whether all the elements of the struct match the expected type. For // instance, with "%d" all the elements must be printable with the "%d" format. - for i := 0; i < typ.NumFields(); i++ { - typf := typ.Field(i) + for typf := range typ.Fields() { if !m.match(typf.Type(), false) { return false } @@ -228,14 +227,20 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { types.Bool: return m.t&argBool != 0 + case types.Byte: + return m.t&(argInt|argByte) != 0 + + case types.Rune, types.UntypedRune: + return m.t&(argInt|argRune) != 0 + case types.UntypedInt, types.Int, types.Int8, types.Int16, - types.Int32, + // see case Rune for int32 types.Int64, types.Uint, - types.Uint8, + // see case Byte for uint8 types.Uint16, types.Uint32, types.Uint64, @@ -259,9 +264,6 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { case types.UnsafePointer: return m.t&(argPointer|argInt) != 0 - case types.UntypedRune: - return m.t&(argInt|argRune) != 0 - case types.UntypedNil: return false diff --git a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go index d0632dbda..5ce357498 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go @@ -11,10 +11,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "reflectvaluecompare", - Doc: analysisutil.MustExtractDoc(doc, "reflectvaluecompare"), + Doc: analyzerutil.MustExtractDoc(doc, "reflectvaluecompare"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/reflectvaluecompare", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -50,7 +50,7 @@ func run(pass *analysis.Pass) (any, error) { } case *ast.CallExpr: obj := typeutil.Callee(pass.TypesInfo, n) - if analysisinternal.IsFunctionNamed(obj, "reflect", "DeepEqual") && (isReflectValue(pass, n.Args[0]) || isReflectValue(pass, n.Args[1])) { + if typesinternal.IsFunctionNamed(obj, "reflect", "DeepEqual") && (isReflectValue(pass, n.Args[0]) || isReflectValue(pass, n.Args[1])) { pass.ReportRangef(n, "avoid using reflect.DeepEqual with reflect.Value") } } @@ -65,7 +65,7 @@ func isReflectValue(pass *analysis.Pass, e ast.Expr) bool { return false } // See if the type is reflect.Value - if !analysisinternal.IsTypeNamed(tv.Type, "reflect", "Value") { + if !typesinternal.IsTypeNamed(tv.Type, "reflect", "Value") { return false } if _, ok := e.(*ast.CompositeLit); ok { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go 
b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go index 8f768bb76..8e60e3894 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" ) // NOTE: Experimental. Not part of the vet suite. @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "shadow", - Doc: analysisutil.MustExtractDoc(doc, "shadow"), + Doc: analyzerutil.MustExtractDoc(doc, "shadow"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/shadow", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index 57987b3d2..366927326 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -20,7 +20,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" ) @@ -123,7 +123,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { } } if amt >= minSize { - ident := analysisinternal.Format(pass.Fset, x) + ident := astutil.Format(pass.Fset, x) qualifier := "" if len(sizes) > 1 { qualifier = "may be " diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index 78a2fa5ea..174c27109 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -18,9 +18,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -29,14 +29,14 @@ var doc string // Analyzer describes sigchanyzer analysis function detector. 
var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"), + Doc: analyzerutil.MustExtractDoc(doc, "sigchanyzer"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "os/signal") { + if !typesinternal.Imports(pass.Pkg, "os/signal") { return nil, nil // doesn't directly import signal } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go index c1ac96043..4afbe0468 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -17,10 +17,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", - Doc: analysisutil.MustExtractDoc(doc, "slog"), + Doc: analyzerutil.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -115,10 +115,10 @@ func run(pass *analysis.Pass) (any, error) { default: if unknownArg == nil { pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)", - shortName(fn), analysisinternal.Format(pass.Fset, arg)) + shortName(fn), astutil.Format(pass.Fset, arg)) } else { pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)", - shortName(fn), analysisinternal.Format(pass.Fset, arg), analysisinternal.Format(pass.Fset, unknownArg)) + shortName(fn), astutil.Format(pass.Fset, arg), astutil.Format(pass.Fset, unknownArg)) } // Stop here so we report at most one missing key per call. return @@ -158,7 +158,7 @@ func run(pass *analysis.Pass) (any, error) { } func isAttr(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "log/slog", "Attr") + return typesinternal.IsTypeNamed(t, "log/slog", "Attr") } // shortName returns a name for the function that is shorter than FullName. 
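For readers unfamiliar with this analyzer, the report sites changed above fire on mis-paired key/value arguments; a minimal sketch, with the diagnostic text paraphrased from the messages in the hunk above:

	package demo

	import "log/slog"

	func f() {
		// After the message, arguments must alternate string keys and
		// values (or be slog.Attr values). 42 appears where a key is
		// expected, so the pass reports roughly:
		//   slog.Info arg "42" should be a string or a slog.Attr
		slog.Info("login", 42, "alice")

		slog.Info("login", "user", "alice")    // ok: key/value pair
		slog.Info("login", slog.Int("id", 42)) // ok: slog.Attr
	}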
@@ -168,7 +168,7 @@ func isAttr(t types.Type) bool { // "slog.Logger.With" (instead of "(*log/slog.Logger).With") func shortName(fn *types.Func) string { var r string - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { if _, named := typesinternal.ReceiverNamed(recv); named != nil { r = named.Obj().Name() } else { @@ -188,7 +188,7 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) { return 0, false } var recvName string // by default a slog package function - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, named := typesinternal.ReceiverNamed(recv) if named == nil { return 0, false // anon struct/interface diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go index 9fe0d2092..2b1882041 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go @@ -17,7 +17,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const Doc = `check the argument type of sort.Slice @@ -34,7 +34,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sort") { + if !typesinternal.Imports(pass.Pkg, "sort") { return nil, nil // doesn't directly import sort } @@ -47,7 +47,7 @@ func run(pass *analysis.Pass) (any, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsFunctionNamed(obj, "sort", "Slice", "SliceStable", "SliceIsSorted") { + if !typesinternal.IsFunctionNamed(obj, "sort", "Slice", "SliceStable", "SliceIsSorted") { return } callee := obj.(*types.Func) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index a0bdf001a..b68385b24 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: analysisutil.MustExtractDoc(doc, "stdmethods"), + Doc: analyzerutil.MustExtractDoc(doc, "stdmethods"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -131,12 +131,12 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { } // Do the =s (if any) all match? - if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { + if !matchParams(expect.args, args, "=") || !matchParams(expect.results, results, "=") { return } // Everything must match. 
- if !matchParams(pass, expect.args, args, "") || !matchParams(pass, expect.results, results, "") { + if !matchParams(expect.args, args, "") || !matchParams(expect.results, results, "") { expectFmt := id.Name + "(" + argjoin(expect.args) + ")" if len(expect.results) == 1 { expectFmt += " " + argjoin(expect.results) @@ -168,7 +168,7 @@ func argjoin(x []string) string { } // Does each type in expect with the given prefix match the corresponding type in actual? -func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, prefix string) bool { +func matchParams(expect []string, actual *types.Tuple, prefix string) bool { for i, x := range expect { if !strings.HasPrefix(x, prefix) { continue diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go b/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go index 429125a8b..d1fda880e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go @@ -10,7 +10,6 @@ import ( "go/ast" "go/build" "go/types" - "regexp" "slices" "golang.org/x/tools/go/analysis" @@ -100,6 +99,13 @@ func run(pass *analysis.Pass) (any, error) { if obj, ok := pass.TypesInfo.Uses[n]; ok && obj.Pkg() != nil { disallowed := disallowedSymbols(obj.Pkg(), fileVersion) if minVersion, ok := disallowed[origin(obj)]; ok { + // Some symbols are accessible before their release but + // only with specific build tags unknown to us here. + // Avoid false positives in such cases. + // TODO(mkalil): move this check into typesinternal.TooNewStdSymbols. + if obj.Pkg().Path() == "testing/synctest" && versions.AtLeast(fileVersion, "go1.24") { + break // requires go1.24 && goexperiment.synctest || go1.25 + } noun := "module" if fileVersion != pkgVersion { noun = "file" @@ -114,11 +120,6 @@ func run(pass *analysis.Pass) (any, error) { return nil, nil } -// Matches cgo generated comment as well as the proposed standard: -// -// https://golang.org/s/generatedcode -var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) - // origin returns the original uninstantiated symbol for obj. func origin(obj types.Object) types.Object { switch obj := obj.(type) { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index 7dbff1e4d..0cbae6889 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: analysisutil.MustExtractDoc(doc, "stringintconv"), + Doc: analyzerutil.MustExtractDoc(doc, "stringintconv"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -198,7 +198,7 @@ func run(pass *analysis.Pass) (any, error) { // the type has methods, as some {String,GoString,Format} // may change the behavior of fmt.Sprint. 
if len(ttypes) == 1 && len(vtypes) == 1 && types.NewMethodSet(V0).Len() == 0 { - _, prefix, importEdits := analysisinternal.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) + prefix, importEdits := refactor.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) if types.Identical(T0, types.Typ[types.String]) { // string(x) -> fmt.Sprint(x) addFix("Format the number as a decimal", append(importEdits, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index cc90f7335..826add2c4 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -17,6 +17,8 @@ import ( "strconv" "strings" + "fmt" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -100,7 +102,11 @@ func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, s } if err := validateStructTag(tag); err != nil { - pass.Reportf(field.Pos(), "struct field tag %#q not compatible with reflect.StructTag.Get: %s", tag, err) + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + End: field.Pos() + token.Pos(len(field.Name())), + Message: fmt.Sprintf("struct field tag %#q not compatible with reflect.StructTag.Get: %s", tag, err), + }) } // Check for use of json or xml tags with unexported fields. @@ -122,7 +128,11 @@ func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, s // ignored. case "", "-": default: - pass.Reportf(field.Pos(), "struct field %s has %s tag but is not exported", field.Name(), enc) + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + End: field.Pos() + token.Pos(len(field.Name())), + Message: fmt.Sprintf("struct field %s has %s tag but is not exported", field.Name(), enc), + }) return } } @@ -190,7 +200,11 @@ func checkTagDuplicates(pass *analysis.Pass, tag, key string, nearest, field *ty alsoPos.Filename = rel } - pass.Reportf(nearest.Pos(), "struct field %s repeats %s tag %q also at %s", field.Name(), key, val, alsoPos) + pass.Report(analysis.Diagnostic{ + Pos: nearest.Pos(), + End: nearest.Pos() + token.Pos(len(nearest.Name())), + Message: fmt.Sprintf("struct field %s repeats %s tag %q also at %s", field.Name(), key, val, alsoPos), + }) } else { seen.Set(key, val, level, field.Pos()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 360ba0e74..e38c266af 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -13,10 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -31,7 +30,7 @@ func init() { var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: analysisutil.MustExtractDoc(doc, "testinggoroutine"), + Doc: analyzerutil.MustExtractDoc(doc, "testinggoroutine"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: 
[]*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -40,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - if !analysisinternal.Imports(pass.Pkg, "testing") { + if !typesinternal.Imports(pass.Pkg, "testing") { return nil, nil } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go index db2e5f76d..4b68a789c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go @@ -36,7 +36,7 @@ func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) * // isMethodNamed returns true if f is a method defined // in package with the path pkgPath with a name in names. // -// (Unlike [analysisinternal.IsMethodNamed], it ignores the receiver type name.) +// (Unlike [typesinternal.IsMethodNamed], it ignores the receiver type name.) func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f == nil { return false @@ -44,7 +44,7 @@ func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f.Pkg() == nil || f.Pkg().Path() != pkgPath { return false } - if f.Type().(*types.Signature).Recv() == nil { + if f.Signature().Recv() == nil { return false } return slices.Contains(names, f.Name()) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index d4e9b0253..1f33df840 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -15,8 +15,9 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: analysisutil.MustExtractDoc(doc, "tests"), + Doc: analyzerutil.MustExtractDoc(doc, "tests"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -258,7 +259,7 @@ func isTestingType(typ types.Type, testingType string) bool { if !ok { return false } - return analysisinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) + return typesinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) } // Validate that fuzz target function's arguments are of accepted types. @@ -464,7 +465,7 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
- pass.ReportRangef(analysisinternal.Range(tparams.Opening, tparams.Closing), + pass.ReportRangef(astutil.RangeOf(tparams.Opening, tparams.Closing), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go index 4fdbb2b54..8353c1efa 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -16,10 +16,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) const badFormat = "2006-02-01" @@ -30,7 +30,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "timeformat", - Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + Doc: analyzerutil.MustExtractDoc(doc, "timeformat"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { // Note: (time.Time).Format is a method and can be a typeutil.Callee // without directly importing "time". So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "time"). + // when !typesinternal.Imports(pass.Pkg, "time"). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -50,8 +50,8 @@ func run(pass *analysis.Pass) (any, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsMethodNamed(obj, "time", "Time", "Format") && - !analysisinternal.IsFunctionNamed(obj, "time", "Parse") { + if !typesinternal.IsMethodNamed(obj, "time", "Time", "Format") && - !typesinternal.IsFunctionNamed(obj, "time", "Parse") { + !typesinternal.IsFunctionNamed(obj, "time", "Parse") { return } if len(call.Args) > 0 { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 26e894bd4..38eb0b106 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + Doc: analyzerutil.MustExtractDoc(doc, "unmarshal"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) { // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee // without
directly importing their packages. So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // when !typesinternal.Imports(pass.Pkg, "encoding/..."). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -57,7 +57,7 @@ func run(pass *analysis.Pass) (any, error) { // Classify the callee (without allocating memory). argidx := -1 - recv := fn.Type().(*types.Signature).Recv() + recv := fn.Signature().Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal // "encoding/xml".Unmarshal diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 317f03499..532f38fe9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -14,8 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/refactor" ) //go:embed doc.go @@ -23,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + Doc: analyzerutil.MustExtractDoc(doc, "unreachable"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -188,6 +189,11 @@ func (d *deadState) findDead(stmt ast.Stmt) { case *ast.EmptyStmt: // do not warn about unreachable empty statements default: + var ( + inspect = d.pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + curStmt, _ = inspect.Root().FindNode(stmt) + tokFile = d.pass.Fset.File(stmt.Pos()) + ) // (This call to pass.Report is a frequent source // of diagnostics beyond EOF in a truncated file; // see #71659.)
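The new variables above feed the suggested fix in the next hunk, which replaces the raw Pos/End text edit with refactor.DeleteStmt. A sketch of input that reaches this code path; the comment reflects the diagnostic in the hunk, while the exact extent of the computed edit is left to DeleteStmt:

	package demo

	import "errors"

	func f() error {
		return errors.New("boom")
		println("never runs") // reported: unreachable code, with a
		// "Remove" fix that deletes the statement
	}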
@@ -196,11 +202,8 @@ func (d *deadState) findDead(stmt ast.Stmt) { End: stmt.End(), Message: "unreachable code", SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove", - TextEdits: []analysis.TextEdit{{ - Pos: stmt.Pos(), - End: stmt.End(), - }}, + Message: "Remove", + TextEdits: refactor.DeleteStmt(tokFile, curStmt), }}, }) d.reachable = true // silence error about next statement diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 57c6da64f..ce785725e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -14,9 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + Doc: analyzerutil.MustExtractDoc(doc, "unsafeptr"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -105,7 +105,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { } switch sel.Sel.Name { case "Pointer", "UnsafeAddr": - if analysisinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { + if typesinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { return true } } @@ -153,5 +153,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. func isReflectHeader(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") + return typesinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 556ffed7d..bd32d5869 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -23,10 +23,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" ) //go:embed doc.go @@ -34,7 +34,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + Doc: analyzerutil.MustExtractDoc(doc, "unusedresult"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -150,11 +150,11 @@ func run(pass *analysis.Pass) (any, error) { if !ok { return // e.g. var or builtin } - if sig := fn.Type().(*types.Signature); sig.Recv() != nil { + if sig := fn.Signature(); sig.Recv() != nil { // method (e.g. 
foo.String()) if types.Identical(sig, sigNoArgsStringResult) { if stringMethods[fn.Name()] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of (%s).%s call not used", sig.Recv().Type(), fn.Name()) } @@ -162,7 +162,7 @@ func run(pass *analysis.Pass) (any, error) { } else { // package-level function (e.g. fmt.Errorf) if pkgFuncs[[2]string{fn.Pkg().Path(), fn.Name()}] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of %s.%s call not used", fn.Pkg().Path(), fn.Name()) } @@ -188,7 +188,7 @@ func (ss *stringSetFlag) String() string { func (ss *stringSetFlag) Set(s string) error { m := make(map[string]bool) // clobber previous value if s != "" { - for _, name := range strings.Split(s, ",") { + for name := range strings.SplitSeq(s, ",") { if name == "" { continue // TODO: report error? proceed? } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go index 2e209c8a6..9bf9f5455 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -10,8 +10,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -22,7 +22,7 @@ var doc string // that are never read. var Analyzer = &analysis.Analyzer{ Name: "unusedwrite", - Doc: analysisutil.MustExtractDoc(doc, "unusedwrite"), + Doc: analyzerutil.MustExtractDoc(doc, "unusedwrite"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedwrite", Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go index 14c6986ea..c2e20521e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -13,10 +13,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,14 +24,14 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + Doc: analyzerutil.MustExtractDoc(doc, "waitgroup"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync") { + if !typesinternal.Imports(pass.Pkg, "sync") { return nil, nil // doesn't directly import sync } @@ -44,7 +44,7 @@ func run(pass *analysis.Pass) (any, error) { if push { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && + if typesinternal.IsMethodNamed(obj, "sync", "WaitGroup", 
"Add") && hasSuffix(stack, wantSuffix) && backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 5e5601aa4..adb471101 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "reflect" "slices" "strconv" "strings" @@ -149,7 +150,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added if newImport.Name != nil { newImport.Name.NamePos = pos } - newImport.Path.ValuePos = pos + updateBasicLitPos(newImport.Path, pos) newImport.EndPos = pos // Clean up parens. impDecl contains at least one spec. @@ -184,7 +185,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added first.Lparen = first.Pos() // Move the imports of the other import declaration to the first one. for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + updateBasicLitPos(spec.(*ast.ImportSpec).Path, first.Pos()) first.Specs = append(first.Specs, spec) } f.Decls = slices.Delete(f.Decls, i, i+1) @@ -209,48 +210,46 @@ func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) // DeleteNamedImport deletes the import with the given name and path from the file f, if present. // If there are duplicate import declarations, all matching ones are deleted. func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup + var ( + delspecs = make(map[*ast.ImportSpec]bool) + delcomments = make(map[*ast.CommentGroup]bool) + ) // Find the import nodes that import path, if any. for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) + gen, ok := f.Decls[i].(*ast.GenDecl) if !ok || gen.Tok != token.IMPORT { continue } for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) + impspec := gen.Specs[j].(*ast.ImportSpec) if importName(impspec) != name || importPath(impspec) != path { continue } // We found an import spec that imports path. // Delete it. - delspecs = append(delspecs, impspec) + delspecs[impspec] = true deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] + gen.Specs = slices.Delete(gen.Specs, j, j+1) // If this was the last import spec in this decl, // delete the decl, too. if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] + f.Decls = slices.Delete(f.Decls, i, i+1) i-- break } else if len(gen.Specs) == 1 { if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) + delcomments[impspec.Doc] = true } if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) + delcomments[impspec.Comment] = true } for _, cg := range f.Comments { // Found comment on the same line as the import spec. if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) + delcomments[cg] = true break } } @@ -294,38 +293,21 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del } // Delete imports from f.Imports. 
- for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } + before := len(f.Imports) + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + _, ok := delspecs[imp] + return ok + }) + if len(f.Imports)+len(delspecs) != before { + // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports). + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) } // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } + f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool { + _, ok := delcomments[cg] + return ok + }) return } @@ -489,3 +471,17 @@ func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { return groups } + +// updateBasicLitPos updates lit.Pos, +// ensuring that lit.End (if set) is displaced by the same amount. +// (See https://go.dev/issue/76395.) +func updateBasicLitPos(lit *ast.BasicLit, pos token.Pos) { + len := lit.End() - lit.Pos() + lit.ValuePos = pos + // TODO(adonovan): after go1.26, simplify to: + // lit.ValueEnd = pos + len + v := reflect.ValueOf(lit).Elem().FieldByName("ValueEnd") + if v.IsValid() && v.Int() != 0 { + v.SetInt(int64(pos + len)) + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 7e72d3c28..60ad425f3 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -453,6 +453,9 @@ func (c Cursor) FindNode(n ast.Node) (Cursor, bool) { // rooted at c such that n.Pos() <= start && end <= n.End(). // (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.) // +// An empty range (start == end) between two adjacent nodes is +// considered to belong to the first node. +// // It returns zero if none is found. // Precondition: start <= end. // @@ -467,7 +470,9 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { // This algorithm could be implemented using c.Inspect, // but it is about 2.5x slower. - best := int32(-1) // push index of latest (=innermost) node containing range + // best is the push-index of the latest (=innermost) node containing range. + // (Beware: latest is not always innermost because FuncDecl.{Name,Type} overlap.) + best := int32(-1) for i, limit := c.indices(); i < limit; i++ { ev := events[i] if ev.index > i { // push? @@ -481,15 +486,35 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { continue } } else { + // Edge case: FuncDecl.Name and .Type overlap: + // Don't update best from Name to FuncDecl.Type. 
+ // + // The condition can be read as: + // - n is FuncType + // - n.parent is FuncDecl + // - best is strictly beneath the FuncDecl + if ev.typ == 1<<nFuncType && + events[ev.parent].typ == 1<<nFuncDecl && + best > ev.parent { + continue + } + nodeEnd = n.End() if n.Pos() > start { break // disjoint, after; stop } } + // Inv: node.{Pos,FileStart} <= start if end <= nodeEnd { // node fully contains target range best = i + + // Don't search beyond end of the first match. + // This is important only for an empty range (start=end) + // between two adjoining nodes, which would otherwise + // match both nodes; we want to match only the first. + limit = ev.index } else if nodeEnd < start { i = ev.index // disjoint, before; skip forward } diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go index 32886a717..8a7f0fccc 100644 --- a/vendor/golang.org/x/tools/go/buildutil/allpackages.go +++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go @@ -175,7 +175,7 @@ func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { for _, pkg := range all { doPkg(pkg, neg) } - } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { + } else if dir, ok := strings.CutSuffix(arg, "/..."); ok { // dir/... matches all packages beneath dir for _, pkg := range all { if strings.HasPrefix(pkg, dir) && diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go index 410c8e72d..f66cd5df2 100644 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -43,7 +43,7 @@ func (v *TagsFlag) Set(s string) error { // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. *v = []string{} - for _, s := range strings.Split(s, ",") { + for s := range strings.SplitSeq(s, ",") { if s != "" { *v = append(*v, s) } diff --git a/vendor/golang.org/x/tools/go/cfg/builder.go b/vendor/golang.org/x/tools/go/cfg/builder.go index ac4d63c40..f16cd4230 100644 --- a/vendor/golang.org/x/tools/go/cfg/builder.go +++ b/vendor/golang.org/x/tools/go/cfg/builder.go @@ -13,7 +13,7 @@ import ( ) type builder struct { - cfg *CFG + blocks []*Block mayReturn func(*ast.CallExpr) bool current *Block lblocks map[string]*lblock // labeled blocks @@ -32,12 +32,18 @@ start: *ast.SendStmt, *ast.IncDecStmt, *ast.GoStmt, - *ast.DeferStmt, *ast.EmptyStmt, *ast.AssignStmt: // No effect on control flow. b.add(s) + case *ast.DeferStmt: + b.add(s) + // Assume conservatively that this behaves like: + // defer func() { recover() } + // so any subsequent panic may act like a return. + b.current.returns = true + case *ast.ExprStmt: b.add(s) if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) { @@ -64,6 +70,7 @@ start: goto start // effectively: tailcall stmt(g, s.Stmt, label) case *ast.ReturnStmt: + b.current.returns = true b.add(s) b.current = b.newBlock(KindUnreachable, s) @@ -483,14 +490,13 @@ func (b *builder) labeledBlock(label *ast.Ident, stmt *ast.LabeledStmt) *lblock // It does not automatically become the current block. // comment is an optional string for more readable debugging output.
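The builder hunks above record a returns bit on blocks for return statements and (conservatively) for defer, which cfg.go below folds into a new CFG.NoReturn accessor. A sketch of driving it through the public API (NoReturn exists only once this patch is applied):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/cfg"
)

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", "package p\nfunc spin() { for {} }\n", 0)
	if err != nil {
		panic(err)
	}
	fn := file.Decls[0].(*ast.FuncDecl)
	// Conservatively assume every call may return.
	g := cfg.New(fn.Body, func(*ast.CallExpr) bool { return true })
	fmt.Print(g.Format(fset))
	// Control never escapes the loop, so no live block carries the
	// returns bit and NoReturn (added by this patch) reports true.
	fmt.Println("NoReturn:", g.NoReturn())
}
```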
func (b *builder) newBlock(kind BlockKind, stmt ast.Stmt) *Block { - g := b.cfg block := &Block{ - Index: int32(len(g.Blocks)), + Index: int32(len(b.blocks)), Kind: kind, Stmt: stmt, } block.Succs = block.succs2[:0] - g.Blocks = append(g.Blocks, block) + b.blocks = append(b.blocks, block) return block } diff --git a/vendor/golang.org/x/tools/go/cfg/cfg.go b/vendor/golang.org/x/tools/go/cfg/cfg.go index 1f2087160..f69912c80 100644 --- a/vendor/golang.org/x/tools/go/cfg/cfg.go +++ b/vendor/golang.org/x/tools/go/cfg/cfg.go @@ -53,10 +53,13 @@ import ( // // The entry point is Blocks[0]; there may be multiple return blocks. type CFG struct { - fset *token.FileSet - Blocks []*Block // block[0] is entry; order otherwise undefined + Blocks []*Block // block[0] is entry; order otherwise undefined + noreturn bool // function body lacks a reachable return statement } +// NoReturn reports whether the function has no reachable return. +func (cfg *CFG) NoReturn() bool { return cfg.noreturn } + // A Block represents a basic block: a list of statements and // expressions that are always evaluated sequentially. // @@ -68,12 +71,13 @@ type CFG struct { // an [ast.Expr], Succs[0] is the successor if the condition is true, and // Succs[1] is the successor if the condition is false. type Block struct { - Nodes []ast.Node // statements, expressions, and ValueSpecs - Succs []*Block // successor nodes in the graph - Index int32 // index within CFG.Blocks - Live bool // block is reachable from entry - Kind BlockKind // block kind - Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) + Nodes []ast.Node // statements, expressions, and ValueSpecs + Succs []*Block // successor nodes in the graph + Index int32 // index within CFG.Blocks + Live bool // block is reachable from entry + returns bool // block contains return or defer (which may recover and return) + Kind BlockKind // block kind + Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) succs2 [2]*Block // underlying array for Succs } @@ -142,14 +146,14 @@ func (kind BlockKind) String() string { func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { b := builder{ mayReturn: mayReturn, - cfg: new(CFG), } b.current = b.newBlock(KindBody, body) b.stmt(body) - // Compute liveness (reachability from entry point), breadth-first. - q := make([]*Block, 0, len(b.cfg.Blocks)) - q = append(q, b.cfg.Blocks[0]) // entry point + // Compute liveness (reachability from entry point), + // breadth-first, marking Block.Live flags. + q := make([]*Block, 0, len(b.blocks)) + q = append(q, b.blocks[0]) // entry point for len(q) > 0 { b := q[len(q)-1] q = q[:len(q)-1] @@ -163,12 +167,22 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { // Does control fall off the end of the function's body? // Make implicit return explicit. if b.current != nil && b.current.Live { + b.current.returns = true b.add(&ast.ReturnStmt{ Return: body.End() - 1, }) } - return b.cfg + // Is any return (or defer+recover) block reachable? + noreturn := true + for _, bl := range b.blocks { + if bl.Live && bl.returns { + noreturn = false + break + } + } + + return &CFG{Blocks: b.blocks, noreturn: noreturn} } func (b *Block) String() string { @@ -188,6 +202,14 @@ func (b *Block) comment(fset *token.FileSet) string { // // When control falls off the end of the function, the ReturnStmt is synthetic // and its [ast.Node.End] position may be beyond the end of the file. 
+// +// A function that contains no return statement (explicit or implied) +// may yet return normally, and may even return a nonzero value. For example: +// +// func() (res any) { +// defer func() { res = recover() }() +// panic(123) +// } func (b *Block) Return() (ret *ast.ReturnStmt) { if len(b.Nodes) > 0 { ret, _ = b.Nodes[len(b.Nodes)-1].(*ast.ReturnStmt) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 89f89dd2d..680a70ca8 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -364,12 +364,6 @@ type jsonPackage struct { DepsErrors []*packagesinternal.PackageError } -type jsonPackageError struct { - ImportStack []string - Pos string - Err string -} - func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 060ab08ef..ff607389d 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -1027,11 +1027,15 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { - // Fill in the blanks to avoid surprises. + // To avoid surprises, fill in the blanks consistent + // with other packages. (For example, some analyzers + // assert that each needed types.Info map is non-nil + // even when there is no syntax that would cause them + // to consult the map.) lpkg.Types = types.Unsafe lpkg.Fset = ld.Fset lpkg.Syntax = []*ast.File{} - lpkg.TypesInfo = new(types.Info) + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes return } @@ -1180,20 +1184,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - // Populate TypesInfo only if needed, as it - // causes the type checker to work much harder. - if ld.Config.Mode&NeedTypesInfo != 0 { - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - FileVersions: make(map[*ast.File]string), - } - } + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1307,6 +1298,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.IllTyped = illTyped } +func (ld *loader) newTypesInfo() *types.Info { + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo == 0 { + return nil + } + return &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } +} + // An importFunc is an implementation of the single-method // types.Importer interface based on a function value. 
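The loadPackage change above matters to analyzers: with newTypesInfo factored out, even the synthetic "unsafe" package now carries non-nil types.Info maps. This is observable through the public API:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "unsafe")
	if err != nil {
		panic(err)
	}
	p := pkgs[0]
	// With this patch, TypesInfo.Defs (and the other maps) are non-nil
	// even though package unsafe has no syntax to populate them.
	fmt.Println(p.Types.Path(), p.TypesInfo.Defs != nil)
}
```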
type importerFunc func(path string) (*types.Package, error) diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index af6a60d75..c546b1b63 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -78,7 +78,7 @@ func PrintErrors(pkgs []*Package) int { return n } -// Postorder returns an iterator over the the packages in +// Postorder returns an iterator over the packages in // the import graph whose roots are pkgs. // Packages are enumerated in dependencies-first order. func Postorder(pkgs []*Package) iter.Seq[*Package] { diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index a5ef8fb40..a75257c8b 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -110,10 +110,11 @@ var ( tEface = types.NewInterfaceType(nil, nil).Complete() // SSA Value constants. - vZero = intConst(0) - vOne = intConst(1) - vTrue = NewConst(constant.MakeBool(true), tBool) - vFalse = NewConst(constant.MakeBool(false), tBool) + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(constant.MakeBool(true), tBool) + vFalse = NewConst(constant.MakeBool(false), tBool) + vNoReturn = NewConst(constant.MakeString("noreturn"), tString) jReady = intConst(0) // range-over-func jump is READY jBusy = intConst(-1) // range-over-func jump is BUSY @@ -291,7 +292,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { var c Call b.setCall(fn, e, &c.Call) c.typ = typ - return fn.emit(&c) + return emitCall(fn, &c) case *ast.IndexExpr: mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map. @@ -380,7 +381,13 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ } case "new": - return emitNew(fn, typeparams.MustDeref(typ), pos, "new") + alloc := emitNew(fn, typeparams.MustDeref(typ), pos, "new") + if !fn.info.Types[args[0]].IsType() { + // new(expr), requires go1.26 + v := b.expr(fn, args[0]) + emitStore(fn, alloc, v, pos) + } + return alloc case "len", "cap": // Special case: len or cap of an array or *array is @@ -717,7 +724,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { var v Call b.setCall(fn, e, &v.Call) v.setType(fn.typ(tv.Type)) - return fn.emit(&v) + return emitCall(fn, &v) case *ast.UnaryExpr: switch e.Op { @@ -2337,7 +2344,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { // for x := range f { ... } // into // f(func(x T) bool { ... }) - b.rangeFunc(fn, x, s, label) + b.rangeFunc(fn, x, s, label) return default: @@ -2383,7 +2390,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { // rangeFunc emits to fn code for the range-over-func rng.Body of the iterator // function x, optionally labelled by label. It creates a new anonymous function // yield for rng and builds the function. -func (b *builder) rangeFunc(fn *Function, x Value, tk, tv types.Type, rng *ast.RangeStmt, label *lblock) { +func (b *builder) rangeFunc(fn *Function, x Value, rng *ast.RangeStmt, label *lblock) { // Consider the SSA code for the outermost range-over-func in fn: // // func fn(...)
(ret R) { @@ -2987,8 +2994,8 @@ func (b *builder) buildYieldFunc(fn *Function) { fn.source = fn.parent.source fn.startBody() params := fn.Signature.Params() - for i := 0; i < params.Len(); i++ { - fn.addParamVar(params.At(i)) + for v := range params.Variables() { + fn.addParamVar(v) } // Initial targets diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go index 2fa3d0757..d94cb6fb7 100644 --- a/vendor/golang.org/x/tools/go/ssa/create.go +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -312,3 +312,14 @@ func (prog *Program) AllPackages() []*Package { func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] } + +// SetNoReturn sets the predicate used when building the ssa.Program +// prog that reports whether a given function cannot return. +// This may be used to prune spurious control flow edges +// after (e.g.) log.Fatal, improving the precision of analyses. +// +// A typical implementation is the [ctrlflow.CFGs.NoReturn] method from +// [golang.org/x/tools/go/analysis/passes/ctrlflow]. +func (prog *Program) SetNoReturn(noReturn func(*types.Func) bool) { + prog.noReturn = noReturn +} diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go index e53ebf5a7..31aa5de8d 100644 --- a/vendor/golang.org/x/tools/go/ssa/emit.go +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -488,7 +488,7 @@ func emitTailCall(f *Function, call *Call) { } else { call.typ = tresults } - tuple := f.emit(call) + tuple := emitCall(f, call) var ret Return switch nr { case 0: @@ -509,6 +509,27 @@ func emitTailCall(f *Function, call *Call) { f.currentBlock = nil } +// emitCall emits a call instruction. If the callee is "no return", +// it also emits a panic to eliminate infeasible CFG edges. +func emitCall(fn *Function, call *Call) Value { + res := fn.emit(call) + + callee := call.Call.StaticCallee() + if callee != nil && + callee.object != nil && + fn.Prog.noReturn != nil && + fn.Prog.noReturn(callee.object) { + // Call cannot return. Insert a panic after it. + fn.emit(&Panic{ + X: emitConv(fn, vNoReturn, tEface), + pos: call.Pos(), + }) + fn.currentBlock = fn.newBasicBlock("unreachable.noreturn") + } + + return res +} + // emitImplicitSelections emits to f code to apply the sequence of // implicit field selections specified by indices to base value v, and // returns the selected value. diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index f48bd7184..33a12444d 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -668,7 +668,11 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { continue } n, _ := fmt.Fprintf(buf, "%d:", b.Index) + // (|predecessors|, |successors|, immediate dominator) bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) + if b.Idom() != nil { + bmsg = fmt.Sprintf("%s idom:%d", bmsg, b.Idom().Index) + } fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) if false { // CFG debugging diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go index 20a0986e6..5862440a6 100644 --- a/vendor/golang.org/x/tools/go/ssa/instantiate.go +++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go @@ -83,7 +83,7 @@ func createInstance(fn *Function, targs []types.Type) *Function { if prog.mode&InstantiateGenerics != 0 && !prog.isParameterized(targs...) 
{ synthetic = fmt.Sprintf("instance of %s", fn.Name()) if fn.syntax != nil { - subst = makeSubster(prog.ctxt, obj, fn.typeparams, targs, false) + subst = makeSubster(prog.ctxt, obj, fn.typeparams, targs) build = (*builder).buildFromSyntax } else { build = (*builder).buildParamsOnly diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index ecad99d03..7c84494c3 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -45,6 +45,8 @@ type Program struct { // to avoid creation of duplicate methods from type information. objectMethodsMu sync.Mutex objectMethods map[*types.Func]*Function + + noReturn func(*types.Func) bool // (optional) predicate that decides whether a given call cannot return } // A Package is a single analyzed Go package containing Members for diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go index b4feb42cb..7300d2bf3 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -74,8 +74,8 @@ func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { methodsOf := func(T types.Type) { if !types.IsInterface(T) { mset := prog.MethodSets.MethodSet(T) - for i := 0; i < mset.Len(); i++ { - function(prog.MethodValue(mset.At(i))) + for method := range mset.Methods() { + function(prog.MethodValue(method)) } } } diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index 362dce126..5799a0780 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -59,7 +59,7 @@ type subster struct { // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. // fn is the generic function for which we are substituting. -func makeSubster(ctxt *types.Context, fn *types.Func, tparams *types.TypeParamList, targs []types.Type, debug bool) *subster { +func makeSubster(ctxt *types.Context, fn *types.Func, tparams *types.TypeParamList, targs []types.Type) *subster { assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ @@ -352,8 +352,7 @@ func (subst *subster) alias(t *types.Alias) types.Type { // Copy and substitute type params. var newTParams []*types.TypeParam - for i := 0; i < tparams.Len(); i++ { - cur := tparams.At(i) + for cur := range tparams.TypeParams() { cobj := cur.Obj() cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) ntp := types.NewTypeParam(cname, nil) @@ -488,8 +487,7 @@ func (subst *subster) named(t *types.Named) types.Type { obj := types.NewTypeName(tname.Pos(), tname.Pkg(), tname.Name(), nil) fresh := types.NewNamed(obj, nil, nil) var newTParams []*types.TypeParam - for i := 0; i < tparams.Len(); i++ { - cur := tparams.At(i) + for cur := range tparams.TypeParams() { cobj := cur.Obj() cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) ntp := types.NewTypeParam(cname, nil) @@ -567,76 +565,3 @@ func (subst *subster) signature(t *types.Signature) types.Type { } return t } - -// reaches returns true if a type t reaches any type t' s.t. c[t'] == true. -// It updates c to cache results. -// -// reaches is currently only part of the wellFormed debug logic, and -// in practice c is initially only type parameters. It is not currently -// relied on in production. 
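The noReturn field above is the storage behind SetNoReturn (create.go, earlier in this patch); emitCall consults it to seal blocks after calls that cannot return. A hedged wiring sketch, with a toy name-based predicate standing in for the suggested ctrlflow.CFGs.NoReturn:

```go
package main

import (
	"go/types"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax} // deprecated but compact
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err)
	}
	prog, _ := ssautil.AllPackages(pkgs, ssa.SanityCheckFunctions)
	// SetNoReturn is added by this patch; call it before Build so that
	// emitCall can prune infeasible CFG edges after no-return calls.
	prog.SetNoReturn(func(fn *types.Func) bool {
		return fn.Name() == "Fatal" || fn.Name() == "Fatalf" // toy heuristic
	})
	prog.Build()
}
```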
-func reaches(t types.Type, c map[types.Type]bool) (res bool) { - if c, ok := c[t]; ok { - return c - } - - // c is populated with temporary false entries as types are visited. - // This avoids repeat visits and break cycles. - c[t] = false - defer func() { - c[t] = res - }() - - switch t := t.(type) { - case *types.TypeParam, *types.Basic: - return false - case *types.Array: - return reaches(t.Elem(), c) - case *types.Slice: - return reaches(t.Elem(), c) - case *types.Pointer: - return reaches(t.Elem(), c) - case *types.Tuple: - for i := 0; i < t.Len(); i++ { - if reaches(t.At(i).Type(), c) { - return true - } - } - case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - if reaches(t.Field(i).Type(), c) { - return true - } - } - case *types.Map: - return reaches(t.Key(), c) || reaches(t.Elem(), c) - case *types.Chan: - return reaches(t.Elem(), c) - case *types.Signature: - if t.Recv() != nil && reaches(t.Recv().Type(), c) { - return true - } - return reaches(t.Params(), c) || reaches(t.Results(), c) - case *types.Union: - for i := 0; i < t.Len(); i++ { - if reaches(t.Term(i).Type(), c) { - return true - } - } - case *types.Interface: - for i := 0; i < t.NumEmbeddeds(); i++ { - if reaches(t.Embedded(i), c) { - return true - } - } - for i := 0; i < t.NumExplicitMethods(); i++ { - if reaches(t.ExplicitMethod(i).Type(), c) { - return true - } - } - case *types.Named, *types.Alias: - return reaches(t.Underlying(), c) - default: - panic("unreachable") - } - return false -} diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index 932eb6cb0..42f9621c3 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -121,7 +121,7 @@ func is[T any](x any) bool { // recvType returns the receiver type of method obj. func recvType(obj *types.Func) types.Type { - return obj.Type().(*types.Signature).Recv().Type() + return obj.Signature().Recv().Type() } // fieldOf returns the index'th field of the (core type of) a struct type; @@ -200,7 +200,7 @@ func makeLen(T types.Type) *Builtin { // receiverTypeArgs returns the type arguments to a method's receiver. // Returns an empty list if the receiver does not have type arguments. func receiverTypeArgs(method *types.Func) []types.Type { - recv := method.Type().(*types.Signature).Recv() + recv := method.Signature().Recv() _, named := typesinternal.ReceiverNamed(recv) if named == nil { return nil // recv is anonymous struct/interface @@ -221,8 +221,8 @@ func receiverTypeArgs(method *types.Func) []types.Type { func recvAsFirstArg(sig *types.Signature) *types.Signature { params := make([]*types.Var, 0, 1+sig.Params().Len()) params = append(params, sig.Recv()) - for i := 0; i < sig.Params().Len(); i++ { - params = append(params, sig.Params().At(i)) + for v := range sig.Params().Variables() { + params = append(params, v) } return types.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6c0c74968..6646bf550 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -249,7 +249,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { case *types.Func: // A func, if not package-level, must be a method. 
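ssa/util.go above and objectpath below make the same mechanical substitution: the (*types.Func).Signature() accessor (Go 1.23) replaces the obj.Type().(*types.Signature) assertion. Both spellings agree:

```go
package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Construct a toy method: func (T) M()
	T := types.NewNamed(types.NewTypeName(0, nil, "T", nil), types.Typ[types.Int], nil)
	recv := types.NewVar(0, nil, "t", T)
	m := types.NewFunc(0, nil, "M", types.NewSignatureType(recv, nil, nil, nil, nil, false))

	viaAssert := m.Type().(*types.Signature).Recv() // pre-patch spelling
	viaMethod := m.Signature().Recv()               // go1.23 accessor used by the patch
	fmt.Println(viaAssert == viaMethod)             // true: same receiver variable
}
```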
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + if recv := obj.Signature().Recv(); recv == nil { return "", fmt.Errorf("func is not a method: %v", obj) } @@ -405,7 +405,7 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + _, named := typesinternal.ReceiverNamed(meth.Signature().Recv()) if named == nil { return "", false } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 5f10f56cb..3d24a8c63 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -12,6 +12,7 @@ import ( // Callee returns the named target of a function call, if any: // a function, method, builtin, or variable. +// It returns nil for a T(x) conversion. // // Functions and methods may potentially have type parameters. // diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index f035a0b6b..36624572a 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -304,8 +304,7 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Named: hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) + for targ := range targs.Types() { hash += 2 * h.hash(targ) } return hash diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index cb6db8893..5d120d077 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -71,7 +71,7 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } // VendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") return "a/b". +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". func VendorlessPath(ipath string) string { return intimp.VendorlessPath(ipath) } diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go new file mode 100644 index 000000000..74a2a1c81 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go @@ -0,0 +1,6 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analyzerutil provides implementation helpers for analyzers. +package analyzerutil diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go similarity index 95% rename from vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go rename to vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go index 39507723d..772a0300d 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file.
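The rename continues below; MustExtractDoc's calling convention is unchanged. A sketch of the documented pattern under the new import path (hypothetical analyzer name; doc.go is assumed to contain an "Analyzer halting" section, per the convention the patch describes):

```go
package halting

import (
	_ "embed"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/analysis/analyzerutil" // internal to x/tools
)

//go:embed doc.go
var doc string

var Analyzer = &analysis.Analyzer{
	Name: "halting",
	// Extracts the "# Analyzer halting" section of doc.go's package comment.
	Doc: analyzerutil.MustExtractDoc(doc, "halting"),
	Run: func(pass *analysis.Pass) (any, error) { return nil, nil },
}
```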
-package analysisinternal +package analyzerutil import ( "fmt" @@ -35,7 +35,7 @@ import ( // // var Analyzer = &analysis.Analyzer{ // Name: "halting", -// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// Doc: analyzerutil.MustExtractDoc(doc, "halting"), // ... // } func MustExtractDoc(content, name string) string { @@ -97,7 +97,7 @@ func ExtractDoc(content, name string) (string, error) { if f.Doc == nil { return "", fmt.Errorf("Go source file has no package doc comment") } - for _, section := range strings.Split(f.Doc.Text(), "\n# ") { + for section := range strings.SplitSeq(f.Doc.Text(), "\n# ") { if body := strings.TrimPrefix(section, "Analyzer "+name); body != section && body != "" && (body[0] == '\r' || body[0] == '\n') { diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go new file mode 100644 index 000000000..ecc30cae0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +// This file defines helpers for calling [analysis.Pass.ReadFile]. + +import ( + "go/token" + "os" + + "golang.org/x/tools/go/analysis" +) + +// ReadFile reads a file and adds it to the FileSet in pass +// so that we can report errors against it using lineStart. +func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { + readFile := pass.ReadFile + if readFile == nil { + readFile = os.ReadFile + } + content, err := readFile(filename) + if err != nil { + return nil, nil, err + } + tf := pass.Fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil +} diff --git a/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go new file mode 100644 index 000000000..0b9bcc37b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go @@ -0,0 +1,42 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// FileUsesGoVersion reports whether the specified file may use features of the +// specified version of Go (e.g. "go1.24"). +// +// Tip: we recommend using this check "late", just before calling +// pass.Report, rather than "early" (when entering each ast.File, or +// each candidate node of interest, during the traversal), because the +// operation is not free, yet is not a highly selective filter: the +// fraction of files that pass most version checks is high and +// increases over time. +func FileUsesGoVersion(pass *analysis.Pass, file *ast.File, version string) (_res bool) { + fileVersion := pass.TypesInfo.FileVersions[file] + + // Standard packages that are part of toolchain bootstrapping + // are not considered to use a version of Go later than the + // current bootstrap toolchain version. + // The bootstrap rule does not cover tests, + // and some tests (e.g. debug/elf/file_test.go) rely on this.
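Before FileUsesGoVersion's body continues below, a sketch of the "check late" guidance from its doc comment: gate the report, not the traversal (file is assumed to be the enclosing *ast.File of n):

```go
package example

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/analysis/analyzerutil" // internal to x/tools
)

// reportLate performs the (relatively costly, rarely filtering) version
// check only at the moment of reporting.
func reportLate(pass *analysis.Pass, file *ast.File, n ast.Node) {
	if analyzerutil.FileUsesGoVersion(pass, file, "go1.24") {
		pass.Reportf(n.Pos(), "this fix needs a go1.24 feature")
	}
}
```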
+ pkgpath := pass.Pkg.Path() + if packagepath.IsStdPackage(pkgpath) && + stdlib.IsBootstrapPackage(pkgpath) && // (excludes "*_test" external test packages) + !strings.HasSuffix(pass.Fset.File(file.Pos()).Name(), "_test.go") { // (excludes all tests) + fileVersion = stdlib.BootstrapVersion.String() // package must bootstrap + } + + return !versions.Before(fileVersion, version) +} diff --git a/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go b/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go new file mode 100644 index 000000000..41146d9ab --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeindex defines an analyzer that provides a +// [golang.org/x/tools/internal/typesinternal/typeindex.Index]. +// +// Like [golang.org/x/tools/go/analysis/passes/inspect], it is +// intended to be used as a helper by other analyzers; it reports no +// diagnostics of its own. +package typeindex + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +var Analyzer = &analysis.Analyzer{ + Name: "typeindex", + Doc: "indexes of type information for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysis/typeindex", + Run: func(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + return typeindex.New(inspect, pass.Pkg, pass.TypesInfo), nil + }, + RunDespiteErrors: true, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + ResultType: reflect.TypeFor[*typeindex.Index](), +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go deleted file mode 100644 index e48dc3f33..000000000 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisinternal provides gopls' internal analyses with a -// number of helper functions that operate on typed syntax trees. -package analysisinternal - -import ( - "bytes" - "cmp" - "fmt" - "go/ast" - "go/printer" - "go/scanner" - "go/token" - "go/types" - "iter" - pathpkg "path" - "slices" - "strings" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/moreiters" - "golang.org/x/tools/internal/typesinternal" -) - -// Deprecated: this heuristic is ill-defined. -// TODO(adonovan): move to sole use in gopls/internal/cache. -func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { - // Get the end position for the type error. - file := fset.File(start) - if file == nil { - return start - } - if offset := file.PositionFor(start, false).Offset; offset > len(src) { - return start - } else { - src = src[offset:] - } - - // Attempt to find a reasonable end position for the type error. - // - // TODO(rfindley): the heuristic implemented here is unclear. 
It looks like - // it seeks the end of the primary operand starting at start, but that is not - // quite implemented (for example, given a func literal this heuristic will - // return the range of the func keyword). - // - // We should formalize this heuristic, or deprecate it by finally proposing - // to add end position to all type checker errors. - // - // Nevertheless, ensure that the end position at least spans the current - // token at the cursor (this was golang/go#69505). - end := start - { - var s scanner.Scanner - fset := token.NewFileSet() - f := fset.AddFile("", fset.Base(), len(src)) - s.Init(f, src, nil /* no error handler */, scanner.ScanComments) - pos, tok, lit := s.Scan() - if tok != token.SEMICOLON && token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) { - off := file.Offset(pos) + len(lit) - src = src[off:] - end += token.Pos(off) - } - } - - // Look for bytes that might terminate the current operand. See note above: - // this is imprecise. - if width := bytes.IndexAny(src, " \n,():;[]+-*/"); width > 0 { - end += token.Pos(width) - } - return end -} - -// WalkASTWithParent walks the AST rooted at n. The semantics are -// similar to ast.Inspect except it does not call f(nil). -func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { - var ancestors []ast.Node - ast.Inspect(n, func(n ast.Node) (recurse bool) { - if n == nil { - ancestors = ancestors[:len(ancestors)-1] - return false - } - - var parent ast.Node - if len(ancestors) > 0 { - parent = ancestors[len(ancestors)-1] - } - ancestors = append(ancestors, n) - return f(n, parent) - }) -} - -// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. -// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within -// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that -// is unrecognized. -func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { - - // Initialize matches to contain the variable types we are searching for. - matches := make(map[types.Type][]string) - for _, typ := range typs { - if typ == nil { - continue // TODO(adonovan): is this reachable? - } - matches[typ] = nil // create entry - } - - seen := map[types.Object]struct{}{} - ast.Inspect(node, func(n ast.Node) bool { - if n == nil { - return false - } - // Prevent circular definitions. If 'pos' is within an assignment statement, do not - // allow any identifiers in that assignment statement to be selected. Otherwise, - // we could do the following, where 'x' satisfies the type of 'f0': - // - // x := fakeStruct{f0: x} - // - if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { - return false - } - if n.End() > pos { - return n.Pos() <= pos - } - ident, ok := n.(*ast.Ident) - if !ok || ident.Name == "_" { - return true - } - obj := info.Defs[ident] - if obj == nil || obj.Type() == nil { - return true - } - if _, ok := obj.(*types.TypeName); ok { - return true - } - // Prevent duplicates in matches' values. - if _, ok = seen[obj]; ok { - return true - } - seen[obj] = struct{}{} - // Find the scope for the given position. Then, check whether the object - // exists within the scope. 
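The deleted code below leans on a stock go/types idiom, Scope.Innermost plus LookupParent, which is worth seeing standalone:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", "package p\nfunc f() { x := 1; _ = x }\n", 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Uses: map[*ast.Ident]types.Object{}}
	conf := types.Config{}
	pkg, err := conf.Check("p", fset, []*ast.File{file}, info)
	if err != nil {
		panic(err)
	}
	var usePos token.Pos
	for id := range info.Uses { // the sole use of x, in "_ = x"
		if id.Name == "x" {
			usePos = id.Pos()
		}
	}
	inner := pkg.Scope().Innermost(usePos) // f's body block scope
	_, obj := inner.LookupParent("x", usePos)
	fmt.Println(obj.Type()) // int
}
```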
- innerScope := pkg.Scope().Innermost(pos) - if innerScope == nil { - return true - } - _, foundObj := innerScope.LookupParent(ident.Name, pos) - if foundObj != obj { - return true - } - // The object must match one of the types that we are searching for. - // TODO(adonovan): opt: use typeutil.Map? - if names, ok := matches[obj.Type()]; ok { - matches[obj.Type()] = append(names, ident.Name) - } else { - // If the object type does not exactly match - // any of the target types, greedily find the first - // target type that the object type can satisfy. - for typ := range matches { - if equivalentTypes(obj.Type(), typ) { - matches[typ] = append(matches[typ], ident.Name) - } - } - } - return true - }) - return matches -} - -func equivalentTypes(want, got types.Type) bool { - if types.Identical(want, got) { - return true - } - // Code segment to help check for untyped equality from (golang/go#32146). - if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { - if lhs, ok := got.Underlying().(*types.Basic); ok { - return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType - } - } - return types.AssignableTo(want, got) -} - -// A ReadFileFunc is a function that returns the -// contents of a file, such as [os.ReadFile]. -type ReadFileFunc = func(filename string) ([]byte, error) - -// CheckedReadFile returns a wrapper around a Pass.ReadFile -// function that performs the appropriate checks. -func CheckedReadFile(pass *analysis.Pass, readFile ReadFileFunc) ReadFileFunc { - return func(filename string) ([]byte, error) { - if err := CheckReadable(pass, filename); err != nil { - return nil, err - } - return readFile(filename) - } -} - -// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. -func CheckReadable(pass *analysis.Pass, filename string) error { - if slices.Contains(pass.OtherFiles, filename) || - slices.Contains(pass.IgnoredFiles, filename) { - return nil - } - for _, f := range pass.Files { - if pass.Fset.File(f.FileStart).Name() == filename { - return nil - } - } - return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) -} - -// AddImport checks whether this file already imports pkgpath and -// that import is in scope at pos. If so, it returns the name under -// which it was imported and a zero edit. Otherwise, it adds a new -// import of pkgpath, using a name derived from the preferred name, -// and returns the chosen name, a prefix to be concatenated with member -// to form a qualified name, and the edit for the new import. -// -// In the special case that pkgpath is dot-imported then member, the -// identifier for which the import is being added, is consulted. If -// member is not shadowed at pos, AddImport returns (".", "", nil). -// (AddImport accepts the caller's implicit claim that the imported -// package declares member.) -// -// It does not mutate its arguments. -func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) { - // Find innermost enclosing lexical block. - scope := info.Scopes[file].Innermost(pos) - if scope == nil { - panic("no enclosing lexical block") - } - - // Is there an existing import of this package? - // If so, are we in its scope? (not shadowed) - for _, spec := range file.Imports { - pkgname := info.PkgNameOf(spec) - if pkgname != nil && pkgname.Imported().Path() == pkgpath { - name = pkgname.Name() - if name == "." 
{ - // The scope of ident must be the file scope. - if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { - return name, "", nil - } - } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { - return name, name + ".", nil - } - } - } - - // We must add a new import. - // Ensure we have a fresh name. - newName := FreshName(scope, pos, preferredName) - - // Create a new import declaration either before the first existing - // declaration (which must exist), including its comments; or - // inside the declaration, if it is an import group. - // - // Use a renaming import whenever the preferred name is not - // available, or the chosen name does not match the last - // segment of its path. - newText := fmt.Sprintf("%q", pkgpath) - if newName != preferredName || newName != pathpkg.Base(pkgpath) { - newText = fmt.Sprintf("%s %q", newName, pkgpath) - } - decl0 := file.Decls[0] - var before ast.Node = decl0 - switch decl0 := decl0.(type) { - case *ast.GenDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - case *ast.FuncDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - } - // If the first decl is an import group, add this new import at the end. - if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { - pos = gd.Rparen - // if it's a std lib, we should append it at the beginning of import group. - // otherwise we may see the std package is put at the last behind a 3rd module which doesn't follow our convention. - // besides, gofmt doesn't help in this case. - if IsStdPackage(pkgpath) && len(gd.Specs) != 0 { - pos = gd.Specs[0].Pos() - newText += "\n\t" - } else { - newText = "\t" + newText + "\n" - } - } else { - pos = before.Pos() - newText = "import " + newText + "\n\n" - } - return newName, newName + ".", []analysis.TextEdit{{ - Pos: pos, - End: pos, - NewText: []byte(newText), - }} -} - -// FreshName returns the name of an identifier that is undefined -// at the specified position, based on the preferred name. -func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { - newName := preferred - for i := 0; ; i++ { - if _, obj := scope.LookupParent(newName, pos); obj == nil { - break // fresh - } - newName = fmt.Sprintf("%s%d", preferred, i) - } - return newName -} - -// Format returns a string representation of the node n. -func Format(fset *token.FileSet, n ast.Node) string { - var buf strings.Builder - printer.Fprint(&buf, fset, n) // ignore errors - return buf.String() -} - -// Imports returns true if path is imported by pkg. -func Imports(pkg *types.Package, path string) bool { - for _, imp := range pkg.Imports() { - if imp.Path() == path { - return true - } - } - return false -} - -// IsTypeNamed reports whether t is (or is an alias for) a -// package-level defined type with the given package path and one of -// the given names. It returns false if t is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. -func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { - if named, ok := types.Unalias(t).(*types.Named); ok { - tname := named.Obj() - return tname != nil && - typesinternal.IsPackageLevel(tname) && - tname.Pkg().Path() == pkgPath && - slices.Contains(names, tname.Name()) - } - return false -} - -// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a -// package-level defined type with the given package path and one of the given -// names. 
It returns false if t is not a pointer type. -func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { - r := typesinternal.Unpointer(t) - if r == t { - return false - } - return IsTypeNamed(r, pkgPath, names...) -} - -// IsFunctionNamed reports whether obj is a package-level function -// defined in the given package and has one of the given names. -// It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. -func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { - f, ok := obj.(*types.Func) - return ok && - typesinternal.IsPackageLevel(obj) && - f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && - slices.Contains(names, f.Name()) -} - -// IsMethodNamed reports whether obj is a method defined on a -// package-level type with the given package and type name, and has -// one of the given names. It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.TypeName.Name", -// which is important for the performance of syntax matching. -func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { - if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { - _, T := typesinternal.ReceiverNamed(recv) - return T != nil && - IsTypeNamed(T, pkgPath, typeName) && - slices.Contains(names, fn.Name()) - } - } - return false -} - -// ValidateFixes validates the set of fixes for a single diagnostic. -// Any error indicates a bug in the originating analyzer. -// -// It updates fixes so that fixes[*].End.IsValid(). -// -// It may be used as part of an analysis driver implementation. -func ValidateFixes(fset *token.FileSet, a *analysis.Analyzer, fixes []analysis.SuggestedFix) error { - fixMessages := make(map[string]bool) - for i := range fixes { - fix := &fixes[i] - if fixMessages[fix.Message] { - return fmt.Errorf("analyzer %q suggests two fixes with same Message (%s)", a.Name, fix.Message) - } - fixMessages[fix.Message] = true - if err := validateFix(fset, fix); err != nil { - return fmt.Errorf("analyzer %q suggests invalid fix (%s): %v", a.Name, fix.Message, err) - } - } - return nil -} - -// validateFix validates a single fix. -// Any error indicates a bug in the originating analyzer. -// -// It updates fix so that fix.End.IsValid(). -func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { - - // Stably sort edits by Pos. This ordering puts insertions - // (end = start) before deletions (end > start) at the same - // point, but uses a stable sort to preserve the order of - // multiple insertions at the same point. - slices.SortStableFunc(fix.TextEdits, func(x, y analysis.TextEdit) int { - if sign := cmp.Compare(x.Pos, y.Pos); sign != 0 { - return sign - } - return cmp.Compare(x.End, y.End) - }) - - var prev *analysis.TextEdit - for i := range fix.TextEdits { - edit := &fix.TextEdits[i] - - // Validate edit individually. 
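validateFix's first step, deleted above, is this stable sort: order edits by Pos then End, so insertions (Pos == End) at one point keep their original relative order ahead of any deletion starting there. Standalone:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"

	"golang.org/x/tools/go/analysis"
)

func main() {
	edits := []analysis.TextEdit{
		{Pos: 10, End: 12},                       // a deletion
		{Pos: 10, End: 10, NewText: []byte("A")}, // first insertion at 10
		{Pos: 10, End: 10, NewText: []byte("B")}, // second insertion at 10
	}
	slices.SortStableFunc(edits, func(x, y analysis.TextEdit) int {
		if sign := cmp.Compare(x.Pos, y.Pos); sign != 0 {
			return sign
		}
		return cmp.Compare(x.End, y.End)
	})
	for _, e := range edits {
		fmt.Println(e.Pos, e.End, string(e.NewText)) // A, then B, then the deletion
	}
}
```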
- start := edit.Pos - file := fset.File(start) - if file == nil { - return fmt.Errorf("no token.File for TextEdit.Pos (%v)", edit.Pos) - } - fileEnd := token.Pos(file.Base() + file.Size()) - if end := edit.End; end.IsValid() { - if end < start { - return fmt.Errorf("TextEdit.Pos (%v) > TextEdit.End (%v)", edit.Pos, edit.End) - } - endFile := fset.File(end) - if endFile != file && end < fileEnd+10 { - // Relax the checks below in the special case when the end position - // is only slightly beyond EOF, as happens when End is computed - // (as in ast.{Struct,Interface}Type) rather than based on - // actual token positions. In such cases, truncate end to EOF. - // - // This is a workaround for #71659; see: - // https://github.com/golang/go/issues/71659#issuecomment-2651606031 - // A better fix would be more faithful recording of token - // positions (or their absence) in the AST. - edit.End = fileEnd - continue - } - if endFile == nil { - return fmt.Errorf("no token.File for TextEdit.End (%v; File(start).FileEnd is %d)", end, file.Base()+file.Size()) - } - if endFile != file { - return fmt.Errorf("edit #%d spans files (%v and %v)", - i, file.Position(edit.Pos), endFile.Position(edit.End)) - } - } else { - edit.End = start // update the SuggestedFix - } - if eof := fileEnd; edit.End > eof { - return fmt.Errorf("end is (%v) beyond end of file (%v)", edit.End, eof) - } - - // Validate the sequence of edits: - // properly ordered, no overlapping deletions - if prev != nil && edit.Pos < prev.End { - xpos := fset.Position(prev.Pos) - xend := fset.Position(prev.End) - ypos := fset.Position(edit.Pos) - yend := fset.Position(edit.End) - return fmt.Errorf("overlapping edits to %s (%d:%d-%d:%d and %d:%d-%d:%d)", - xpos.Filename, - xpos.Line, xpos.Column, - xend.Line, xend.Column, - ypos.Line, ypos.Column, - yend.Line, yend.Column, - ) - } - prev = edit - } - - return nil -} - -// CanImport reports whether one package is allowed to import another. -// -// TODO(adonovan): allow customization of the accessibility relation -// (e.g. for Bazel). -func CanImport(from, to string) bool { - // TODO(adonovan): better segment hygiene. - if to == "internal" || strings.HasPrefix(to, "internal/") { - // Special case: only std packages may import internal/... - // We can't reliably know whether we're in std, so we - // use a heuristic on the first segment. - first, _, _ := strings.Cut(from, "/") - if strings.Contains(first, ".") { - return false // example.com/foo ∉ std - } - if first == "testdata" { - return false // testdata/foo ∉ std - } - } - if strings.HasSuffix(to, "/internal") { - return strings.HasPrefix(from, to[:len(to)-len("/internal")]) - } - if i := strings.LastIndex(to, "/internal/"); i >= 0 { - return strings.HasPrefix(from, to[:i]) - } - return true -} - -// DeleteStmt returns the edits to remove the [ast.Stmt] identified by -// curStmt, if it is contained within a BlockStmt, CaseClause, -// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. -func DeleteStmt(fset *token.FileSet, curStmt inspector.Cursor) []analysis.TextEdit { - stmt := curStmt.Node().(ast.Stmt) - // if the stmt is on a line by itself delete the whole line - // otherwise just delete the statement. - - // this logic would be a lot simpler with the file contents, and somewhat simpler - // if the cursors included the comments. 
- - tokFile := fset.File(stmt.Pos()) - lineOf := tokFile.Line - stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) - - var from, to token.Pos - // bounds of adjacent syntax/comments on same line, if any - limits := func(left, right token.Pos) { - if lineOf(left) == stmtStartLine { - from = left - } - if lineOf(right) == stmtEndLine { - to = right - } - } - // TODO(pjw): there are other places a statement might be removed: - // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . - // (removing the blocks requires more rewriting than this routine would do) - // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . - // (removing the stmt requires more rewriting, and it's unclear what the user means) - switch parent := curStmt.Parent().Node().(type) { - case *ast.SwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - case *ast.TypeSwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - if parent.Assign == stmt { - return nil // don't let the user break the type switch - } - case *ast.BlockStmt: - limits(parent.Lbrace, parent.Rbrace) - case *ast.CommClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - if parent.Comm == stmt { - return nil // maybe the user meant to remove the entire CommClause? - } - case *ast.CaseClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - case *ast.ForStmt: - limits(parent.For, parent.Body.Lbrace) - - default: - return nil // not one of ours - } - - if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { - from = prev.Node().End() // preceding statement ends on same line - } - if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { - to = next.Node().Pos() // following statement begins on same line - } - // and now for the comments -Outer: - for _, cg := range enclosingFile(curStmt).Comments { - for _, co := range cg.List { - if lineOf(co.End()) < stmtStartLine { - continue - } else if lineOf(co.Pos()) > stmtEndLine { - break Outer // no more are possible - } - if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { - if !from.IsValid() || co.End() > from { - from = co.End() - continue // maybe there are more - } - } - if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { - if !to.IsValid() || co.Pos() < to { - to = co.Pos() - continue // maybe there are more - } - } - } - } - // if either from or to is valid, just remove the statement - // otherwise remove the line - edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} - if from.IsValid() || to.IsValid() { - // remove just the statement. - // we can't tell if there is a ; or whitespace right after the statement - // ideally we'd like to remove the former and leave the latter - // (if gofmt has run, there likely won't be a ;) - // In type switches we know there's a semicolon somewhere after the statement, - // but the extra work for this special case is not worth it, as gofmt will fix it. - return []analysis.TextEdit{edit} - } - // remove the whole line - for lineOf(edit.Pos) == stmtStartLine { - edit.Pos-- - } - edit.Pos++ // get back tostmtStartLine - for lineOf(edit.End) == stmtEndLine { - edit.End++ - } - return []analysis.TextEdit{edit} -} - -// Comments returns an iterator over the comments overlapping the specified interval. -func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { - // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. 
- return func(yield func(*ast.Comment) bool) { - for _, cg := range file.Comments { - for _, co := range cg.List { - if co.Pos() > end { - return - } - if co.End() < start { - continue - } - - if !yield(co) { - return - } - } - } - } -} - -// IsStdPackage reports whether the specified package path belongs to a -// package in the standard library (including internal dependencies). -func IsStdPackage(path string) bool { - // A standard package has no dot in its first segment. - // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) - slash := strings.IndexByte(path, '/') - if slash < 0 { - slash = len(path) - } - return !strings.Contains(path[:slash], ".") && path != "testdata" -} - -// Range returns an [analysis.Range] for the specified start and end positions. -func Range(pos, end token.Pos) analysis.Range { - return tokenRange{pos, end} -} - -// tokenRange is an implementation of the [analysis.Range] interface. -type tokenRange struct{ StartPos, EndPos token.Pos } - -func (r tokenRange) Pos() token.Pos { return r.StartPos } -func (r tokenRange) End() token.Pos { return r.EndPos } - -// enclosingFile returns the syntax tree for the file enclosing c. -func enclosingFile(c inspector.Cursor) *ast.File { - c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) - return c.Node().(*ast.File) -} diff --git a/vendor/golang.org/x/tools/internal/astutil/comment.go b/vendor/golang.org/x/tools/internal/astutil/comment.go index ee4be23f2..7e52aeaaa 100644 --- a/vendor/golang.org/x/tools/internal/astutil/comment.go +++ b/vendor/golang.org/x/tools/internal/astutil/comment.go @@ -7,6 +7,7 @@ package astutil import ( "go/ast" "go/token" + "iter" "strings" ) @@ -15,7 +16,7 @@ import ( // https://go.dev/wiki/Deprecated, or "" if the documented symbol is not // deprecated. func Deprecation(doc *ast.CommentGroup) string { - for _, p := range strings.Split(doc.Text(), "\n\n") { + for p := range strings.SplitSeq(doc.Text(), "\n\n") { // There is still some ambiguity for deprecation message. This function // only returns the paragraph introduced by "Deprecated: ". More // information related to the deprecation may follow in additional @@ -111,3 +112,24 @@ func Directives(g *ast.CommentGroup) (res []*Directive) { } return } + +// Comments returns an iterator over the comments overlapping the specified interval. +func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { + // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. + return func(yield func(*ast.Comment) bool) { + for _, cg := range file.Comments { + for _, co := range cg.List { + if co.Pos() > end { + return + } + if co.End() < start { + continue + } + + if !yield(co) { + return + } + } + } + } +} diff --git a/vendor/golang.org/x/tools/internal/astutil/equal.go b/vendor/golang.org/x/tools/internal/astutil/equal.go index c945de02d..210f39238 100644 --- a/vendor/golang.org/x/tools/internal/astutil/equal.go +++ b/vendor/golang.org/x/tools/internal/astutil/equal.go @@ -26,6 +26,14 @@ func Equal(x, y ast.Node, identical func(x, y *ast.Ident) bool) bool { return equal(reflect.ValueOf(x), reflect.ValueOf(y), identical) } +// EqualSyntax reports whether x and y are equal. +// Identifiers are considered equal if they are spelled the same. +// Comments are ignored. 
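// [Editorial sketch, not part of the patch: assuming the go/parser
// package and ignoring parse errors, two independently parsed
// expressions compare equal under EqualSyntax when their spelling
// matches, regardless of spacing or token positions:
//
//	x, _ := parser.ParseExpr("f(a + 1)")
//	y, _ := parser.ParseExpr("f(a+1)")
//	ok := EqualSyntax(x, y) // true: same identifiers, same structure
// ]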
+func EqualSyntax(x, y ast.Expr) bool { + sameName := func(x, y *ast.Ident) bool { return x.Name == y.Name } + return Equal(x, y, sameName) +} + func equal(x, y reflect.Value, identical func(x, y *ast.Ident) bool) bool { // Ensure types are the same if x.Type() != y.Type() { diff --git a/vendor/golang.org/x/tools/internal/astutil/stringlit.go b/vendor/golang.org/x/tools/internal/astutil/stringlit.go index 849d45d85..ce1e7de88 100644 --- a/vendor/golang.org/x/tools/internal/astutil/stringlit.go +++ b/vendor/golang.org/x/tools/internal/astutil/stringlit.go @@ -14,16 +14,16 @@ import ( // RangeInStringLiteral calculates the positional range within a string literal // corresponding to the specified start and end byte offsets within the logical string. -func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (token.Pos, token.Pos, error) { +func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (Range, error) { startPos, err := PosInStringLiteral(lit, start) if err != nil { - return 0, 0, fmt.Errorf("start: %v", err) + return Range{}, fmt.Errorf("start: %v", err) } endPos, err := PosInStringLiteral(lit, end) if err != nil { - return 0, 0, fmt.Errorf("end: %v", err) + return Range{}, fmt.Errorf("end: %v", err) } - return startPos, endPos, nil + return Range{startPos, endPos}, nil } // PosInStringLiteral returns the position within a string literal diff --git a/vendor/golang.org/x/tools/internal/astutil/util.go b/vendor/golang.org/x/tools/internal/astutil/util.go index 14189155e..6820ba4cd 100644 --- a/vendor/golang.org/x/tools/internal/astutil/util.go +++ b/vendor/golang.org/x/tools/internal/astutil/util.go @@ -5,8 +5,15 @@ package astutil import ( + "fmt" "go/ast" + "go/printer" "go/token" + "strings" + + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/moreiters" ) // PreorderStack traverses the tree rooted at root, @@ -44,11 +51,87 @@ func PreorderStack(root ast.Node, stack []ast.Node, f func(n ast.Node, stack []a } // NodeContains reports whether the Pos/End range of node n encloses -// the given position pos. +// the given range. // // It is inclusive of both end points, to allow hovering (etc) when // the cursor is immediately after a node. // +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. +// +// Precondition: n must not be nil. +func NodeContains(n ast.Node, rng Range) bool { + return NodeRange(n).Contains(rng) +} + +// NodeContainsPos reports whether the Pos/End range of node n encloses +// the given pos. +// +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. +func NodeContainsPos(n ast.Node, pos token.Pos) bool { + return NodeRange(n).ContainsPos(pos) +} + +// IsChildOf reports whether cur.ParentEdge is ek. +// +// TODO(adonovan): promote to a method of Cursor. +func IsChildOf(cur inspector.Cursor, ek edge.Kind) bool { + got, _ := cur.ParentEdge() + return got == ek +} + +// EnclosingFile returns the syntax tree for the file enclosing c. +// +// TODO(adonovan): promote this to a method of Cursor. +func EnclosingFile(c inspector.Cursor) *ast.File { + c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) + return c.Node().(*ast.File) +} + +// DocComment returns the doc comment for a node, if any. 
+func DocComment(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.FuncDecl: + return n.Doc + case *ast.GenDecl: + return n.Doc + case *ast.ValueSpec: + return n.Doc + case *ast.TypeSpec: + return n.Doc + case *ast.File: + return n.Doc + case *ast.ImportSpec: + return n.Doc + case *ast.Field: + return n.Doc + } + return nil +} + +// Format returns a string representation of the node n. +func Format(fset *token.FileSet, n ast.Node) string { + var buf strings.Builder + printer.Fprint(&buf, fset, n) // ignore errors + return buf.String() +} + +// -- Range -- + +// Range is a Pos interval. +// It implements [analysis.Range] and [ast.Node]. +type Range struct{ Start, EndPos token.Pos } + +// RangeOf constructs a Range. +// +// RangeOf exists to pacify the "unkeyed literal" (composites) vet +// check. It would be nice if there were a way for a type to add +// itself to the allowlist. +func RangeOf(start, end token.Pos) Range { return Range{start, end} } + +// NodeRange returns the extent of node n as a Range. +// // For unfortunate historical reasons, the Pos/End extent of an // ast.File runs from the start of its package declaration---excluding // copyright comments, build tags, and package documentation---to the @@ -56,14 +139,103 @@ func PreorderStack(root ast.Node, stack []ast.Node, f func(n ast.Node, stack []a // as a special case, if n is an [ast.File], NodeContains uses // n.FileStart <= pos && pos <= n.FileEnd to report whether the // position lies anywhere within the file. -// -// Precondition: n must not be nil. -func NodeContains(n ast.Node, pos token.Pos) bool { - var start, end token.Pos +func NodeRange(n ast.Node) Range { if file, ok := n.(*ast.File); ok { - start, end = file.FileStart, file.FileEnd // entire file - } else { - start, end = n.Pos(), n.End() + return Range{file.FileStart, file.FileEnd} // entire file } - return start <= pos && pos <= end + return Range{n.Pos(), n.End()} } + +func (r Range) Pos() token.Pos { return r.Start } +func (r Range) End() token.Pos { return r.EndPos } + +// ContainsPos reports whether the range (inclusive of both end points) +// includes the specified position. +func (r Range) ContainsPos(pos token.Pos) bool { + return r.Contains(RangeOf(pos, pos)) +} + +// Contains reports whether the range (inclusive of both end points) +// includes the specified range. +func (r Range) Contains(rng Range) bool { + return r.Start <= rng.Start && rng.EndPos <= r.EndPos +} + +// IsValid reports whether the range is valid. +func (r Range) IsValid() bool { return r.Start.IsValid() && r.Start <= r.EndPos } + +// -- + +// Select returns the syntax nodes identified by a user's text +// selection. It returns three nodes: the innermost node that wholly +// encloses the selection; and the first and last nodes that are +// wholly enclosed by the selection. +// +// For example, given this selection: +// +// { f(); g(); /* comment */ } +// ~~~~~~~~~~~ +// +// Select returns the enclosing BlockStmt, the f() CallExpr, and the g() CallExpr. +// +// If the selection does not wholly enclose any nodes, Select returns an error +// and invalid start/end nodes, but it may return a valid enclosing node. +// +// Callers that require exactly one syntax tree (e.g. just f() or just +// g()) should check that the returned start and end nodes are +// identical. +// +// This function is intended to be called early in the handling of a +// user's request, since it is tolerant of sloppy selection including +// extraneous whitespace and comments. 
Use it in new code instead of +// PathEnclosingInterval. When the exact extent of a node is known, +// use [Cursor.FindByPos] instead. +func Select(curFile inspector.Cursor, start, end token.Pos) (_enclosing, _start, _end inspector.Cursor, _ error) { + curEnclosing, ok := curFile.FindByPos(start, end) + if !ok { + return noCursor, noCursor, noCursor, fmt.Errorf("invalid selection") + } + + // Find the first and last node wholly within the (start, end) range. + // We'll narrow the effective selection to them, to exclude whitespace. + // (This matches the functionality of PathEnclosingInterval.) + var curStart, curEnd inspector.Cursor + rng := RangeOf(start, end) + for cur := range curEnclosing.Preorder() { + if rng.Contains(NodeRange(cur.Node())) { + // The start node has the least Pos. + if !CursorValid(curStart) { + curStart = cur + } + // The end node has the greatest End. + // End positions do not change monotonically, + // so we must compute the max. + if !CursorValid(curEnd) || + cur.Node().End() > curEnd.Node().End() { + curEnd = cur + } + } + } + if !CursorValid(curStart) { + // The selection is valid (inside curEnclosing) but contains no + // complete nodes. This happens for point selections (start == end), + // or selections covering only spaces, comments, and punctuation + // tokens. + // Return the enclosing node so the caller can still use the context. + return curEnclosing, noCursor, noCursor, fmt.Errorf("invalid selection") + } + return curEnclosing, curStart, curEnd, nil +} + +// CursorValid reports whether the cursor is valid. +// +// A valid cursor may yet be the virtual root node, +// cur.Inspector().Root(), which has no [Cursor.Node]. +// +// TODO(adonovan): move to cursorutil package, and move that package into x/tools. +// Ultimately, make this a method of Cursor. Needs a proposal. +func CursorValid(cur inspector.Cursor) bool { + return cur.Inspector() != nil +} + +var noCursor inspector.Cursor diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index a6cf0e64a..ade5d1e79 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -28,11 +28,6 @@ type Event struct { dynamic []label.Label // dynamically sized storage for remaining labels } -// eventLabelMap implements label.Map for a the labels of an Event. -type eventLabelMap struct { - event Event -} - func (ev Event) At() time.Time { return ev.at } func (ev Event) Format(f fmt.State, r rune) { diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go index 05f3a9a57..16ae6bb02 100644 --- a/vendor/golang.org/x/tools/internal/event/core/export.go +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -8,7 +8,6 @@ import ( "context" "sync/atomic" "time" - "unsafe" "golang.org/x/tools/internal/event/label" ) @@ -17,23 +16,21 @@ import ( // It may return a modified context and event. type Exporter func(context.Context, Event, label.Map) context.Context -var ( - exporter unsafe.Pointer -) +var exporter atomic.Pointer[Exporter] // SetExporter sets the global exporter function that handles all events. // The exporter is called synchronously from the event call site, so it should // return quickly so as not to hold up user code.
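// [Editorial usage sketch, not part of the patch: an exporter is
// typically installed once at startup; it must be cheap, as it runs
// synchronously on every event. Assuming a caller that imports this
// package as core, plus the label and log packages:
//
//	core.SetExporter(func(ctx context.Context, ev core.Event, _ label.Map) context.Context {
//		log.Printf("%v", ev) // Event implements fmt.Formatter
//		return ctx
//	})
// ]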
func SetExporter(e Exporter) { - p := unsafe.Pointer(&e) if e == nil { // &e is always valid, and so p is always valid, but for the early abort // of ProcessEvent to be efficient it needs to make the nil check on the // pointer without having to dereference it, so we make the nil function // also a nil pointer - p = nil + exporter.Store(nil) + } else { + exporter.Store(&e) } - atomic.StorePointer(&exporter, p) } // deliver is called to deliver an event to the supplied exporter. @@ -48,7 +45,7 @@ func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { // Export is called to deliver an event to the global exporter if set. func Export(ctx context.Context, ev Event) context.Context { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx } @@ -61,7 +58,7 @@ func Export(ctx context.Context, ev Event) context.Context { // It will fill in the time. func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx, func() {} } diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 92a391057..c37584af9 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -7,7 +7,6 @@ package label import ( "fmt" "io" - "reflect" "slices" "unsafe" ) @@ -103,11 +102,10 @@ type stringptr unsafe.Pointer // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. func OfString(k Key, v string) Label { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) return Label{ key: k, - packed: uint64(hdr.Len), - untyped: stringptr(hdr.Data), + packed: uint64(len(v)), + untyped: stringptr(unsafe.StringData(v)), } } @@ -116,11 +114,7 @@ func OfString(k Key, v string) Label { // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. func (t Label) UnpackString() string { - var v string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) - hdr.Data = uintptr(t.untyped.(stringptr)) - hdr.Len = int(t.packed) - return v + return unsafe.String((*byte)(t.untyped.(stringptr)), int(t.packed)) } // Valid returns true if the Label is a valid one (it has a key). diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index 734c46198..555ef626c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -34,7 +34,7 @@ type fileInfo struct { const maxlines = 64 * 1024 func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. + _ = column // TODO(mdempsky): Make use of column. // Since we don't know the set of needed file positions, we reserve maxlines // positions per file. 
We delay calling token.File.SetLines until all diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 780873e3a..2bef2b058 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -569,7 +569,6 @@ func (p *iexporter) exportName(obj types.Object) (res string) { type iexporter struct { fset *token.FileSet - out *bytes.Buffer version int shallow bool // don't put types from other packages in the index @@ -830,8 +829,7 @@ func (p *iexporter) doDecl(obj types.Object) { // their name must be qualified before exporting recv. if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) + for rparam := range rparams.TypeParams() { name := tparamExportName(prefix, rparam) w.p.tparamNames[rparam.Obj()] = name } @@ -945,6 +943,13 @@ func (w *exportWriter) posV0(pos token.Pos) { } func (w *exportWriter) pkg(pkg *types.Package) { + if pkg == nil { + // [exportWriter.typ] accepts a nil pkg only for types + // of constants, which cannot contain named objects + // such as fields or methods and thus should never + // reach this method (#76222). + panic("nil package") + } // Ensure any referenced packages are declared in the main index. w.p.allPkgs[pkg] = true @@ -960,9 +965,11 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } -// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. +// typ emits the specified type. +// +// Objects within the type (struct fields and interface methods) are +// qualified by pkg. It may be nil if the type cannot contain objects, +// such as the type of a constant. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -992,6 +999,7 @@ func (w *exportWriter) startType(k itag) { w.data.uint64(uint64(k)) } +// doTyp is the implementation of [exportWriter.typ]. func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { if trace { w.p.trace("exporting type %s (%T)", t, t) @@ -1065,7 +1073,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.pkg(pkg) + w.pkg(pkg) // qualifies param/result vars w.signature(t) case *types.Struct: @@ -1111,19 +1119,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Interface: w.startType(interfaceType) - w.pkg(pkg) + w.pkg(pkg) // qualifies unexported method funcs n := t.NumEmbeddeds() w.uint64(uint64(n)) for i := 0; i < n; i++ { ft := t.EmbeddedType(i) - tPkg := pkg if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { + // e.g. ~int w.pos(token.NoPos) } - w.typ(ft, tPkg) + w.typ(ft, pkg) } // See comment for struct fields. 
In shallow mode we change the encoding @@ -1224,20 +1232,19 @@ func (w *exportWriter) signature(sig *types.Signature) { func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) + for t := range ts.Types() { + w.typ(t, pkg) } } func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) + for tparam := range list.TypeParams() { // Set the type parameter exportName before exporting its type. exportName := tparamExportName(prefix, tparam) w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) + w.typ(tparam, pkg) } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 82e6c9d2d..4d6d50094 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -432,10 +432,10 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { errorf("%v.%v not in index", pkg, name) } - r := &importReader{p: p, currPkg: pkg} + r := &importReader{p: p} r.declReader.Reset(p.declData[off:]) - r.obj(name) + r.obj(pkg, name) } func (p *iimporter) stringAt(off uint64) string { @@ -551,7 +551,6 @@ func canReuse(def *types.Named, rhs types.Type) bool { type importReader struct { p *iimporter declReader bytes.Reader - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -565,7 +564,8 @@ type importReader struct { // for 1.24, but the fix was not worth back-porting). var markBlack = func(name *types.TypeName) {} -func (r *importReader) obj(name string) { +// obj decodes and declares the package-level object denoted by (pkg, name). +func (r *importReader) obj(pkg *types.Package, name string) { tag := r.byte() pos := r.pos() @@ -576,27 +576,27 @@ func (r *importReader) obj(name string) { tparams = r.tparamList() } typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + obj := aliases.NewAlias(r.p.aliases, pos, pkg, name, typ, tparams) markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: typ, val := r.value() - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + r.declare(types.NewConst(pos, pkg, name, typ, val)) case funcTag, genericFuncTag: var tparams []*types.TypeParam if tag == genericFuncTag { tparams = r.tparamList() } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + sig := r.signature(pkg, nil, nil, tparams) + r.declare(types.NewFunc(pos, pkg, name, sig)) case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. 
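// [Editorial illustration, not part of the patch: the stub breaks the
// cycle for self-referential declarations such as
//
//	type List struct{ next *List }
//
// where decoding the underlying struct requires the *types.Named for
// List to exist already.]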
- obj := types.NewTypeName(pos, r.currPkg, name, nil) + obj := types.NewTypeName(pos, pkg, name, nil) named := types.NewNamed(obj, nil, nil) markBlack(obj) // workaround for golang/go#69912 @@ -616,7 +616,7 @@ func (r *importReader) obj(name string) { for n := r.uint64(); n > 0; n-- { mpos := r.pos() mname := r.ident() - recv := r.param() + recv := r.param(pkg) // If the receiver has any targs, set those as the // rparams of the method (since those are the @@ -630,9 +630,9 @@ func (r *importReader) obj(name string) { rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } - msig := r.signature(recv, rparams, nil) + msig := r.signature(pkg, recv, rparams, nil) - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + named.AddMethod(types.NewFunc(mpos, pkg, mname, msig)) } } @@ -644,12 +644,12 @@ func (r *importReader) obj(name string) { errorf("unexpected type param type") } name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) + tn := types.NewTypeName(pos, pkg, name0, nil) t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} + id := ident{pkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -672,7 +672,7 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - v := types.NewVar(pos, r.currPkg, name, typ) + v := types.NewVar(pos, pkg, name, typ) typesinternal.SetVarKind(v, typesinternal.PackageVar) r.declare(v) @@ -905,11 +905,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { case mapType: return types.NewMap(r.typ(), r.typ()) case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) + paramPkg := r.pkg() + return r.signature(paramPkg, nil, nil, nil) case structType: - r.currPkg = r.pkg() + fieldPkg := r.pkg() fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) @@ -932,7 +932,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // discussed in iexport.go, this is not correct, but mostly works and is // preferable to failing (for now at least). if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + field = types.NewField(fpos, fieldPkg, fname, ftyp, emb) } fields[i] = field @@ -941,7 +941,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { return types.NewStruct(fields, tags) case interfaceType: - r.currPkg = r.pkg() + methodPkg := r.pkg() // qualifies methods and their param/result vars embeddeds := make([]types.Type, r.uint64()) for i := range embeddeds { @@ -963,12 +963,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // don't agree with this. 
var recv *types.Var if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) + recv = types.NewVar(token.NoPos, methodPkg, "", base) } - msig := r.signature(recv, nil, nil) + msig := r.signature(methodPkg, recv, nil, nil) if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) + method = types.NewFunc(mpos, methodPkg, mname, msig) } methods[i] = method } @@ -1049,9 +1049,9 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() +func (r *importReader) signature(paramPkg *types.Package, recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList(paramPkg) + results := r.paramList(paramPkg) variadic := params.Len() > 0 && r.bool() return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } @@ -1070,19 +1070,19 @@ func (r *importReader) tparamList() []*types.TypeParam { return xs } -func (r *importReader) paramList() *types.Tuple { +func (r *importReader) paramList(pkg *types.Package) *types.Tuple { xs := make([]*types.Var, r.uint64()) for i := range xs { - xs[i] = r.param() + xs[i] = r.param(pkg) } return types.NewTuple(xs...) } -func (r *importReader) param() *types.Var { +func (r *importReader) param(pkg *types.Package) *types.Var { pos := r.pos() name := r.ident() typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) + return types.NewParam(pos, pkg, name, typ) } func (r *importReader) bool() bool { diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 50b6ca51a..1b4dc0cb5 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -16,6 +16,7 @@ import ( "go/types" "io/fs" "io/ioutil" + "maps" "os" "path" "path/filepath" @@ -27,8 +28,6 @@ import ( "unicode" "unicode/utf8" - "maps" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -43,7 +42,7 @@ var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ if localPrefix == "" { return } - for _, p := range strings.Split(localPrefix, ",") { + for p := range strings.SplitSeq(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } @@ -1251,7 +1250,6 @@ func ImportPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { env *ProcessEnv - walked bool cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 67c17bc43..f390be90f 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -11,6 +11,7 @@ import ( "go/ast" "go/token" "log" + "reflect" "slices" "sort" "strconv" @@ -65,7 +66,7 @@ func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { } // mergeImports merges all the import declarations into the first one. -// Taken from golang.org/x/tools/ast/astutil. +// Taken from golang.org/x/tools/go/ast/astutil. 
// This does not adjust line numbers properly func mergeImports(f *ast.File) { if len(f.Decls) <= 1 { @@ -89,7 +90,7 @@ func mergeImports(f *ast.File) { first.Lparen = first.Pos() // Move the imports of the other import declaration to the first one. for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + updateBasicLitPos(spec.(*ast.ImportSpec).Path, first.Pos()) first.Specs = append(first.Specs, spec) } f.Decls = slices.Delete(f.Decls, i, i+1) @@ -98,7 +99,7 @@ func mergeImports(f *ast.File) { } // declImports reports whether gen contains an import of path. -// Taken from golang.org/x/tools/ast/astutil. +// Taken from golang.org/x/tools/go/ast/astutil. func declImports(gen *ast.GenDecl, path string) bool { if gen.Tok != token.IMPORT { return false @@ -221,7 +222,7 @@ func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast if s.Name != nil { s.Name.NamePos = pos[i].Start } - s.Path.ValuePos = pos[i].Start + updateBasicLitPos(s.Path, pos[i].Start) s.EndPos = pos[i].End nextSpecPos := pos[i].End @@ -296,3 +297,17 @@ type byCommentPos []*ast.CommentGroup func (x byCommentPos) Len() int { return len(x) } func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } + +// updateBasicLitPos updates lit.Pos, +// ensuring that lit.End (if set) is displaced by the same amount. +// (See https://go.dev/issue/76395.) +func updateBasicLitPos(lit *ast.BasicLit, pos token.Pos) { + len := lit.End() - lit.Pos() + lit.ValuePos = pos + // TODO(adonovan): after go1.26, simplify to: + // lit.ValueEnd = pos + len + v := reflect.ValueOf(lit).Elem().FieldByName("ValueEnd") + if v.IsValid() && v.Int() != 0 { + v.SetInt(int64(pos + len)) + } +} diff --git a/vendor/golang.org/x/tools/internal/modindex/index.go b/vendor/golang.org/x/tools/internal/modindex/index.go index c41d1dd90..c7ef97dcd 100644 --- a/vendor/golang.org/x/tools/internal/modindex/index.go +++ b/vendor/golang.org/x/tools/internal/modindex/index.go @@ -10,7 +10,6 @@ import ( "encoding/csv" "fmt" "io" - "log" "os" "path/filepath" "strconv" @@ -107,14 +106,14 @@ var IndexDir string = func() string { var err error dir, err = os.UserCacheDir() // shouldn't happen, but TempDir is better than - // creating ./go/imports + // creating ./goimports if err != nil { dir = os.TempDir() } } dir = filepath.Join(dir, "goimports") if err := os.MkdirAll(dir, 0777); err != nil { - log.Printf("failed to create modcache index dir: %v", err) + dir = "" // #75505, people complain about the error message } return dir }() @@ -127,6 +126,9 @@ func Read(gomodcache string) (*Index, error) { if err != nil { return nil, err } + if IndexDir == "" { + return nil, os.ErrNotExist + } // Read the "link" file for the specified gomodcache directory. // It names the payload file. @@ -227,6 +229,9 @@ func readIndexFrom(gomodcache string, r io.Reader) (*Index, error) { // write writes the index file and updates the index directory to refer to it. func write(gomodcache string, ix *Index) error { + if IndexDir == "" { + return os.ErrNotExist + } // Write the index into a payload file with a fresh name. 
f, err := os.CreateTemp(IndexDir, fmt.Sprintf("index-%d-*", CurrentVersion)) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/modindex/lookup.go b/vendor/golang.org/x/tools/internal/modindex/lookup.go index 0c011a99b..83bd49cd4 100644 --- a/vendor/golang.org/x/tools/internal/modindex/lookup.go +++ b/vendor/golang.org/x/tools/internal/modindex/lookup.go @@ -8,6 +8,8 @@ import ( "slices" "strconv" "strings" + + "golang.org/x/mod/module" ) type Candidate struct { @@ -104,11 +106,15 @@ func (ix *Index) Lookup(pkgName, name string, prefix bool) []Candidate { if len(flds) < 2 { continue // should never happen } + impPath, err := module.UnescapePath(e.ImportPath) + if err != nil { + continue + } px := Candidate{ PkgName: pkgName, Name: flds[0], Dir: string(e.Dir), - ImportPath: e.ImportPath, + ImportPath: impPath, Type: asLexType(flds[1][0]), Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D', } diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go index fe24db9b1..8e9702d84 100644 --- a/vendor/golang.org/x/tools/internal/modindex/symbols.go +++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go @@ -206,8 +206,7 @@ func isDeprecated(doc *ast.CommentGroup) bool { // go.dev/wiki/Deprecated Paragraph starting 'Deprecated:' // This code fails for /* Deprecated: */, but it's the code from // gopls/internal/analysis/deprecated - lines := strings.Split(doc.Text(), "\n\n") - for _, line := range lines { + for line := range strings.SplitSeq(doc.Text(), "\n\n") { if strings.HasPrefix(line, "Deprecated:") { return true } diff --git a/vendor/golang.org/x/tools/internal/moreiters/iters.go b/vendor/golang.org/x/tools/internal/moreiters/iters.go index 69c76ccb9..9e4aaf948 100644 --- a/vendor/golang.org/x/tools/internal/moreiters/iters.go +++ b/vendor/golang.org/x/tools/internal/moreiters/iters.go @@ -45,3 +45,11 @@ func Any[T any](seq iter.Seq[T], pred func(T) bool) bool { } return false } + +// Len returns the number of elements in the sequence (by iterating). +func Len[T any](seq iter.Seq[T]) (n int) { + for range seq { + n++ + } + return +} diff --git a/vendor/golang.org/x/tools/internal/packagepath/packagepath.go b/vendor/golang.org/x/tools/internal/packagepath/packagepath.go new file mode 100644 index 000000000..fa39a13f9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagepath/packagepath.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagepath provides metadata operations on package path +// strings. +package packagepath + +// (This package should not depend on go/ast.) +import "strings" + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. 
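// [Editorial examples, not part of the patch, following the rules of
// this function (including the /internal subtree rules below):
//
//	CanImport("fmt", "internal/abi")           // true: "fmt" passes the std heuristic
//	CanImport("example.com/m", "internal/abi") // false: first segment has a dot
//	CanImport("a/b/c", "a/b/internal/x")       // true: within subtree a/b
//	CanImport("a/z", "a/b/internal/x")         // false: outside subtree a/b
// ]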
+ first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} + +// IsStdPackage reports whether the specified package path belongs to a +// package in the standard library (including internal dependencies). +func IsStdPackage(path string) bool { + // A standard package has no dot in its first segment. + // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) + slash := strings.IndexByte(path, '/') + if slash < 0 { + slash = len(path) + } + return !strings.Contains(path[:slash], ".") && path != "testdata" +} diff --git a/vendor/golang.org/x/tools/internal/refactor/delete.go b/vendor/golang.org/x/tools/internal/refactor/delete.go new file mode 100644 index 000000000..54d0b5f03 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/refactor/delete.go @@ -0,0 +1,566 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package refactor + +// This file defines operations for computing deletion edits. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "slices" + + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +// DeleteVar returns edits to delete the declaration of a variable or +// constant whose defining identifier is curId. +// +// It handles variants including: +// - GenDecl > ValueSpec versus AssignStmt; +// - RHS expression has effects, or not; +// - entire statement/declaration may be eliminated; +// and removes associated comments. +// +// If it cannot make the necessary edits, such as for a function +// parameter or result, it returns nil. +func DeleteVar(tokFile *token.File, info *types.Info, curId inspector.Cursor) []Edit { + switch ek, _ := curId.ParentEdge(); ek { + case edge.ValueSpec_Names: + return deleteVarFromValueSpec(tokFile, info, curId) + + case edge.AssignStmt_Lhs: + return deleteVarFromAssignStmt(tokFile, info, curId) + } + + // e.g. function receiver, parameter, or result, + // or "switch v := expr.(type) {}" (which has no object). + return nil +} + +// deleteVarFromValueSpec returns edits to delete the declaration of a +// variable or constant within a ValueSpec. +// +// Precondition: curId is Ident beneath ValueSpec.Names beneath GenDecl. +// +// See also [deleteVarFromAssignStmt], which has parallel structure. +func deleteVarFromValueSpec(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []Edit { + var ( + id = curIdent.Node().(*ast.Ident) + curSpec = curIdent.Parent() + spec = curSpec.Node().(*ast.ValueSpec) + ) + + declaresOtherNames := slices.ContainsFunc(spec.Names, func(name *ast.Ident) bool { + return name != id && name.Name != "_" + }) + noRHSEffects := !slices.ContainsFunc(spec.Values, func(rhs ast.Expr) bool { + return !typesinternal.NoEffects(info, rhs) + }) + if !declaresOtherNames && noRHSEffects { + // The spec is no longer needed, either to declare + // other variables, or for its side effects.
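// [Editorial example, not part of the patch: deleting y from
//
//	var y = 1
//
// falls in this case — no other name is declared and the RHS has no
// effects — so the whole spec is removed.]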
+ return DeleteSpec(tokFile, curSpec) + } + + // The spec is still needed, either for + // at least one LHS, or for effects on RHS. + // Blank out or delete just one LHS. + + _, index := curIdent.ParentEdge() // index of LHS within ValueSpec.Names + + // If there is no RHS, we can delete the LHS. + if len(spec.Values) == 0 { + var pos, end token.Pos + if index == len(spec.Names)-1 { + // Delete final name. + // + // var _, lhs1 T + // ------ + pos = spec.Names[index-1].End() + end = spec.Names[index].End() + } else { + // Delete non-final name. + // + // var lhs0, _ T + // ------ + pos = spec.Names[index].Pos() + end = spec.Names[index+1].Pos() + } + return []Edit{{ + Pos: pos, + End: end, + }} + } + + // If the assignment is n:n and the RHS has no effects, + // we can delete the LHS and its corresponding RHS. + if len(spec.Names) == len(spec.Values) && + typesinternal.NoEffects(info, spec.Values[index]) { + + if index == len(spec.Names)-1 { + // Delete final items. + // + // var _, lhs1 = rhs0, rhs1 + // ------ ------ + return []Edit{ + { + Pos: spec.Names[index-1].End(), + End: spec.Names[index].End(), + }, + { + Pos: spec.Values[index-1].End(), + End: spec.Values[index].End(), + }, + } + } else { + // Delete non-final items. + // + // var lhs0, _ = rhs0, rhs1 + // ------ ------ + return []Edit{ + { + Pos: spec.Names[index].Pos(), + End: spec.Names[index+1].Pos(), + }, + { + Pos: spec.Values[index].Pos(), + End: spec.Values[index+1].Pos(), + }, + } + } + } + + // We cannot delete the RHS. + // Blank out the LHS. + return []Edit{{ + Pos: id.Pos(), + End: id.End(), + NewText: []byte("_"), + }} +} + +// Precondition: curId is Ident beneath AssignStmt.Lhs. +// +// See also [deleteVarFromValueSpec], which has parallel structure. +func deleteVarFromAssignStmt(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []Edit { + var ( + id = curIdent.Node().(*ast.Ident) + curStmt = curIdent.Parent() + assign = curStmt.Node().(*ast.AssignStmt) + ) + + declaresOtherNames := slices.ContainsFunc(assign.Lhs, func(lhs ast.Expr) bool { + lhsId, ok := lhs.(*ast.Ident) + return ok && lhsId != id && lhsId.Name != "_" + }) + noRHSEffects := !slices.ContainsFunc(assign.Rhs, func(rhs ast.Expr) bool { + return !typesinternal.NoEffects(info, rhs) + }) + if !declaresOtherNames && noRHSEffects { + // The assignment is no longer needed, either to + // declare other variables, or for its side effects. + if edits := DeleteStmt(tokFile, curStmt); edits != nil { + return edits + } + // Statement could not be deleted in this context. + // Fall back to conservative deletion. + } + + // The assign is still needed, either for + // at least one LHS, or for effects on RHS, + // or because it cannot be deleted because of its context. + // Blank out or delete just one LHS. + + // If the assignment is 1:1 and the RHS has no effects, + // we can delete the LHS and its corresponding RHS. + _, index := curIdent.ParentEdge() + if len(assign.Lhs) > 1 && + len(assign.Lhs) == len(assign.Rhs) && + typesinternal.NoEffects(info, assign.Rhs[index]) { + + if index == len(assign.Lhs)-1 { + // Delete final items. + // + // _, lhs1 := rhs0, rhs1 + // ------ ------ + return []Edit{ + { + Pos: assign.Lhs[index-1].End(), + End: assign.Lhs[index].End(), + }, + { + Pos: assign.Rhs[index-1].End(), + End: assign.Rhs[index].End(), + }, + } + } else { + // Delete non-final items.
+ // + // lhs0, _ := rhs0, rhs1 + // ------ ------ + return []Edit{ + { + Pos: assign.Lhs[index].Pos(), + End: assign.Lhs[index+1].Pos(), + }, + { + Pos: assign.Rhs[index].Pos(), + End: assign.Rhs[index+1].Pos(), + }, + } + } + } + + // We cannot delete the RHS. + // Blank out the LHS. + edits := []Edit{{ + Pos: id.Pos(), + End: id.End(), + NewText: []byte("_"), + }} + + // If this eliminates the final variable declared by + // an := statement, we need to turn it into an = + // assignment to avoid a "no new variables on left + // side of :=" error. + if !declaresOtherNames { + edits = append(edits, Edit{ + Pos: assign.TokPos, + End: assign.TokPos + token.Pos(len(":=")), + NewText: []byte("="), + }) + } + + return edits +} + +// DeleteSpec returns edits to delete the {Type,Value}Spec identified by curSpec. +// +// TODO(adonovan): add test suite. Test for consts as well. +func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []Edit { + var ( + spec = curSpec.Node().(ast.Spec) + curDecl = curSpec.Parent() + decl = curDecl.Node().(*ast.GenDecl) + ) + + // If it is the sole spec in the decl, + // delete the entire decl. + if len(decl.Specs) == 1 { + return DeleteDecl(tokFile, curDecl) + } + + // Delete the spec and its comments. + _, index := curSpec.ParentEdge() // index of ValueSpec within GenDecl.Specs + pos, end := spec.Pos(), spec.End() + if doc := astutil.DocComment(spec); doc != nil { + pos = doc.Pos() // leading comment + } + if index == len(decl.Specs)-1 { + // Delete final spec. + if c := eolComment(spec); c != nil { + // var (v int // comment \n) + end = c.End() + } + } else { + // Delete non-final spec. + // var ( a T; b T ) + // ----- + end = decl.Specs[index+1].Pos() + } + return []Edit{{ + Pos: pos, + End: end, + }} +} + +// DeleteDecl returns edits to delete the ast.Decl identified by curDecl. +// +// TODO(adonovan): add test suite. +func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []Edit { + decl := curDecl.Node().(ast.Decl) + + ek, _ := curDecl.ParentEdge() + switch ek { + case edge.DeclStmt_Decl: + return DeleteStmt(tokFile, curDecl.Parent()) + + case edge.File_Decls: + pos, end := decl.Pos(), decl.End() + if doc := astutil.DocComment(decl); doc != nil { + pos = doc.Pos() + } + + // Delete free-floating comments on same line as rparen. + // var (...) // comment + var ( + file = curDecl.Parent().Node().(*ast.File) + lineOf = tokFile.Line + declEndLine = lineOf(decl.End()) + ) + for _, cg := range file.Comments { + for _, c := range cg.List { + if c.Pos() < end { + continue // too early + } + commentEndLine := lineOf(c.End()) + if commentEndLine > declEndLine { + break // too late + } else if lineOf(c.Pos()) == declEndLine && commentEndLine == declEndLine { + end = c.End() + } + } + } + + return []Edit{{ + Pos: pos, + End: end, + }} + + default: + panic(fmt.Sprintf("Decl parent is %v, want DeclStmt or File", ek)) + } +} + +// find leftmost Pos bigger than start and rightmost less than end +func filterPos(nds []*ast.Comment, start, end token.Pos) (token.Pos, token.Pos, bool) { + l, r := end, token.NoPos + ok := false + for _, n := range nds { + if n.Pos() > start && n.Pos() < l { + l = n.Pos() + ok = true + } + if n.End() <= end && n.End() > r { + r = n.End() + ok = true + } + } + return l, r, ok +} + +// DeleteStmt returns the edits to remove the [ast.Stmt] identified by +// curStmt if it recognizes the context. It returns nil otherwise. 
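// [Editorial usage sketch, not part of the patch: callers must treat a
// nil result as "could not delete". For example, assuming a surrounding
// analysis.SuggestedFix named fix (Edit aliases analysis.TextEdit):
//
//	if edits := DeleteStmt(tokFile, curStmt); edits != nil {
//		fix.TextEdits = append(fix.TextEdits, edits...)
//	}
// ]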
+// TODO(pjw, adonovan): it should not return nil, it should return an error +// +// DeleteStmt is called with just the AST so it has trouble deciding if +// a comment is associated with the statement to be deleted. For instance, +// +// for /*A*/ init()/*B*/;/*C*/cond()/*D*/;/*E*/post() /*F*/ { /*G*/} +// +// comments B and C are indistinguishable, as are D and E. That is, as the +// AST does not say where the semicolons are, B and C could go either +// with the init() or the cond(), so cannot be removed safely. The same +// is true for D, E, and the post(). (And there are other similar cases.) +// But the other comments can be removed as they are unambiguously +// associated with the statement being deleted. In particular, +// it removes whole lines like +// +// stmt // comment +func DeleteStmt(file *token.File, curStmt inspector.Cursor) []Edit { + // if the stmt is on a line by itself, or a range of lines, delete the whole thing + // including comments. Except for the heads of switches, type + // switches, and for-statements that's the usual case. Complexity occurs where + // there are multiple statements on the same line, and adjacent comments. + + // In that case we remove some adjacent comments: + // In me()/*A*/;b(), comment A cannot be removed, because the ast + // is indistinguishable from me();/*A*/b() + // and the same for cases like switch me()/*A*/; x.(type) { + + // this would be more precise with the file contents, or if the ast + // contained the location of semicolons + var ( + stmt = curStmt.Node().(ast.Stmt) + tokFile = file + lineOf = tokFile.Line + stmtStartLine = lineOf(stmt.Pos()) + stmtEndLine = lineOf(stmt.End()) + + leftSyntax, rightSyntax token.Pos // pieces of parent node on stmt{Start,End}Line + leftComments, rightComments []*ast.Comment // comments before/after stmt on the same line + ) + + // remember the Pos that are on the same line as stmt + use := func(left, right token.Pos) { + if lineOf(left) == stmtStartLine { + leftSyntax = left + } + if lineOf(right) == stmtEndLine { + rightSyntax = right + } + } + + // find the comments, if any, on the same line +Big: + for _, cg := range astutil.EnclosingFile(curStmt).Comments { + for _, co := range cg.List { + if lineOf(co.End()) < stmtStartLine { + continue + } else if lineOf(co.Pos()) > stmtEndLine { + break Big // no more are possible + } + if lineOf(co.End()) == stmtStartLine && co.End() <= stmt.Pos() { + // comment is before the statement + leftComments = append(leftComments, co) + } else if lineOf(co.Pos()) == stmtEndLine && co.Pos() >= stmt.End() { + // comment is after the statement + rightComments = append(rightComments, co) + } + } + } + + // find any other syntax on the same line + var ( + leftStmt, rightStmt token.Pos // end/start positions of sibling statements in a []Stmt list + inStmtList = false + curParent = curStmt.Parent() + ) + switch parent := curParent.Node().(type) { + case *ast.BlockStmt: + use(parent.Lbrace, parent.Rbrace) + inStmtList = true + case *ast.CaseClause: + use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + inStmtList = true + case *ast.CommClause: + if parent.Comm == stmt { + return nil // maybe the user meant to remove the entire CommClause?
+ } + use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + inStmtList = true + case *ast.ForStmt: + use(parent.For, parent.Body.Lbrace) + // special handling, as init;cond;post BlockStmt is not a statement list + if parent.Init != nil && parent.Cond != nil && stmt == parent.Init && lineOf(parent.Cond.Pos()) == lineOf(stmt.End()) { + rightStmt = parent.Cond.Pos() + } else if parent.Post != nil && parent.Cond != nil && stmt == parent.Post && lineOf(parent.Cond.End()) == lineOf(stmt.Pos()) { + leftStmt = parent.Cond.End() + } + case *ast.IfStmt: + switch stmt { + case parent.Init: + use(parent.If, parent.Body.Lbrace) + case parent.Else: + // stmt is the {...} in "if cond {} else {...}" and removing + // it would require removing the 'else' keyword, but the ast + // does not contain its position. + return nil + } + case *ast.SwitchStmt: + use(parent.Switch, parent.Body.Lbrace) + case *ast.TypeSwitchStmt: + if stmt == parent.Assign { + return nil // don't remove .(type) + } + use(parent.Switch, parent.Body.Lbrace) + default: + return nil // not one of ours + } + + if inStmtList { + // find the siblings, if any, on the same line + if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { + if _, ok := prev.Node().(ast.Stmt); ok { + leftStmt = prev.Node().End() // preceding statement ends on same line + } + } + if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { + rightStmt = next.Node().Pos() // following statement begins on same line + } + } + + // compute the left and right limits of the edit + var leftEdit, rightEdit token.Pos + if leftStmt.IsValid() { + leftEdit = stmt.Pos() // can't remove preceding comments: a()/*A*/; me() + } else if leftSyntax.IsValid() { + // remove intervening leftComments + if a, _, ok := filterPos(leftComments, leftSyntax, stmt.Pos()); ok { + leftEdit = a + } else { + leftEdit = stmt.Pos() + } + } else { // remove whole line + for leftEdit = stmt.Pos(); lineOf(leftEdit) == stmtStartLine; leftEdit-- { + } + if leftEdit < stmt.Pos() { + leftEdit++ // beginning of line + } + } + if rightStmt.IsValid() { + rightEdit = stmt.End() // can't remove following comments + } else if rightSyntax.IsValid() { + // remove intervening rightComments + if _, b, ok := filterPos(rightComments, stmt.End(), rightSyntax); ok { + rightEdit = b + } else { + rightEdit = stmt.End() + } + } else { // remove whole line + fend := token.Pos(file.Base()) + token.Pos(file.Size()) + for rightEdit = stmt.End(); fend >= rightEdit && lineOf(rightEdit) == stmtEndLine; rightEdit++ { + } + // don't remove \n if there was other stuff earlier + if leftSyntax.IsValid() || leftStmt.IsValid() { + rightEdit-- + } + } + + return []Edit{{Pos: leftEdit, End: rightEdit}} +} + +// DeleteUnusedVars computes the edits required to delete the +// declarations of any local variables whose last uses are in the +// curDelend subtree, which is about to be deleted. +func DeleteUnusedVars(index *typeindex.Index, info *types.Info, tokFile *token.File, curDelend inspector.Cursor) []Edit { + // TODO(adonovan): we might want to generalize this by + // splitting the two phases below, so that we can gather + // across a whole sequence of deletions then finally compute the + // set of variables that are no longer wanted. + + // Count number of deletions of each var.
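// [Editorial note, not part of the patch: phase one, below, counts the
// uses of each local variable inside the doomed subtree; phase two
// deletes a declaration only when that count equals the variable's
// total number of uses in the index. For instance:
//
//	tmp := f() // declaration: deleted by phase two
//	g(tmp)     // sole use, inside the deleted subtree (phase one)
// ]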
+	delcount := make(map[*types.Var]int)
+	for curId := range curDelend.Preorder((*ast.Ident)(nil)) {
+		id := curId.Node().(*ast.Ident)
+		if v, ok := info.Uses[id].(*types.Var); ok &&
+			typesinternal.GetVarKind(v) == typesinternal.LocalVar { // always false before go1.25
+			delcount[v]++
+		}
+	}
+
+	// Delete declaration of each var that became unused.
+	var edits []Edit
+	for v, count := range delcount {
+		if len(slices.Collect(index.Uses(v))) == count {
+			if curDefId, ok := index.Def(v); ok {
+				edits = append(edits, DeleteVar(tokFile, info, curDefId)...)
+			}
+		}
+	}
+	return edits
+}
+
+func eolComment(n ast.Node) *ast.CommentGroup {
+	// TODO(adonovan): support:
+	//	func f() {...} // comment
+	switch n := n.(type) {
+	case *ast.GenDecl:
+		if !n.TokPos.IsValid() && len(n.Specs) == 1 {
+			return eolComment(n.Specs[0])
+		}
+	case *ast.ValueSpec:
+		return n.Comment
+	case *ast.TypeSpec:
+		return n.Comment
+	}
+	return nil
+}
diff --git a/vendor/golang.org/x/tools/internal/refactor/edit.go b/vendor/golang.org/x/tools/internal/refactor/edit.go
new file mode 100644
index 000000000..42be9a54b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/refactor/edit.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package refactor
+
+// This is the only file in this package that should import analysis.
+//
+// TODO(adonovan): consider unaliasing the type to break the
+// dependency. (The ergonomics of slice append are unfortunate.)
+
+import "golang.org/x/tools/go/analysis"
+
+// An Edit describes a deletion and/or an insertion.
+type Edit = analysis.TextEdit
diff --git a/vendor/golang.org/x/tools/internal/refactor/imports.go b/vendor/golang.org/x/tools/internal/refactor/imports.go
new file mode 100644
index 000000000..e1860ab06
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/refactor/imports.go
@@ -0,0 +1,149 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package refactor
+
+// This file defines operations for computing edits to imports.
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+	pathpkg "path"
+	"strconv"
+
+	"golang.org/x/tools/internal/packagepath"
+)
+
+// AddImport returns the prefix (either "pkg." or "") that should be
+// used to qualify references to the desired symbol (member) imported
+// from the specified package, plus any necessary edits to the file's
+// import declaration to add a new import.
+//
+// If the import already exists, and is accessible at pos, AddImport
+// returns the existing name and no edits. (If the existing import is
+// a dot import, the prefix is "".)
+//
+// Otherwise, it adds a new import, using a local name derived from
+// the preferred name. To request a blank import, use a preferredName
+// of "_", and discard the prefix result; member is ignored in this
+// case.
+//
+// AddImport accepts the caller's implicit claim that the imported
+// package declares member.
+//
+// AddImport does not mutate its arguments.
+func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (prefix string, edits []Edit) {
+	// Find innermost enclosing lexical block.
+	scope := info.Scopes[file].Innermost(pos)
+	if scope == nil {
+		panic("no enclosing lexical block")
+	}
+
+	// Is there an existing import of this package?
+	// If so, are we in its scope?
(not shadowed) + for _, spec := range file.Imports { + pkgname := info.PkgNameOf(spec) + if pkgname != nil && pkgname.Imported().Path() == pkgpath { + name := pkgname.Name() + if preferredName == "_" { + // Request for blank import; any existing import will do. + return "", nil + } + if name == "." { + // The scope of ident must be the file scope. + if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { + return "", nil + } + } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { + return name + ".", nil + } + } + } + + // We must add a new import. + + // Ensure we have a fresh name. + newName := preferredName + if preferredName != "_" { + newName = FreshName(scope, pos, preferredName) + prefix = newName + "." + } + + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + if newName == preferredName && newName == pathpkg.Base(pkgpath) { + newName = "" + } + + return prefix, AddImportEdits(file, newName, pkgpath) +} + +// AddImportEdits returns the edits to add an import of the specified +// package, without any analysis of whether this is necessary or safe. +// If name is nonempty, it is used as an explicit [ImportSpec.Name]. +// +// A sequence of calls to AddImportEdits that each add the file's +// first import (or in a file that does not have a grouped import) may +// result in multiple import declarations, rather than a single one +// with multiple ImportSpecs. However, a subsequent run of +// x/tools/cmd/goimports ([imports.Process]) will combine them. +// +// AddImportEdits does not mutate the AST. +func AddImportEdits(file *ast.File, name, pkgpath string) []Edit { + newText := strconv.Quote(pkgpath) + if name != "" { + newText = name + " " + newText + } + + // Create a new import declaration either before the first existing + // declaration (which must exist), including its comments; or + // inside the declaration, if it is an import group. + decl0 := file.Decls[0] + before := decl0.Pos() + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc.Pos() + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc.Pos() + } + } + var pos token.Pos + if gd, ok := decl0.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { + // Have existing grouped import ( ... ) decl. + if packagepath.IsStdPackage(pkgpath) && len(gd.Specs) > 0 { + // Add spec for a std package before + // first existing spec, followed by + // a blank line if the next one is non-std. + first := gd.Specs[0].(*ast.ImportSpec) + pos = first.Pos() + if !packagepath.IsStdPackage(first.Path.Value) { + newText += "\n" + } + newText += "\n\t" + } else { + // Add spec at end of group. + pos = gd.Rparen + newText = "\t" + newText + "\n" + } + } else { + // No import decl, or non-grouped import. + // Add a new import decl before first decl. + // (gofmt will merge multiple import decls.) + // + // TODO(adonovan): do better here; plunder the + // mergeImports logic from [imports.Process]. + pos = before + newText = "import " + newText + "\n\n" + } + return []Edit{{ + Pos: pos, + End: pos, + NewText: []byte(newText), + }} +} diff --git a/vendor/golang.org/x/tools/internal/refactor/refactor.go b/vendor/golang.org/x/tools/internal/refactor/refactor.go new file mode 100644 index 000000000..8664377f8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/refactor/refactor.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package refactor provides operators to compute common textual edits +// for refactoring tools. +// +// This package should not use features of the analysis API other than [Edit]. +package refactor + +import ( + "fmt" + "go/token" + "go/types" +) + +// FreshName returns the name of an identifier that is undefined +// at the specified position, based on the preferred name. +// +// TODO(adonovan): refine this to choose a fresh name only when there +// would be a conflict with the existing declaration: it's fine to +// redeclare a name in a narrower scope so long as there are no free +// references to the outer name from within the narrower scope. +func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { + newName := preferred + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferred, i) + } + return newName +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 96ad6c582..f41431c94 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,354 +12,516 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03k\x83\x01D\x14"}, - {"bytes", "n*Y\x03\fG\x02\x02"}, + {"archive/tar", "\x03q\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\r"}, + {"archive/zip", "\x02\x04g\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0f\x04"}, + {"bufio", "\x03q\x86\x01D\x15"}, + {"bytes", "t+[\x03\fH\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xed\x01A"}, - {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, - {"compress/lzw", "\x02l\x03\x80\x01"}, - {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, - {"container/heap", "\xb3\x02"}, + {"compress/bzip2", "\x02\x02\xf6\x01A"}, + {"compress/flate", "\x02r\x03\x83\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04g\a\x03\x15nU"}, + {"compress/lzw", "\x02r\x03\x83\x01"}, + {"compress/zlib", "\x02\x04g\a\x03\x13\x01o"}, + {"container/heap", "\xbc\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "n\\m\x01\r"}, - {"crypto", "\x83\x01nC"}, - {"crypto/aes", "\x10\n\a\x93\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, - {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, - {"crypto/dsa", "A\x04)\x83\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, - {"crypto/elliptic", "0>\x83\x01\r9"}, - {"crypto/fips140", " \x05"}, - {"crypto/hkdf", "-\x13\x01-\x15"}, - {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, - {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, + {"context", "t\\p\x01\x0e"}, + {"crypto", "\x8a\x01pC"}, + {"crypto/aes", "\x10\v\t\x99\x02"}, + {"crypto/cipher", "\x03!\x01\x01 \x12\x1c,Z"}, + {"crypto/des", "\x10\x16 .,\x9d\x01\x03"}, + {"crypto/dsa", "F\x03+\x86\x01\r"}, + {"crypto/ecdh", "\x03\v\r\x10\x04\x17\x03\x0f\x1c\x86\x01"}, + {"crypto/ecdsa", 
"\x0e\x05\x03\x05\x01\x10\b\v\x06\x01\x03\x0e\x01\x1c\x86\x01\r\x05L\x01"}, + {"crypto/ed25519", "\x0e\x1f\x12\a\x03\b\a\x1cI=C"}, + {"crypto/elliptic", "4@\x86\x01\r9"}, + {"crypto/fips140", "#\x05\x95\x01\x98\x01"}, + {"crypto/hkdf", "0\x15\x01.\x16"}, + {"crypto/hmac", "\x1b\x16\x14\x01\x122"}, + {"crypto/hpke", "\x03\v\x02\x03\x04\x01\f\x01\x05\x1f\x05\a\x01\x01\x1d\x03\x13\x16\x9b\x01\x1c"}, + {"crypto/internal/boring", "\x0e\x02\x0el"}, + {"crypto/internal/boring/bbig", "\x1b\xec\x01N"}, + {"crypto/internal/boring/bcache", "\xc1\x02\x14"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "F"}, - {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, - {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, - {"crypto/internal/fips140/alias", "\xcb\x02"}, - {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, - {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, - {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/ssh", "%^"}, - {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, - {"crypto/internal/fips140cache", "\xaa\x02\r&"}, + {"crypto/internal/constanttime", ""}, + {"crypto/internal/cryptotest", "\x03\r\v\b%\x10\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\f"}, + {"crypto/internal/entropy", "K"}, + {"crypto/internal/entropy/v1.0.0", "D0\x95\x018\x14"}, + {"crypto/internal/fips140", "C1\xbf\x01\v\x17"}, + {"crypto/internal/fips140/aes", "\x03 \x03\x02\x14\x05\x01\x01\x05,\x95\x014"}, + {"crypto/internal/fips140/aes/gcm", "#\x01\x02\x02\x02\x12\x05\x01\x06,\x92\x01"}, + {"crypto/internal/fips140/alias", "\xd5\x02"}, + {"crypto/internal/fips140/bigmod", "(\x19\x01\x06,\x95\x01"}, + {"crypto/internal/fips140/check", "#\x0e\a\t\x02\xb7\x01["}, + {"crypto/internal/fips140/check/checktest", "(\x8b\x02\""}, + {"crypto/internal/fips140/drbg", "\x03\x1f\x01\x01\x04\x14\x05\n)\x86\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03 \x05\x02\n\r3\x86\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03 
\x04\x01\x02\a\x03\x06:\x16pF"}, + {"crypto/internal/fips140/ed25519", "\x03 \x05\x02\x04\f:\xc9\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1f\t\a\x123\x95\x017"}, + {"crypto/internal/fips140/edwards25519/field", "(\x14\x053\x95\x01"}, + {"crypto/internal/fips140/hkdf", "\x03 \x05\t\a<\x16"}, + {"crypto/internal/fips140/hmac", "\x03 \x15\x01\x01:\x16"}, + {"crypto/internal/fips140/mldsa", "\x03\x1c\x04\x05\x02\x0e\x01\x03\x053\x95\x017"}, + {"crypto/internal/fips140/mlkem", "\x03 \x05\x02\x0f\x03\x053\xcc\x01"}, + {"crypto/internal/fips140/nistec", "\x1f\t\r\f3\x95\x01*\r\x15"}, + {"crypto/internal/fips140/nistec/fiat", "(\x148\x95\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03 \x05\t\a<\x16"}, + {"crypto/internal/fips140/rsa", "\x03\x1c\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"}, + {"crypto/internal/fips140/sha256", "\x03 \x1e\x01\x06,\x16\x7f"}, + {"crypto/internal/fips140/sha3", "\x03 \x19\x05\x012\x95\x01L"}, + {"crypto/internal/fips140/sha512", "\x03 \x1e\x01\x06,\x16\x7f"}, + {"crypto/internal/fips140/ssh", "(b"}, + {"crypto/internal/fips140/subtle", "\x1f\a\x1b\xc8\x01"}, + {"crypto/internal/fips140/tls12", "\x03 \x05\t\a\x02:\x16"}, + {"crypto/internal/fips140/tls13", "\x03 \x05\b\b\t3\x16"}, + {"crypto/internal/fips140cache", "\xb3\x02\r'"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, - {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, + {"crypto/internal/fips140deps/byteorder", "\xa0\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb5\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xbd\x01"}, + {"crypto/internal/fips140deps/time", "\xcf\x02"}, + {"crypto/internal/fips140hash", "9\x1d4\xcb\x01"}, + {"crypto/internal/fips140only", "\x17\x13\x0e\x01\x01Pp"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, - {"crypto/internal/impl", "\xb5\x02"}, - {"crypto/internal/randutil", "\xf1\x01\x12"}, - {"crypto/internal/sysrand", "nn! 
\r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "n"}, - {"crypto/md5", "\x0e3-\x15\x16g"}, - {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\x0e\x01-\x15"}, - {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, - {"crypto/rc4", "#\x1e-\xc6\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, - {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, - {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc8\x01"}, - {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x9b\x01W"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, - {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, - {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, - {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, - {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, - {"debug/plan9obj", "g\a\x03e\x1b,"}, - {"embed", "n*@\x19\x01S"}, + {"crypto/internal/impl", "\xbe\x02"}, + {"crypto/internal/rand", "\x1b\x0f s=["}, + {"crypto/internal/randutil", "\xfa\x01\x12"}, + {"crypto/internal/sysrand", "tq! \r\r\x01\x01\r\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "t"}, + {"crypto/md5", "\x0e8.\x16\x16i"}, + {"crypto/mlkem", "\x0e%"}, + {"crypto/mlkem/mlkemtest", "3\x13\b&"}, + {"crypto/pbkdf2", "6\x0f\x01.\x16"}, + {"crypto/rand", "\x1b\x0f\x1c\x03+\x86\x01\rN"}, + {"crypto/rc4", "& .\xc9\x01"}, + {"crypto/rsa", "\x0e\r\x01\v\x10\x0e\x01\x03\b\a\x1c\x03\x133=\f\x01"}, + {"crypto/sha1", "\x0e\r+\x02,\x16\x16\x15T"}, + {"crypto/sha256", "\x0e\r\x1dR"}, + {"crypto/sha3", "\x0e+Q\xcb\x01"}, + {"crypto/sha512", "\x0e\r\x1fP"}, + {"crypto/subtle", "\x1f\x1d\x9f\x01z"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\x01\t\x01\x18\x01\x0f\x01\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\x0e\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xaa\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x017\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0f\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "j\x06\a\x90\x01H"}, + {"database/sql", "\x03\nQ\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\x0e\x02\x02\x02"}, + {"database/sql/driver", "\rg\x03\xb7\x01\x0f\x12"}, + {"debug/buildinfo", "\x03^\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f "}, + {"debug/dwarf", "\x03j\a\x03\x83\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06W\r\a\x03g\x1b\x01\f \x17\x01\x17"}, + {"debug/gosym", "\x03j\n$\xa1\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06W\r\ng\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06W\r\a\x03g\x1c,\x17\x01\x17"}, + {"debug/plan9obj", "m\a\x03g\x1c,"}, + {"embed", "t+B\x19\x01T"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf1\x01C"}, - {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, - 
{"encoding/base32", "\xf1\x01A\x02"}, - {"encoding/base64", "\x99\x01XA\x02"}, - {"encoding/binary", "n\x83\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"}, - {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "n\x03\x80\x01A\x03"}, - {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03c\b\x83\x01A\x03"}, - {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xca\x01\x81\x01"}, - {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "nE>\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, - {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, - {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03n\x01\v\x01\x02qD"}, - {"go/importer", "s\a\x01\x01\x04\x01p9"}, - {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, - {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, - {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, - {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbb\x01z"}, - {"hash", "\xf1\x01"}, - {"hash/adler32", "n\x15\x16"}, - {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, - {"hash/crc64", "n\x15\x16\x9e\x01"}, - {"hash/fnv", "n\x15\x16g"}, - {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, - {"html", "\xb5\x02\x02\x12"}, - {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02l\x1ee\x0f4\x03\x01"}, + {"encoding/ascii85", "\xfa\x01C"}, + {"encoding/asn1", "\x03q\x03g(\x01'\r\x02\x01\x11\x03\x01"}, + {"encoding/base32", "\xfa\x01A\x02"}, + {"encoding/base64", "\xa0\x01ZA\x02"}, + {"encoding/binary", "t\x86\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01q\x03\x83\x01D\x13\x02"}, + {"encoding/gob", "\x02f\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x10\x02"}, + {"encoding/hex", "t\x03\x83\x01A\x03"}, + {"encoding/json", "\x03\x01d\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x11\x01\x01\x02"}, + {"encoding/pem", "\x03i\b\x86\x01A\x03"}, + {"encoding/xml", "\x02\x01e\f\x03\x83\x014\x05\n\x01\x02\x11\x02"}, + {"errors", "\xd0\x01\x85\x01"}, + {"expvar", "qLA\b\v\x15\r\b\x02\x03\x01\x12"}, + {"flag", "h\f\x03\x83\x01,\b\x05\b\x02\x01\x11"}, + {"fmt", "tF'\x19\f \b\r\x02\x03\x13"}, + {"go/ast", "\x03\x01s\x0f\x01s\x03)\b\r\x02\x01\x13\x02"}, + {"go/build", "\x02\x01q\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x13\x02\x02"}, + {"go/build/constraint", "t\xc9\x01\x01\x13\x02"}, + {"go/constant", "w\x10\x7f\x01\x024\x01\x02\x13"}, + {"go/doc", "\x04s\x01\x05\n=61\x10\x02\x01\x13\x02"}, + {"go/doc/comment", "\x03t\xc4\x01\x01\x01\x01\x13\x02"}, + {"go/format", "\x03t\x01\f\x01\x02sD"}, + {"go/importer", "y\a\x01\x02\x04\x01r9"}, + {"go/internal/gccgoimporter", "\x02\x01^\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\r\b"}, + {"go/internal/gcimporter", 
"\x02u\x10\x010\x05\r0,\x15\x03\x02"}, + {"go/internal/scannerhooks", "\x87\x01"}, + {"go/internal/srcimporter", "w\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x15"}, + {"go/parser", "\x03q\x03\x01\x02\b\x04\x01s\x01+\x06\x12"}, + {"go/printer", "w\x01\x02\x03\ns\f \x15\x02\x01\x02\f\x05\x02"}, + {"go/scanner", "\x03t\v\x05s2\x10\x01\x14\x02"}, + {"go/token", "\x04s\x86\x01>\x02\x03\x01\x10\x02"}, + {"go/types", "\x03\x01\x06j\x03\x01\x03\t\x03\x024\x063\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x10\x02\x02"}, + {"go/version", "\xc2\x01|"}, + {"hash", "\xfa\x01"}, + {"hash/adler32", "t\x16\x16"}, + {"hash/crc32", "t\x16\x16\x15\x8b\x01\x01\x14"}, + {"hash/crc64", "t\x16\x16\xa0\x01"}, + {"hash/fnv", "t\x16\x16i"}, + {"hash/maphash", "\x8a\x01\x11<~"}, + {"html", "\xbe\x02\x02\x13"}, + {"html/template", "\x03n\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\r\x01\x03\x02"}, + {"image", "\x02r\x1fg\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8c\x01"}, - {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, - {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02l\x1d\x01\x04a"}, - {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, - {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, - {"internal/abi", "\xb5\x01\x96\x01"}, - {"internal/asan", "\xcb\x02"}, - {"internal/bisect", "\xaa\x02\r\x01"}, - {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xae\x01\x9d\x01"}, + {"image/color/palette", "\x93\x01"}, + {"image/draw", "\x92\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05l\x03\x1b\x01\x01\x01\vZ\x0f"}, + {"image/internal/imageutil", "\x92\x01"}, + {"image/jpeg", "\x02r\x1e\x01\x04c"}, + {"image/png", "\x02\ad\n\x13\x02\x06\x01gC"}, + {"index/suffixarray", "\x03j\a\x86\x01\f+\n\x01"}, + {"internal/abi", "\xbc\x01\x99\x01"}, + {"internal/asan", "\xd5\x02"}, + {"internal/bisect", "\xb3\x02\r\x01"}, + {"internal/buildcfg", "wHg\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb5\x01\xa0\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, + {"internal/cgrouptest", "w[T\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\xa0\x01\x15\a\x99\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "q-_"}, - {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, - {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, - {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcb\x02"}, - {"internal/coverage/slicereader", "g\n\x80\x01Z"}, - {"internal/coverage/slicewriter", "q\x80\x01"}, - {"internal/coverage/stringtab", "q8\x04D"}, + {"internal/coverage/cfile", "q\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\x0e\x06"}, + {"internal/coverage/cformat", "\x04s.\x04Q\v6\x01\x02\x0e"}, + {"internal/coverage/cmerge", "w.a"}, + {"internal/coverage/decodecounter", "m\n.\v\x02H,\x17\x18"}, + {"internal/coverage/decodemeta", "\x02k\n\x17\x17\v\x02H,"}, + {"internal/coverage/encodecounter", "\x02k\n.\f\x01\x02F\v!\x15"}, + {"internal/coverage/encodemeta", 
"\x02\x01j\n\x13\x04\x17\r\x02F,/"}, + {"internal/coverage/pods", "\x04s.\x81\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xd5\x02"}, + {"internal/coverage/slicereader", "m\n\x83\x01["}, + {"internal/coverage/slicewriter", "w\x83\x01"}, + {"internal/coverage/stringtab", "w9\x04F"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcb\x02"}, - {"internal/dag", "\x04m\xc1\x01\x03"}, - {"internal/diff", "\x03n\xc2\x01\x02"}, - {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "n*@\x1a@"}, - {"internal/fmtsort", "\x04\xa1\x02\r"}, - {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xd5\x02"}, + {"internal/dag", "\x04s\xc4\x01\x03"}, + {"internal/diff", "\x03t\xc5\x01\x02"}, + {"internal/exportdata", "\x02\x01q\x03\x02e\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "t+B\x1a@"}, + {"internal/fmtsort", "\x04\xaa\x02\r"}, + {"internal/fuzz", "\x03\nH\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\r\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, + {"internal/godebug", "\x9d\x01!\x82\x01\x01\x14"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa6\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/itoa", ""}, - {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, - {"internal/msan", "\xcb\x02"}, + {"internal/lazyregexp", "\xa6\x02\v\r\x02"}, + {"internal/lazytemplate", "\xfa\x01,\x18\x02\r"}, + {"internal/msan", "\xd5\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "f\x8b\x01,"}, - {"internal/oserror", "n"}, - {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "l\x8e\x01,"}, + {"internal/oserror", "t"}, + {"internal/pkgbits", "\x03R\x18\a\x03\x04\fs\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, + {"internal/poll", "tl\x05\x159\r\x01\x01\r\x06"}, + {"internal/profile", "\x03\x04m\x03\x83\x017\n\x01\x01\x01\x11"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb7\x01"}, - {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, - {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, - {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, - {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, - {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, - {"net/http/httptrace", "\rFnF\x14\n "}, - {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, - {"net/http/internal", "\x02\x01k\x03\x80\x01"}, - {"net/http/internal/ascii", "\xb5\x02\x12"}, - {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, - {"net/http/internal/testcert", "\xb5\x02"}, - {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, + {"log/slog/internal/benchmarks", "\rg\x03\x83\x01\x06\x03:\x12"}, + {"log/slog/internal/buffer", "\xc0\x02"}, 
+ {"log/syslog", "t\x03\x87\x01\x12\x16\x18\x02\x0f"}, + {"maps", "\xfd\x01X"}, + {"math", "\xb5\x01TL"}, + {"math/big", "\x03q\x03)\x15E\f\x03\x020\x02\x01\x02\x15"}, + {"math/big/internal/asmgen", "\x03\x01s\x92\x012\x03"}, + {"math/bits", "\xd5\x02"}, + {"math/cmplx", "\x86\x02\x03"}, + {"math/rand", "\xbd\x01I:\x01\x14"}, + {"math/rand/v2", "t,\x03c\x03L"}, + {"mime", "\x02\x01i\b\x03\x83\x01\v!\x15\x03\x02\x11\x02"}, + {"mime/multipart", "\x02\x01N#\x03F=\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x17"}, + {"mime/quotedprintable", "\x02\x01t\x83\x01"}, + {"net", "\x04\tg+\x1e\n\x05\x13\x01\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\r\x06\a"}, + {"net/http", "\x02\x01\x03\x01\x04\x02D\b\x13\x01\a\x03F=\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\x0e\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02W\x1b\x03\x83\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x11\x0e"}, + {"net/http/cookiejar", "\x04p\x03\x99\x01\x01\b\a\x05\x16\x03\x02\x0f\x04"}, + {"net/http/fcgi", "\x02\x01\n`\a\x03\x83\x01\x16\x01\x01\x14\x18\x02\x0f"}, + {"net/http/httptest", "\x02\x01\nL\x02\x1b\x01\x83\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0f\x0e"}, + {"net/http/httptrace", "\rLnI\x14\n!"}, + {"net/http/httputil", "\x02\x01\ng\x03\x83\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x01\x0e\x0e"}, + {"net/http/internal", "\x02\x01q\x03\x83\x01"}, + {"net/http/internal/ascii", "\xbe\x02\x13"}, + {"net/http/internal/httpcommon", "\rg\x03\x9f\x01\x0e\x01\x17\x01\x01\x02\x1d\x02"}, + {"net/http/internal/testcert", "\xbe\x02"}, + {"net/http/pprof", "\x02\x01\nj\x19-\x02\x0e-\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x11"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "q\xc6\x01\x02"}, - {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, - {"net/netip", "\x04j*\x01$@\x034\x16"}, - {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, - {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, - {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, - {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, - {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, - {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, - {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, - {"os/exec/internal/fdtest", "\xb9\x02"}, - {"os/signal", "\r\x90\x02\x15\x05\x02"}, - {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, - {"path", "n*\xb1\x01"}, - {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, - {"plugin", "n"}, - {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, + {"net/internal/socktest", "w\xc9\x01\x02"}, + {"net/mail", "\x02r\x03\x83\x01\x04\x0f\x03\x14\x1a\x02\x0f\x04"}, + {"net/netip", "\x04p+\x01f\x034\x17"}, + {"net/rpc", "\x02m\x05\x03\x10\ni\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "q\x03\x03\x83\x01\x16\x11\x1f"}, + {"net/smtp", "\x194\f\x13\b\x03\x83\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01q\x03\x83\x01\f\n-\x01\x02\x15"}, + {"net/url", "t\x03Fc\v\x10\x02\x01\x17"}, + {"os", "t+\x01\x19\x03\x10\x14\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\r\x06"}, + {"os/exec", "\x03\ngI'\x01\x15\x01+\x06\a\n\x01\x04\r"}, + {"os/exec/internal/fdtest", "\xc2\x02"}, + {"os/signal", "\r\x99\x02\x15\x05\x02"}, + {"os/user", "\x02\x01q\x03\x83\x01,\r\n\x01\x02"}, + {"path", "t+\xb4\x01"}, + {"path/filepath", "t+\x1aB+\r\b\x03\x04\x11"}, + {"plugin", "t"}, + {"reflect", "t'\x04\x1d\x13\b\x04\x05\x17\x06\t-\n\x03\x11\x02\x02"}, {"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", 
"\x03\xee\x018\t\x02\x01\x02\x10\x02"}, - {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, - {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, - {"runtime/coverage", "\xa0\x01Q"}, - {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, - {"runtime/metrics", "\xb7\x01F-!"}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, - {"runtime/race", "\xb0\x02"}, + {"regexp", "\x03\xf7\x018\t\x02\x01\x02\x11\x02"}, + {"regexp/syntax", "\xbb\x02\x01\x01\x01\x02\x11\x02"}, + {"runtime", "\x9b\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0e\x03\x01\x01\x01\x02\x01\x01\x01\x02\x01\x04\x01\x10\x18L"}, + {"runtime/coverage", "\xa7\x01S"}, + {"runtime/debug", "wUZ\r\b\x02\x01\x11\x06"}, + {"runtime/metrics", "\xbe\x01H-\""}, + {"runtime/pprof", "\x02\x01\x01\x03\x06`\a\x03$$\x0f\v!\f \r\b\x01\x01\x01\x02\x02\n\x03\x06"}, + {"runtime/race", "\xb9\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, - {"slices", "\x04\xf0\x01\fK"}, - {"sort", "\xca\x0162"}, - {"strconv", "n*@%\x03I"}, - {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, + {"runtime/trace", "\rg\x03z\t9\b\x05\x01\x0e\x06"}, + {"slices", "\x04\xf9\x01\fL"}, + {"sort", "\xd0\x0192"}, + {"strconv", "t+A\x01r"}, + {"strings", "t'\x04B\x19\x03\f7\x11\x02\x02"}, {"structs", ""}, - {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, - {"sync/atomic", "\xcb\x02"}, - {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, - {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, - {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, - {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, - {"testing/iotest", "\x03k\x03\x80\x01\x04"}, - {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, - {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"}, - {"testing/synctest", "\xda\x01`\x11"}, - {"text/scanner", "\x03n\x80\x01,*\x02"}, - {"text/tabwriter", "q\x80\x01X"}, - {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, - {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, - {"time", "n*\x1e\"(*\r\x02\x12"}, - {"time/tzdata", "n\xcb\x01\x12"}, + {"sync", "\xcf\x01\x13\x01P\x0e\x14"}, + {"sync/atomic", "\xd5\x02"}, + {"syscall", "t(\x03\x01\x1c\n\x03\x06\r\x04S\b\x05\x01\x14"}, + {"testing", "\x03\ng\x02\x01X\x17\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\x0e\x02\x04"}, + {"testing/cryptotest", "QOZ\x124\x03\x12"}, + {"testing/fstest", "t\x03\x83\x01\x01\n&\x10\x03\t\b"}, + {"testing/internal/testdeps", "\x02\v\xae\x01/\x10,\x03\x05\x03\x06\a\x02\x0f"}, + {"testing/iotest", "\x03q\x03\x83\x01\x04"}, + {"testing/quick", "v\x01\x8f\x01\x05#\x10\x11"}, + {"testing/slogtest", "\rg\x03\x89\x01.\x05\x10\f"}, + {"testing/synctest", "\xe3\x01`\x12"}, + {"text/scanner", "\x03t\x83\x01,+\x02"}, + {"text/tabwriter", "w\x83\x01Y"}, + {"text/template", "t\x03C@\x01\n \x01\x05\x01\x02\x05\v\x02\x0e\x03\x02"}, + {"text/template/parse", "\x03t\xbc\x01\n\x01\x13\x02"}, + {"time", "t+\x1e$(*\r\x02\x13"}, + {"time/tzdata", "t\xce\x01\x13"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, + {"unique", "\x9b\x01!%\x01Q\r\x01\x14\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, + 
{"vendor/golang.org/x/crypto/chacha20", "\x10]\a\x95\x01*'"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10\aV\a\xe2\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "j\n\x03\x90\x01'!\n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, - {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, - {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, - {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, - {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, - {"weak", "\x94\x01\x96\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xd5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "X\x15\x9c\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "t\xc7\x01"}, + {"vendor/golang.org/x/net/http/httpguts", "\x90\x02\x14\x1a\x15\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "t\x03\x99\x01\x10\x05\x01\x18\x15\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03q\x03\x83\x01F"}, + {"vendor/golang.org/x/net/idna", "w\x8f\x018\x15\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03j\a\x03\x83\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\f"}, + {"vendor/golang.org/x/sys/cpu", "\xa6\x02\r\n\x01\x17"}, + {"vendor/golang.org/x/text/secure/bidirule", "t\xdf\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03q\x86\x01Y"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bl\x87\x01>\x17"}, + {"vendor/golang.org/x/text/unicode/norm", "m\n\x83\x01F\x13\x11"}, + {"weak", "\x9b\x01\x98\x01\""}, } + +// bootstrap is the list of bootstrap packages extracted from cmd/dist. 
+var bootstrap = map[string]bool{ + "cmp": true, + "cmd/asm": true, + "cmd/asm/internal/arch": true, + "cmd/asm/internal/asm": true, + "cmd/asm/internal/flags": true, + "cmd/asm/internal/lex": true, + "cmd/cgo": true, + "cmd/compile": true, + "cmd/compile/internal/abi": true, + "cmd/compile/internal/abt": true, + "cmd/compile/internal/amd64": true, + "cmd/compile/internal/arm": true, + "cmd/compile/internal/arm64": true, + "cmd/compile/internal/base": true, + "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/bloop": true, + "cmd/compile/internal/compare": true, + "cmd/compile/internal/coverage": true, + "cmd/compile/internal/deadlocals": true, + "cmd/compile/internal/devirtualize": true, + "cmd/compile/internal/dwarfgen": true, + "cmd/compile/internal/escape": true, + "cmd/compile/internal/gc": true, + "cmd/compile/internal/importer": true, + "cmd/compile/internal/inline": true, + "cmd/compile/internal/inline/inlheur": true, + "cmd/compile/internal/inline/interleaved": true, + "cmd/compile/internal/ir": true, + "cmd/compile/internal/liveness": true, + "cmd/compile/internal/logopt": true, + "cmd/compile/internal/loong64": true, + "cmd/compile/internal/loopvar": true, + "cmd/compile/internal/mips": true, + "cmd/compile/internal/mips64": true, + "cmd/compile/internal/noder": true, + "cmd/compile/internal/objw": true, + "cmd/compile/internal/pgoir": true, + "cmd/compile/internal/pkginit": true, + "cmd/compile/internal/ppc64": true, + "cmd/compile/internal/rangefunc": true, + "cmd/compile/internal/reflectdata": true, + "cmd/compile/internal/riscv64": true, + "cmd/compile/internal/rttype": true, + "cmd/compile/internal/s390x": true, + "cmd/compile/internal/slice": true, + "cmd/compile/internal/ssa": true, + "cmd/compile/internal/ssagen": true, + "cmd/compile/internal/staticdata": true, + "cmd/compile/internal/staticinit": true, + "cmd/compile/internal/syntax": true, + "cmd/compile/internal/test": true, + "cmd/compile/internal/typebits": true, + "cmd/compile/internal/typecheck": true, + "cmd/compile/internal/types": true, + "cmd/compile/internal/types2": true, + "cmd/compile/internal/walk": true, + "cmd/compile/internal/wasm": true, + "cmd/compile/internal/x86": true, + "cmd/internal/archive": true, + "cmd/internal/bio": true, + "cmd/internal/codesign": true, + "cmd/internal/dwarf": true, + "cmd/internal/edit": true, + "cmd/internal/gcprog": true, + "cmd/internal/goobj": true, + "cmd/internal/hash": true, + "cmd/internal/macho": true, + "cmd/internal/obj": true, + "cmd/internal/obj/arm": true, + "cmd/internal/obj/arm64": true, + "cmd/internal/obj/loong64": true, + "cmd/internal/obj/mips": true, + "cmd/internal/obj/ppc64": true, + "cmd/internal/obj/riscv": true, + "cmd/internal/obj/s390x": true, + "cmd/internal/obj/wasm": true, + "cmd/internal/obj/x86": true, + "cmd/internal/objabi": true, + "cmd/internal/par": true, + "cmd/internal/pgo": true, + "cmd/internal/pkgpath": true, + "cmd/internal/quoted": true, + "cmd/internal/src": true, + "cmd/internal/sys": true, + "cmd/internal/telemetry": true, + "cmd/internal/telemetry/counter": true, + "cmd/link": true, + "cmd/link/internal/amd64": true, + "cmd/link/internal/arm": true, + "cmd/link/internal/arm64": true, + "cmd/link/internal/benchmark": true, + "cmd/link/internal/dwtest": true, + "cmd/link/internal/ld": true, + "cmd/link/internal/loadelf": true, + "cmd/link/internal/loader": true, + "cmd/link/internal/loadmacho": true, + "cmd/link/internal/loadpe": true, + "cmd/link/internal/loadxcoff": true, + "cmd/link/internal/loong64": true, + 
"cmd/link/internal/mips": true, + "cmd/link/internal/mips64": true, + "cmd/link/internal/ppc64": true, + "cmd/link/internal/riscv64": true, + "cmd/link/internal/s390x": true, + "cmd/link/internal/sym": true, + "cmd/link/internal/wasm": true, + "cmd/link/internal/x86": true, + "compress/flate": true, + "compress/zlib": true, + "container/heap": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/macho": true, + "debug/pe": true, + "go/build/constraint": true, + "go/constant": true, + "go/version": true, + "internal/abi": true, + "internal/coverage": true, + "cmd/internal/cov/covcmd": true, + "internal/bisect": true, + "internal/buildcfg": true, + "internal/exportdata": true, + "internal/goarch": true, + "internal/godebugs": true, + "internal/goexperiment": true, + "internal/goroot": true, + "internal/gover": true, + "internal/goversion": true, + "internal/lazyregexp": true, + "internal/pkgbits": true, + "internal/platform": true, + "internal/profile": true, + "internal/race": true, + "internal/runtime/gc": true, + "internal/saferio": true, + "internal/syscall/unix": true, + "internal/types/errors": true, + "internal/unsafeheader": true, + "internal/xcoff": true, + "internal/zstd": true, + "math/bits": true, + "sort": true, +} + +// BootstrapVersion is the minor version of Go used during toolchain +// bootstrapping. Packages for which [IsBootstrapPackage] must not use +// features of Go newer than this version. +const BootstrapVersion = Version(24) // go1.24.6 diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go index f6909878a..8ecc672b8 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/import.go +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -87,3 +87,11 @@ func find(pkg string) (int, bool) { return strings.Compare(p.name, n) }) } + +// IsBootstrapPackage reports whether pkg is one of the low-level +// packages in the Go distribution that must compile with the older +// language version specified by [BootstrapVersion] during toolchain +// bootstrapping; see golang.org/s/go15bootstrap. 
+func IsBootstrapPackage(pkg string) bool { + return bootstrap[pkg] +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index c1faa50d3..33e4f505f 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -16,6 +16,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Write", Method, 0, ""}, {"(*Writer).WriteHeader", Method, 0, ""}, + {"(FileInfoNames).Gname", Method, 23, ""}, + {"(FileInfoNames).IsDir", Method, 23, ""}, + {"(FileInfoNames).ModTime", Method, 23, ""}, + {"(FileInfoNames).Mode", Method, 23, ""}, + {"(FileInfoNames).Name", Method, 23, ""}, + {"(FileInfoNames).Size", Method, 23, ""}, + {"(FileInfoNames).Sys", Method, 23, ""}, + {"(FileInfoNames).Uname", Method, 23, ""}, {"(Format).String", Method, 10, ""}, {"ErrFieldTooLong", Var, 0, ""}, {"ErrHeader", Var, 0, ""}, @@ -225,6 +233,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Buffer).Grow", Method, 1, ""}, {"(*Buffer).Len", Method, 0, ""}, {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Peek", Method, 26, ""}, {"(*Buffer).Read", Method, 0, ""}, {"(*Buffer).ReadByte", Method, 0, ""}, {"(*Buffer).ReadBytes", Method, 0, ""}, @@ -337,6 +346,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Write", Method, 0, ""}, {"(CorruptInputError).Error", Method, 0, ""}, {"(InternalError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(Reader).ReadByte", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"CorruptInputError", Type, 0, ""}, @@ -408,6 +420,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Reset", Method, 2, ""}, {"(*Writer).Write", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"DefaultCompression", Const, 0, ""}, @@ -425,6 +438,11 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0, ""}, }, "container/heap": { + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Pop", Method, 0, ""}, + {"(Interface).Push", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"Fix", Func, 2, "func(h Interface, i int)"}, {"Init", Func, 0, "func(h Interface)"}, {"Interface", Type, 0, ""}, @@ -468,6 +486,10 @@ var PackageSymbols = map[string][]Symbol{ {"Ring.Value", Field, 0, ""}, }, "context": { + {"(Context).Deadline", Method, 7, ""}, + {"(Context).Done", Method, 7, ""}, + {"(Context).Err", Method, 7, ""}, + {"(Context).Value", Method, 7, ""}, {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"}, {"Background", Func, 7, "func() Context"}, {"CancelCauseFunc", Type, 20, ""}, @@ -487,17 +509,31 @@ var PackageSymbols = map[string][]Symbol{ {"WithoutCancel", Func, 21, "func(parent Context) Context"}, }, "crypto": { + {"(Decapsulator).Decapsulate", Method, 26, ""}, + {"(Decapsulator).Encapsulator", Method, 26, ""}, + {"(Decrypter).Decrypt", Method, 5, ""}, + {"(Decrypter).Public", Method, 5, ""}, + {"(Encapsulator).Bytes", Method, 26, ""}, + {"(Encapsulator).Encapsulate", Method, 26, ""}, {"(Hash).Available", Method, 0, ""}, {"(Hash).HashFunc", Method, 4, ""}, {"(Hash).New", Method, 0, ""}, {"(Hash).Size", Method, 0, ""}, {"(Hash).String", Method, 15, ""}, + {"(MessageSigner).Public", Method, 25, ""}, + {"(MessageSigner).Sign", Method, 25, ""}, + {"(MessageSigner).SignMessage", 
Method, 25, ""}, + {"(Signer).Public", Method, 4, ""}, + {"(Signer).Sign", Method, 4, ""}, + {"(SignerOpts).HashFunc", Method, 4, ""}, {"BLAKE2b_256", Const, 9, ""}, {"BLAKE2b_384", Const, 9, ""}, {"BLAKE2b_512", Const, 9, ""}, {"BLAKE2s_256", Const, 9, ""}, + {"Decapsulator", Type, 26, ""}, {"Decrypter", Type, 5, ""}, {"DecrypterOpts", Type, 5, ""}, + {"Encapsulator", Type, 26, ""}, {"Hash", Type, 0, ""}, {"MD4", Const, 0, ""}, {"MD5", Const, 0, ""}, @@ -529,6 +565,16 @@ var PackageSymbols = map[string][]Symbol{ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, }, "crypto/cipher": { + {"(AEAD).NonceSize", Method, 2, ""}, + {"(AEAD).Open", Method, 2, ""}, + {"(AEAD).Overhead", Method, 2, ""}, + {"(AEAD).Seal", Method, 2, ""}, + {"(Block).BlockSize", Method, 0, ""}, + {"(Block).Decrypt", Method, 0, ""}, + {"(Block).Encrypt", Method, 0, ""}, + {"(BlockMode).BlockSize", Method, 0, ""}, + {"(BlockMode).CryptBlocks", Method, 0, ""}, + {"(Stream).XORKeyStream", Method, 0, ""}, {"(StreamReader).Read", Method, 0, ""}, {"(StreamWriter).Close", Method, 0, ""}, {"(StreamWriter).Write", Method, 0, ""}, @@ -580,7 +626,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey", Type, 0, ""}, {"PublicKey.Parameters", Field, 0, ""}, {"PublicKey.Y", Field, 0, ""}, - {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, + {"Sign", Func, 0, "func(random io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, }, "crypto/ecdh": { @@ -593,7 +639,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*PublicKey).Bytes", Method, 20, ""}, {"(*PublicKey).Curve", Method, 20, ""}, {"(*PublicKey).Equal", Method, 20, ""}, - {"Curve", Type, 20, ""}, + {"(Curve).GenerateKey", Method, 20, ""}, + {"(Curve).NewPrivateKey", Method, 20, ""}, + {"(Curve).NewPublicKey", Method, 20, ""}, + {"(KeyExchanger).Curve", Method, 26, ""}, + {"(KeyExchanger).ECDH", Method, 26, ""}, + {"(KeyExchanger).PublicKey", Method, 26, ""}, + {"KeyExchanger", Type, 26, ""}, {"P256", Func, 20, "func() Curve"}, {"P384", Func, 20, "func() Curve"}, {"P521", Func, 20, "func() Curve"}, @@ -622,7 +674,7 @@ var PackageSymbols = map[string][]Symbol{ {"(PublicKey).Params", Method, 0, ""}, {"(PublicKey).ScalarBaseMult", Method, 0, ""}, {"(PublicKey).ScalarMult", Method, 0, ""}, - {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"}, + {"GenerateKey", Func, 0, "func(c elliptic.Curve, r io.Reader) (*PrivateKey, error)"}, {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"}, {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"}, {"PrivateKey", Type, 0, ""}, @@ -633,7 +685,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey.X", Field, 0, ""}, {"PublicKey.Y", Field, 0, ""}, {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, - {"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"}, + {"SignASN1", Func, 15, "func(r io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"}, {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, {"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"}, }, @@ -644,7 +696,7 @@ var PackageSymbols = map[string][]Symbol{ {"(PrivateKey).Seed", Method, 13, ""}, 
{"(PrivateKey).Sign", Method, 13, ""}, {"(PublicKey).Equal", Method, 15, ""}, - {"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"}, + {"GenerateKey", Func, 13, "func(random io.Reader) (PublicKey, PrivateKey, error)"}, {"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"}, {"Options", Type, 20, ""}, {"Options.Context", Field, 20, ""}, @@ -666,6 +718,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*CurveParams).Params", Method, 0, ""}, {"(*CurveParams).ScalarBaseMult", Method, 0, ""}, {"(*CurveParams).ScalarMult", Method, 0, ""}, + {"(Curve).Add", Method, 0, ""}, + {"(Curve).Double", Method, 0, ""}, + {"(Curve).IsOnCurve", Method, 0, ""}, + {"(Curve).Params", Method, 0, ""}, + {"(Curve).ScalarBaseMult", Method, 0, ""}, + {"(Curve).ScalarMult", Method, 0, ""}, {"Curve", Type, 0, ""}, {"CurveParams", Type, 0, ""}, {"CurveParams.B", Field, 0, ""}, @@ -687,6 +745,9 @@ var PackageSymbols = map[string][]Symbol{ }, "crypto/fips140": { {"Enabled", Func, 24, "func() bool"}, + {"Enforced", Func, 26, "func() bool"}, + {"Version", Func, 26, "func() string"}, + {"WithoutEnforcement", Func, 26, "func(f func())"}, }, "crypto/hkdf": { {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"}, @@ -697,6 +758,54 @@ var PackageSymbols = map[string][]Symbol{ {"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"}, {"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"}, }, + "crypto/hpke": { + {"(*Recipient).Export", Method, 26, ""}, + {"(*Recipient).Open", Method, 26, ""}, + {"(*Sender).Export", Method, 26, ""}, + {"(*Sender).Seal", Method, 26, ""}, + {"(AEAD).ID", Method, 26, ""}, + {"(KDF).ID", Method, 26, ""}, + {"(KEM).DeriveKeyPair", Method, 26, ""}, + {"(KEM).GenerateKey", Method, 26, ""}, + {"(KEM).ID", Method, 26, ""}, + {"(KEM).NewPrivateKey", Method, 26, ""}, + {"(KEM).NewPublicKey", Method, 26, ""}, + {"(PrivateKey).Bytes", Method, 26, ""}, + {"(PrivateKey).KEM", Method, 26, ""}, + {"(PrivateKey).PublicKey", Method, 26, ""}, + {"(PublicKey).Bytes", Method, 26, ""}, + {"(PublicKey).KEM", Method, 26, ""}, + {"AES128GCM", Func, 26, "func() AEAD"}, + {"AES256GCM", Func, 26, "func() AEAD"}, + {"ChaCha20Poly1305", Func, 26, "func() AEAD"}, + {"DHKEM", Func, 26, "func(curve ecdh.Curve) KEM"}, + {"ExportOnly", Func, 26, "func() AEAD"}, + {"HKDFSHA256", Func, 26, "func() KDF"}, + {"HKDFSHA384", Func, 26, "func() KDF"}, + {"HKDFSHA512", Func, 26, "func() KDF"}, + {"MLKEM1024", Func, 26, "func() KEM"}, + {"MLKEM1024P384", Func, 26, "func() KEM"}, + {"MLKEM768", Func, 26, "func() KEM"}, + {"MLKEM768P256", Func, 26, "func() KEM"}, + {"MLKEM768X25519", Func, 26, "func() KEM"}, + {"NewAEAD", Func, 26, "func(id uint16) (AEAD, error)"}, + {"NewDHKEMPrivateKey", Func, 26, "func(priv ecdh.KeyExchanger) (PrivateKey, error)"}, + {"NewDHKEMPublicKey", Func, 26, "func(pub *ecdh.PublicKey) (PublicKey, error)"}, + {"NewHybridPrivateKey", Func, 26, "func(pq crypto.Decapsulator, t ecdh.KeyExchanger) (PrivateKey, error)"}, + {"NewHybridPublicKey", Func, 26, "func(pq crypto.Encapsulator, t *ecdh.PublicKey) (PublicKey, error)"}, + {"NewKDF", Func, 26, "func(id uint16) (KDF, error)"}, + {"NewKEM", Func, 26, "func(id uint16) (KEM, error)"}, + {"NewMLKEMPrivateKey", Func, 26, "func(priv crypto.Decapsulator) (PrivateKey, error)"}, + {"NewMLKEMPublicKey", Func, 26, "func(pub crypto.Encapsulator) (PublicKey, error)"}, + {"NewRecipient", Func, 26, "func(enc []byte, k PrivateKey, kdf KDF, aead AEAD, info []byte) 
(*Recipient, error)"}, + {"NewSender", Func, 26, "func(pk PublicKey, kdf KDF, aead AEAD, info []byte) (enc []byte, s *Sender, err error)"}, + {"Open", Func, 26, "func(k PrivateKey, kdf KDF, aead AEAD, info []byte, ciphertext []byte) ([]byte, error)"}, + {"Recipient", Type, 26, ""}, + {"SHAKE128", Func, 26, "func() KDF"}, + {"SHAKE256", Func, 26, "func() KDF"}, + {"Seal", Func, 26, "func(pk PublicKey, kdf KDF, aead AEAD, info []byte, plaintext []byte) ([]byte, error)"}, + {"Sender", Type, 26, ""}, + }, "crypto/md5": { {"BlockSize", Const, 0, ""}, {"New", Func, 0, "func() hash.Hash"}, @@ -707,9 +816,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*DecapsulationKey1024).Bytes", Method, 24, ""}, {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey1024).Encapsulator", Method, 26, ""}, {"(*DecapsulationKey768).Bytes", Method, 24, ""}, {"(*DecapsulationKey768).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey768).Encapsulator", Method, 26, ""}, {"(*EncapsulationKey1024).Bytes", Method, 24, ""}, {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""}, {"(*EncapsulationKey768).Bytes", Method, 24, ""}, @@ -731,12 +842,16 @@ var PackageSymbols = map[string][]Symbol{ {"SeedSize", Const, 24, ""}, {"SharedKeySize", Const, 24, ""}, }, + "crypto/mlkem/mlkemtest": { + {"Encapsulate1024", Func, 26, "func(ek *mlkem.EncapsulationKey1024, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + {"Encapsulate768", Func, 26, "func(ek *mlkem.EncapsulationKey768, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + }, "crypto/pbkdf2": { {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"}, }, "crypto/rand": { {"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"}, - {"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"}, + {"Prime", Func, 0, "func(r io.Reader, bits int) (*big.Int, error)"}, {"Read", Func, 0, "func(b []byte) (n int, err error)"}, {"Reader", Var, 0, ""}, {"Text", Func, 24, "func() string"}, @@ -768,6 +883,7 @@ var PackageSymbols = map[string][]Symbol{ {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"}, {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"}, {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"}, + {"EncryptOAEPWithOptions", Func, 26, "func(random io.Reader, pub *PublicKey, msg []byte, opts *OAEPOptions) ([]byte, error)"}, {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"}, {"ErrDecryption", Var, 0, ""}, {"ErrMessageTooLong", Var, 0, ""}, @@ -799,7 +915,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey.E", Field, 0, ""}, {"PublicKey.N", Field, 0, ""}, {"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"}, - {"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"}, + {"SignPSS", Func, 2, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"}, {"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"}, 
{"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"}, }, @@ -920,6 +1036,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*SessionState).Bytes", Method, 21, ""}, {"(AlertError).Error", Method, 21, ""}, {"(ClientAuthType).String", Method, 15, ""}, + {"(ClientSessionCache).Get", Method, 3, ""}, + {"(ClientSessionCache).Put", Method, 3, ""}, {"(CurveID).String", Method, 15, ""}, {"(QUICEncryptionLevel).String", Method, 21, ""}, {"(RecordHeaderError).Error", Method, 6, ""}, @@ -952,6 +1070,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo.CipherSuites", Field, 4, ""}, {"ClientHelloInfo.Conn", Field, 8, ""}, {"ClientHelloInfo.Extensions", Field, 24, ""}, + {"ClientHelloInfo.HelloRetryRequest", Field, 26, ""}, {"ClientHelloInfo.ServerName", Field, 4, ""}, {"ClientHelloInfo.SignatureSchemes", Field, 8, ""}, {"ClientHelloInfo.SupportedCurves", Field, 4, ""}, @@ -1000,6 +1119,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState.DidResume", Field, 1, ""}, {"ConnectionState.ECHAccepted", Field, 23, ""}, {"ConnectionState.HandshakeComplete", Field, 0, ""}, + {"ConnectionState.HelloRetryRequest", Field, 26, ""}, {"ConnectionState.NegotiatedProtocol", Field, 0, ""}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""}, {"ConnectionState.OCSPResponse", Field, 5, ""}, @@ -1054,8 +1174,10 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEncryptionLevelEarly", Const, 21, ""}, {"QUICEncryptionLevelHandshake", Const, 21, ""}, {"QUICEncryptionLevelInitial", Const, 21, ""}, + {"QUICErrorEvent", Const, 26, ""}, {"QUICEvent", Type, 21, ""}, {"QUICEvent.Data", Field, 21, ""}, + {"QUICEvent.Err", Field, 26, ""}, {"QUICEvent.Kind", Field, 21, ""}, {"QUICEvent.Level", Field, 21, ""}, {"QUICEvent.SessionState", Field, 23, ""}, @@ -1086,6 +1208,8 @@ var PackageSymbols = map[string][]Symbol{ {"RequestClientCert", Const, 0, ""}, {"RequireAndVerifyClientCert", Const, 0, ""}, {"RequireAnyClientCert", Const, 0, ""}, + {"SecP256r1MLKEM768", Const, 26, ""}, + {"SecP384r1MLKEM1024", Const, 26, ""}, {"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"}, {"SessionState", Type, 21, ""}, {"SessionState.EarlyData", Field, 21, ""}, @@ -1150,8 +1274,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""}, {"(CertificateInvalidError).Error", Method, 0, ""}, {"(ConstraintViolationError).Error", Method, 0, ""}, + {"(ExtKeyUsage).OID", Method, 26, ""}, + {"(ExtKeyUsage).String", Method, 26, ""}, {"(HostnameError).Error", Method, 0, ""}, {"(InsecureAlgorithmError).Error", Method, 6, ""}, + {"(KeyUsage).String", Method, 26, ""}, {"(OID).AppendBinary", Method, 24, ""}, {"(OID).AppendText", Method, 24, ""}, {"(OID).Equal", Method, 22, ""}, @@ -1306,6 +1433,7 @@ var PackageSymbols = map[string][]Symbol{ {"NoValidChains", Const, 24, ""}, {"NotAuthorizedToSign", Const, 0, ""}, {"OID", Type, 22, ""}, + {"OIDFromASN1OID", Func, 26, "func(asn1OID asn1.ObjectIdentifier) (OID, error)"}, {"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"}, {"PEMCipher", Type, 1, ""}, {"PEMCipher3DES", Const, 1, ""}, @@ -1515,6 +1643,9 @@ var PackageSymbols = map[string][]Symbol{ {"(NullInt64).Value", Method, 0, ""}, {"(NullString).Value", Method, 0, ""}, {"(NullTime).Value", Method, 13, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, {"ColumnType", Type, 8, ""}, {"Conn", Type, 9, ""}, {"DB", Type, 0, ""}, @@ -1546,8 
+1677,6 @@ var PackageSymbols = map[string][]Symbol{ {"NamedArg.Name", Field, 8, ""}, {"NamedArg.Value", Field, 8, ""}, {"Null", Type, 22, ""}, - {"Null.V", Field, 22, ""}, - {"Null.Valid", Field, 22, ""}, {"NullBool", Type, 0, ""}, {"NullBool.Bool", Field, 0, ""}, {"NullBool.Valid", Field, 0, ""}, @@ -1590,10 +1719,68 @@ var PackageSymbols = map[string][]Symbol{ {"TxOptions.ReadOnly", Field, 8, ""}, }, "database/sql/driver": { + {"(ColumnConverter).ColumnConverter", Method, 0, ""}, + {"(Conn).Begin", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).Prepare", Method, 0, ""}, + {"(ConnBeginTx).BeginTx", Method, 8, ""}, + {"(ConnPrepareContext).PrepareContext", Method, 8, ""}, + {"(Connector).Connect", Method, 10, ""}, + {"(Connector).Driver", Method, 10, ""}, + {"(Driver).Open", Method, 0, ""}, + {"(DriverContext).OpenConnector", Method, 10, ""}, + {"(Execer).Exec", Method, 0, ""}, + {"(ExecerContext).ExecContext", Method, 8, ""}, + {"(NamedValueChecker).CheckNamedValue", Method, 9, ""}, {"(NotNull).ConvertValue", Method, 0, ""}, {"(Null).ConvertValue", Method, 0, ""}, + {"(Pinger).Ping", Method, 8, ""}, + {"(Queryer).Query", Method, 1, ""}, + {"(QueryerContext).QueryContext", Method, 8, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Rows).Close", Method, 0, ""}, + {"(Rows).Columns", Method, 0, ""}, + {"(Rows).Next", Method, 0, ""}, {"(RowsAffected).LastInsertId", Method, 0, ""}, {"(RowsAffected).RowsAffected", Method, 0, ""}, + {"(RowsColumnTypeDatabaseTypeName).Close", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).ColumnTypeDatabaseTypeName", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Columns", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Next", Method, 8, ""}, + {"(RowsColumnTypeLength).Close", Method, 8, ""}, + {"(RowsColumnTypeLength).ColumnTypeLength", Method, 8, ""}, + {"(RowsColumnTypeLength).Columns", Method, 8, ""}, + {"(RowsColumnTypeLength).Next", Method, 8, ""}, + {"(RowsColumnTypeNullable).Close", Method, 8, ""}, + {"(RowsColumnTypeNullable).ColumnTypeNullable", Method, 8, ""}, + {"(RowsColumnTypeNullable).Columns", Method, 8, ""}, + {"(RowsColumnTypeNullable).Next", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Close", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).ColumnTypePrecisionScale", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Columns", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Next", Method, 8, ""}, + {"(RowsColumnTypeScanType).Close", Method, 8, ""}, + {"(RowsColumnTypeScanType).ColumnTypeScanType", Method, 8, ""}, + {"(RowsColumnTypeScanType).Columns", Method, 8, ""}, + {"(RowsColumnTypeScanType).Next", Method, 8, ""}, + {"(RowsNextResultSet).Close", Method, 8, ""}, + {"(RowsNextResultSet).Columns", Method, 8, ""}, + {"(RowsNextResultSet).HasNextResultSet", Method, 8, ""}, + {"(RowsNextResultSet).Next", Method, 8, ""}, + {"(RowsNextResultSet).NextResultSet", Method, 8, ""}, + {"(SessionResetter).ResetSession", Method, 10, ""}, + {"(Stmt).Close", Method, 0, ""}, + {"(Stmt).Exec", Method, 0, ""}, + {"(Stmt).NumInput", Method, 0, ""}, + {"(Stmt).Query", Method, 0, ""}, + {"(StmtExecContext).ExecContext", Method, 8, ""}, + {"(StmtQueryContext).QueryContext", Method, 8, ""}, + {"(Tx).Commit", Method, 0, ""}, + {"(Tx).Rollback", Method, 0, ""}, + {"(Validator).IsValid", Method, 15, ""}, + {"(ValueConverter).ConvertValue", Method, 0, ""}, + {"(Valuer).Value", Method, 0, ""}, {"Bool", Var, 0, ""}, {"ColumnConverter", Type, 0, ""}, {"Conn", Type, 0, ""}, @@ 
-1754,6 +1941,9 @@ var PackageSymbols = map[string][]Symbol{ {"(DecodeError).Error", Method, 0, ""}, {"(Tag).GoString", Method, 0, ""}, {"(Tag).String", Method, 0, ""}, + {"(Type).Common", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"AddrType", Type, 0, ""}, {"AddrType.BasicType", Field, 0, ""}, {"ArrayType", Type, 0, ""}, @@ -3161,6 +3351,7 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_B16", Const, 20, ""}, {"R_LARCH_B21", Const, 20, ""}, {"R_LARCH_B26", Const, 20, ""}, + {"R_LARCH_CALL36", Const, 26, ""}, {"R_LARCH_CFA", Const, 22, ""}, {"R_LARCH_COPY", Const, 19, ""}, {"R_LARCH_DELETE", Const, 22, ""}, @@ -3218,11 +3409,25 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_SUB64", Const, 19, ""}, {"R_LARCH_SUB8", Const, 19, ""}, {"R_LARCH_SUB_ULEB128", Const, 22, ""}, + {"R_LARCH_TLS_DESC32", Const, 26, ""}, + {"R_LARCH_TLS_DESC64", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_CALL", Const, 26, ""}, + {"R_LARCH_TLS_DESC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_LD", Const, 26, ""}, + {"R_LARCH_TLS_DESC_LO12", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PCREL20_S2", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_LO12", Const, 26, ""}, {"R_LARCH_TLS_DTPMOD32", Const, 19, ""}, {"R_LARCH_TLS_DTPMOD64", Const, 19, ""}, {"R_LARCH_TLS_DTPREL32", Const, 19, ""}, {"R_LARCH_TLS_DTPREL64", Const, 19, ""}, {"R_LARCH_TLS_GD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_GD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_IE64_LO20", Const, 20, ""}, @@ -3233,11 +3438,15 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""}, {"R_LARCH_TLS_LD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_LE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_LE64_LO20", Const, 20, ""}, + {"R_LARCH_TLS_LE_ADD_R", Const, 26, ""}, {"R_LARCH_TLS_LE_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LE_HI20_R", Const, 26, ""}, {"R_LARCH_TLS_LE_LO12", Const, 20, ""}, + {"R_LARCH_TLS_LE_LO12_R", Const, 26, ""}, {"R_LARCH_TLS_TPREL32", Const, 19, ""}, {"R_LARCH_TLS_TPREL64", Const, 19, ""}, {"R_MIPS", Type, 6, ""}, @@ -3942,6 +4151,7 @@ var PackageSymbols = map[string][]Symbol{ {"(FatArch).ImportedSymbols", Method, 3, ""}, {"(FatArch).Section", Method, 3, ""}, {"(FatArch).Segment", Method, 3, ""}, + {"(Load).Raw", Method, 0, ""}, {"(LoadBytes).Raw", Method, 0, ""}, {"(LoadCmd).GoString", Method, 0, ""}, {"(LoadCmd).String", Method, 0, ""}, @@ -4588,6 +4798,12 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16, ""}, }, "encoding": { + {"(BinaryAppender).AppendBinary", Method, 24, ""}, + {"(BinaryMarshaler).MarshalBinary", Method, 2, ""}, + {"(BinaryUnmarshaler).UnmarshalBinary", Method, 2, ""}, + {"(TextAppender).AppendText", Method, 24, ""}, + {"(TextMarshaler).MarshalText", Method, 2, ""}, + {"(TextUnmarshaler).UnmarshalText", Method, 2, ""}, {"BinaryAppender", Type, 24, ""}, {"BinaryMarshaler", Type, 2, ""}, {"BinaryUnmarshaler", Type, 2, ""}, @@ -4703,6 +4919,17 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0, ""}, }, "encoding/binary": { + {"(AppendByteOrder).AppendUint16", Method, 19, ""}, + 
{"(AppendByteOrder).AppendUint32", Method, 19, ""}, + {"(AppendByteOrder).AppendUint64", Method, 19, ""}, + {"(AppendByteOrder).String", Method, 19, ""}, + {"(ByteOrder).PutUint16", Method, 0, ""}, + {"(ByteOrder).PutUint32", Method, 0, ""}, + {"(ByteOrder).PutUint64", Method, 0, ""}, + {"(ByteOrder).String", Method, 0, ""}, + {"(ByteOrder).Uint16", Method, 0, ""}, + {"(ByteOrder).Uint32", Method, 0, ""}, + {"(ByteOrder).Uint64", Method, 0, ""}, {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"}, {"AppendByteOrder", Type, 19, ""}, {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"}, @@ -4765,6 +4992,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Decoder).DecodeValue", Method, 0, ""}, {"(*Encoder).Encode", Method, 0, ""}, {"(*Encoder).EncodeValue", Method, 0, ""}, + {"(GobDecoder).GobDecode", Method, 0, ""}, + {"(GobEncoder).GobEncode", Method, 0, ""}, {"CommonType", Type, 0, ""}, {"CommonType.Id", Field, 0, ""}, {"CommonType.Name", Field, 0, ""}, @@ -4817,10 +5046,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnsupportedTypeError).Error", Method, 0, ""}, {"(*UnsupportedValueError).Error", Method, 0, ""}, {"(Delim).String", Method, 5, ""}, + {"(Marshaler).MarshalJSON", Method, 0, ""}, {"(Number).Float64", Method, 1, ""}, {"(Number).Int64", Method, 1, ""}, {"(Number).String", Method, 1, ""}, {"(RawMessage).MarshalJSON", Method, 8, ""}, + {"(Unmarshaler).UnmarshalJSON", Method, 0, ""}, {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"}, {"Decoder", Type, 0, ""}, {"Delim", Type, 5, ""}, @@ -4892,10 +5123,15 @@ var PackageSymbols = map[string][]Symbol{ {"(CharData).Copy", Method, 0, ""}, {"(Comment).Copy", Method, 0, ""}, {"(Directive).Copy", Method, 0, ""}, + {"(Marshaler).MarshalXML", Method, 2, ""}, + {"(MarshalerAttr).MarshalXMLAttr", Method, 2, ""}, {"(ProcInst).Copy", Method, 0, ""}, {"(StartElement).Copy", Method, 0, ""}, {"(StartElement).End", Method, 2, ""}, + {"(TokenReader).Token", Method, 10, ""}, {"(UnmarshalError).Error", Method, 0, ""}, + {"(Unmarshaler).UnmarshalXML", Method, 2, ""}, + {"(UnmarshalerAttr).UnmarshalXMLAttr", Method, 2, ""}, {"Attr", Type, 0, ""}, {"Attr.Name", Field, 0, ""}, {"Attr.Value", Field, 0, ""}, @@ -4953,6 +5189,7 @@ var PackageSymbols = map[string][]Symbol{ }, "errors": { {"As", Func, 13, "func(err error, target any) bool"}, + {"AsType", Func, 26, "func[E error](err error) (E, bool)"}, {"ErrUnsupported", Var, 21, ""}, {"Is", Func, 13, "func(err error, target error) bool"}, {"Join", Func, 20, "func(errs ...error) error"}, @@ -4981,6 +5218,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*String).Value", Method, 8, ""}, {"(Func).String", Method, 0, ""}, {"(Func).Value", Method, 8, ""}, + {"(Var).String", Method, 0, ""}, {"Do", Func, 0, "func(f func(KeyValue))"}, {"Float", Type, 0, ""}, {"Func", Type, 0, ""}, @@ -5036,6 +5274,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*FlagSet).Var", Method, 0, ""}, {"(*FlagSet).Visit", Method, 0, ""}, {"(*FlagSet).VisitAll", Method, 0, ""}, + {"(Getter).Get", Method, 2, ""}, + {"(Getter).Set", Method, 2, ""}, + {"(Getter).String", Method, 2, ""}, + {"(Value).Set", Method, 0, ""}, + {"(Value).String", Method, 0, ""}, {"Arg", Func, 0, "func(i int) string"}, {"Args", Func, 0, "func() []string"}, {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"}, @@ -5087,10 +5330,24 @@ var PackageSymbols = map[string][]Symbol{ {"VisitAll", Func, 0, "func(fn func(*Flag))"}, }, "fmt": { + {"(Formatter).Format", Method, 0, ""}, + 
{"(GoStringer).GoString", Method, 0, ""}, + {"(ScanState).Read", Method, 0, ""}, + {"(ScanState).ReadRune", Method, 0, ""}, + {"(ScanState).SkipSpace", Method, 0, ""}, + {"(ScanState).Token", Method, 0, ""}, + {"(ScanState).UnreadRune", Method, 0, ""}, + {"(ScanState).Width", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, + {"(State).Flag", Method, 0, ""}, + {"(State).Precision", Method, 0, ""}, + {"(State).Width", Method, 0, ""}, + {"(State).Write", Method, 0, ""}, + {"(Stringer).String", Method, 0, ""}, {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, - {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"Errorf", Func, 0, "func(format string, a ...any) (err error)"}, {"FormatString", Func, 20, "func(state State, verb rune) string"}, {"Formatter", Type, 0, ""}, {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, @@ -5155,6 +5412,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*DeclStmt).Pos", Method, 0, ""}, {"(*DeferStmt).End", Method, 0, ""}, {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Directive).End", Method, 26, ""}, + {"(*Directive).ParseArgs", Method, 26, ""}, + {"(*Directive).Pos", Method, 26, ""}, {"(*Ellipsis).End", Method, 0, ""}, {"(*Ellipsis).Pos", Method, 0, ""}, {"(*EmptyStmt).End", Method, 0, ""}, @@ -5242,7 +5502,18 @@ var PackageSymbols = map[string][]Symbol{ {"(CommentMap).Filter", Method, 1, ""}, {"(CommentMap).String", Method, 1, ""}, {"(CommentMap).Update", Method, 1, ""}, + {"(Decl).End", Method, 0, ""}, + {"(Decl).Pos", Method, 0, ""}, + {"(Expr).End", Method, 0, ""}, + {"(Expr).Pos", Method, 0, ""}, + {"(Node).End", Method, 0, ""}, + {"(Node).Pos", Method, 0, ""}, {"(ObjKind).String", Method, 0, ""}, + {"(Spec).End", Method, 0, ""}, + {"(Spec).Pos", Method, 0, ""}, + {"(Stmt).End", Method, 0, ""}, + {"(Stmt).Pos", Method, 0, ""}, + {"(Visitor).Visit", Method, 0, ""}, {"ArrayType", Type, 0, ""}, {"ArrayType.Elt", Field, 0, ""}, {"ArrayType.Lbrack", Field, 0, ""}, @@ -5265,6 +5536,7 @@ var PackageSymbols = map[string][]Symbol{ {"BasicLit", Type, 0, ""}, {"BasicLit.Kind", Field, 0, ""}, {"BasicLit.Value", Field, 0, ""}, + {"BasicLit.ValueEnd", Field, 26, ""}, {"BasicLit.ValuePos", Field, 0, ""}, {"BinaryExpr", Type, 0, ""}, {"BinaryExpr.Op", Field, 0, ""}, @@ -5314,19 +5586,26 @@ var PackageSymbols = map[string][]Symbol{ {"CompositeLit.Rbrace", Field, 0, ""}, {"CompositeLit.Type", Field, 0, ""}, {"Con", Const, 0, ""}, - {"Decl", Type, 0, ""}, {"DeclStmt", Type, 0, ""}, {"DeclStmt.Decl", Field, 0, ""}, {"DeferStmt", Type, 0, ""}, {"DeferStmt.Call", Field, 0, ""}, {"DeferStmt.Defer", Field, 0, ""}, + {"Directive", Type, 26, ""}, + {"Directive.Args", Field, 26, ""}, + {"Directive.ArgsPos", Field, 26, ""}, + {"Directive.Name", Field, 26, ""}, + {"Directive.Slash", Field, 26, ""}, + {"Directive.Tool", Field, 26, ""}, + {"DirectiveArg", Type, 26, ""}, + {"DirectiveArg.Arg", Field, 26, ""}, + {"DirectiveArg.Pos", Field, 26, ""}, {"Ellipsis", Type, 0, ""}, {"Ellipsis.Ellipsis", Field, 0, ""}, {"Ellipsis.Elt", Field, 0, ""}, {"EmptyStmt", Type, 0, ""}, {"EmptyStmt.Implicit", Field, 5, ""}, {"EmptyStmt.Semicolon", Field, 0, ""}, - {"Expr", Type, 0, ""}, {"ExprStmt", Type, 0, ""}, {"ExprStmt.X", Field, 0, ""}, {"Field", Type, 0, ""}, @@ -5469,6 +5748,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Lparen", Field, 0, ""}, {"ParenExpr.Rparen", Field, 0, ""}, {"ParenExpr.X", Field, 0, ""}, + 
{"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"}, @@ -5509,11 +5789,9 @@ var PackageSymbols = map[string][]Symbol{ {"SliceExpr.Slice3", Field, 2, ""}, {"SliceExpr.X", Field, 0, ""}, {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"}, - {"Spec", Type, 0, ""}, {"StarExpr", Type, 0, ""}, {"StarExpr.Star", Field, 0, ""}, {"StarExpr.X", Field, 0, ""}, - {"Stmt", Type, 0, ""}, {"StructType", Type, 0, ""}, {"StructType.Fields", Field, 0, ""}, {"StructType.Incomplete", Field, 0, ""}, @@ -5668,10 +5946,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyntaxError).Error", Method, 16, ""}, {"(*TagExpr).Eval", Method, 16, ""}, {"(*TagExpr).String", Method, 16, ""}, + {"(Expr).Eval", Method, 16, ""}, + {"(Expr).String", Method, 16, ""}, {"AndExpr", Type, 16, ""}, {"AndExpr.X", Field, 16, ""}, {"AndExpr.Y", Field, 16, ""}, - {"Expr", Type, 16, ""}, {"GoVersion", Func, 21, "func(x Expr) string"}, {"IsGoBuild", Func, 16, "func(line string) bool"}, {"IsPlusBuild", Func, 16, "func(line string) bool"}, @@ -5690,6 +5969,9 @@ var PackageSymbols = map[string][]Symbol{ }, "go/constant": { {"(Kind).String", Method, 18, ""}, + {"(Value).ExactString", Method, 6, ""}, + {"(Value).Kind", Method, 5, ""}, + {"(Value).String", Method, 5, ""}, {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"}, {"BitLen", Func, 5, "func(x Value) int"}, {"Bool", Const, 5, ""}, @@ -5728,7 +6010,6 @@ var PackageSymbols = map[string][]Symbol{ {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"}, {"Unknown", Const, 5, ""}, {"Val", Func, 13, "func(x Value) any"}, - {"Value", Type, 5, ""}, }, "go/doc": { {"(*Package).Filter", Method, 0, ""}, @@ -5812,7 +6093,6 @@ var PackageSymbols = map[string][]Symbol{ {"(*Printer).HTML", Method, 19, ""}, {"(*Printer).Markdown", Method, 19, ""}, {"(*Printer).Text", Method, 19, ""}, - {"Block", Type, 19, ""}, {"Code", Type, 19, ""}, {"Code.Text", Field, 19, ""}, {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"}, @@ -5857,7 +6137,6 @@ var PackageSymbols = map[string][]Symbol{ {"Printer.TextCodePrefix", Field, 19, ""}, {"Printer.TextPrefix", Field, 19, ""}, {"Printer.TextWidth", Field, 19, ""}, - {"Text", Type, 19, ""}, }, "go/format": { {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"}, @@ -5929,6 +6208,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).AddLineColumnInfo", Method, 11, ""}, {"(*File).AddLineInfo", Method, 0, ""}, {"(*File).Base", Method, 0, ""}, + {"(*File).End", Method, 26, ""}, {"(*File).Line", Method, 0, ""}, {"(*File).LineCount", Method, 0, ""}, {"(*File).LineStart", Method, 12, ""}, @@ -6291,6 +6571,22 @@ var PackageSymbols = map[string][]Symbol{ {"(Checker).PkgNameOf", Method, 22, ""}, {"(Checker).TypeOf", Method, 5, ""}, {"(Error).Error", Method, 5, ""}, + {"(Importer).Import", Method, 5, ""}, + {"(ImporterFrom).Import", Method, 6, ""}, + {"(ImporterFrom).ImportFrom", Method, 6, ""}, + {"(Object).Exported", Method, 5, ""}, + {"(Object).Id", Method, 5, ""}, + {"(Object).Name", Method, 5, ""}, + {"(Object).Parent", Method, 5, ""}, + {"(Object).Pkg", Method, 5, ""}, + {"(Object).Pos", Method, 5, ""}, + {"(Object).String", Method, 5, ""}, + {"(Object).Type", Method, 5, ""}, + {"(Sizes).Alignof", Method, 5, ""}, + {"(Sizes).Offsetsof", Method, 5, ""}, + 
{"(Sizes).Sizeof", Method, 5, ""}, + {"(Type).String", Method, 5, ""}, + {"(Type).Underlying", Method, 5, ""}, {"(TypeAndValue).Addressable", Method, 5, ""}, {"(TypeAndValue).Assignable", Method, 5, ""}, {"(TypeAndValue).HasOk", Method, 5, ""}, @@ -6429,7 +6725,6 @@ var PackageSymbols = map[string][]Symbol{ {"NewUnion", Func, 18, "func(terms []*Term) *Union"}, {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"}, {"Nil", Type, 5, ""}, - {"Object", Type, 5, ""}, {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"}, {"Package", Type, 5, ""}, {"PackageVar", Const, 25, ""}, @@ -6500,6 +6795,33 @@ var PackageSymbols = map[string][]Symbol{ {"Lang", Func, 22, "func(x string) string"}, }, "hash": { + {"(Cloner).BlockSize", Method, 25, ""}, + {"(Cloner).Clone", Method, 25, ""}, + {"(Cloner).Reset", Method, 25, ""}, + {"(Cloner).Size", Method, 25, ""}, + {"(Cloner).Sum", Method, 25, ""}, + {"(Cloner).Write", Method, 25, ""}, + {"(Hash).BlockSize", Method, 0, ""}, + {"(Hash).Reset", Method, 0, ""}, + {"(Hash).Size", Method, 0, ""}, + {"(Hash).Sum", Method, 0, ""}, + {"(Hash).Write", Method, 0, ""}, + {"(Hash32).BlockSize", Method, 0, ""}, + {"(Hash32).Reset", Method, 0, ""}, + {"(Hash32).Size", Method, 0, ""}, + {"(Hash32).Sum", Method, 0, ""}, + {"(Hash32).Sum32", Method, 0, ""}, + {"(Hash32).Write", Method, 0, ""}, + {"(Hash64).BlockSize", Method, 0, ""}, + {"(Hash64).Reset", Method, 0, ""}, + {"(Hash64).Size", Method, 0, ""}, + {"(Hash64).Sum", Method, 0, ""}, + {"(Hash64).Sum64", Method, 0, ""}, + {"(Hash64).Write", Method, 0, ""}, + {"(XOF).BlockSize", Method, 25, ""}, + {"(XOF).Read", Method, 25, ""}, + {"(XOF).Reset", Method, 25, ""}, + {"(XOF).Write", Method, 25, ""}, {"Cloner", Type, 25, ""}, {"Hash", Type, 0, ""}, {"Hash32", Type, 0, ""}, @@ -6765,6 +7087,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*YCbCr).SubImage", Method, 0, ""}, {"(*YCbCr).YCbCrAt", Method, 4, ""}, {"(*YCbCr).YOffset", Method, 0, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(PalettedImage).At", Method, 0, ""}, + {"(PalettedImage).Bounds", Method, 0, ""}, + {"(PalettedImage).ColorIndexAt", Method, 0, ""}, + {"(PalettedImage).ColorModel", Method, 0, ""}, {"(Point).Add", Method, 0, ""}, {"(Point).Div", Method, 0, ""}, {"(Point).Eq", Method, 0, ""}, @@ -6773,6 +7102,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Point).Mul", Method, 0, ""}, {"(Point).String", Method, 0, ""}, {"(Point).Sub", Method, 0, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, {"(Rectangle).Add", Method, 0, ""}, {"(Rectangle).At", Method, 5, ""}, {"(Rectangle).Bounds", Method, 5, ""}, @@ -6897,8 +7230,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Alpha).RGBA", Method, 0, ""}, {"(Alpha16).RGBA", Method, 0, ""}, {"(CMYK).RGBA", Method, 5, ""}, + {"(Color).RGBA", Method, 0, ""}, {"(Gray).RGBA", Method, 0, ""}, {"(Gray16).RGBA", Method, 0, ""}, + {"(Model).Convert", Method, 0, ""}, {"(NRGBA).RGBA", Method, 0, ""}, {"(NRGBA64).RGBA", Method, 0, ""}, {"(NYCbCrA).RGBA", Method, 6, ""}, @@ -6976,7 +7311,19 @@ var PackageSymbols = map[string][]Symbol{ {"WebSafe", Var, 2, ""}, }, "image/draw": { + {"(Drawer).Draw", Method, 2, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(Image).Set", Method, 0, ""}, {"(Op).Draw", Method, 
2, ""}, + {"(Quantizer).Quantize", Method, 2, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, + {"(RGBA64Image).Set", Method, 17, ""}, + {"(RGBA64Image).SetRGBA64", Method, 17, ""}, {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"}, {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"}, {"Drawer", Type, 2, ""}, @@ -7011,6 +7358,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/jpeg": { {"(FormatError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(Reader).ReadByte", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"}, {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, @@ -7024,6 +7373,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/png": { {"(*Encoder).Encode", Method, 4, ""}, + {"(EncoderBufferPool).Get", Method, 9, ""}, + {"(EncoderBufferPool).Put", Method, 9, ""}, {"(FormatError).Error", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"BestCompression", Const, 4, ""}, @@ -7067,6 +7418,41 @@ var PackageSymbols = map[string][]Symbol{ {"(*SectionReader).ReadAt", Method, 0, ""}, {"(*SectionReader).Seek", Method, 0, ""}, {"(*SectionReader).Size", Method, 0, ""}, + {"(ByteReader).ReadByte", Method, 0, ""}, + {"(ByteScanner).ReadByte", Method, 0, ""}, + {"(ByteScanner).UnreadByte", Method, 0, ""}, + {"(ByteWriter).WriteByte", Method, 1, ""}, + {"(Closer).Close", Method, 0, ""}, + {"(ReadCloser).Close", Method, 0, ""}, + {"(ReadCloser).Read", Method, 0, ""}, + {"(ReadSeekCloser).Close", Method, 16, ""}, + {"(ReadSeekCloser).Read", Method, 16, ""}, + {"(ReadSeekCloser).Seek", Method, 16, ""}, + {"(ReadSeeker).Read", Method, 0, ""}, + {"(ReadSeeker).Seek", Method, 0, ""}, + {"(ReadWriteCloser).Close", Method, 0, ""}, + {"(ReadWriteCloser).Read", Method, 0, ""}, + {"(ReadWriteCloser).Write", Method, 0, ""}, + {"(ReadWriteSeeker).Read", Method, 0, ""}, + {"(ReadWriteSeeker).Seek", Method, 0, ""}, + {"(ReadWriteSeeker).Write", Method, 0, ""}, + {"(ReadWriter).Read", Method, 0, ""}, + {"(ReadWriter).Write", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(ReaderAt).ReadAt", Method, 0, ""}, + {"(ReaderFrom).ReadFrom", Method, 0, ""}, + {"(RuneReader).ReadRune", Method, 0, ""}, + {"(RuneScanner).ReadRune", Method, 0, ""}, + {"(RuneScanner).UnreadRune", Method, 0, ""}, + {"(Seeker).Seek", Method, 0, ""}, + {"(StringWriter).WriteString", Method, 12, ""}, + {"(WriteCloser).Close", Method, 0, ""}, + {"(WriteCloser).Write", Method, 0, ""}, + {"(WriteSeeker).Seek", Method, 0, ""}, + {"(WriteSeeker).Write", Method, 0, ""}, + {"(Writer).Write", Method, 0, ""}, + {"(WriterAt).WriteAt", Method, 0, ""}, + {"(WriterTo).WriteTo", Method, 0, ""}, {"ByteReader", Type, 0, ""}, {"ByteScanner", Type, 0, ""}, {"ByteWriter", Type, 1, ""}, @@ -7126,11 +7512,42 @@ var PackageSymbols = map[string][]Symbol{ {"(*PathError).Error", Method, 16, ""}, {"(*PathError).Timeout", Method, 16, ""}, {"(*PathError).Unwrap", Method, 16, ""}, + {"(DirEntry).Info", Method, 16, ""}, + {"(DirEntry).IsDir", Method, 16, ""}, + {"(DirEntry).Name", Method, 16, ""}, + {"(DirEntry).Type", Method, 16, ""}, + {"(FS).Open", Method, 16, ""}, + {"(File).Close", Method, 16, ""}, + {"(File).Read", Method, 16, ""}, + {"(File).Stat", Method, 16, ""}, + 
{"(FileInfo).IsDir", Method, 16, ""}, + {"(FileInfo).ModTime", Method, 16, ""}, + {"(FileInfo).Mode", Method, 16, ""}, + {"(FileInfo).Name", Method, 16, ""}, + {"(FileInfo).Size", Method, 16, ""}, + {"(FileInfo).Sys", Method, 16, ""}, {"(FileMode).IsDir", Method, 16, ""}, {"(FileMode).IsRegular", Method, 16, ""}, {"(FileMode).Perm", Method, 16, ""}, {"(FileMode).String", Method, 16, ""}, {"(FileMode).Type", Method, 16, ""}, + {"(GlobFS).Glob", Method, 16, ""}, + {"(GlobFS).Open", Method, 16, ""}, + {"(ReadDirFS).Open", Method, 16, ""}, + {"(ReadDirFS).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Close", Method, 16, ""}, + {"(ReadDirFile).Read", Method, 16, ""}, + {"(ReadDirFile).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Stat", Method, 16, ""}, + {"(ReadFileFS).Open", Method, 16, ""}, + {"(ReadFileFS).ReadFile", Method, 16, ""}, + {"(ReadLinkFS).Lstat", Method, 25, ""}, + {"(ReadLinkFS).Open", Method, 25, ""}, + {"(ReadLinkFS).ReadLink", Method, 25, ""}, + {"(StatFS).Open", Method, 16, ""}, + {"(StatFS).Stat", Method, 16, ""}, + {"(SubFS).Open", Method, 16, ""}, + {"(SubFS).Sub", Method, 16, ""}, {"DirEntry", Type, 16, ""}, {"ErrClosed", Var, 16, ""}, {"ErrExist", Var, 16, ""}, @@ -7271,6 +7688,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Logger).WarnContext", Method, 21, ""}, {"(*Logger).With", Method, 21, ""}, {"(*Logger).WithGroup", Method, 21, ""}, + {"(*MultiHandler).Enabled", Method, 26, ""}, + {"(*MultiHandler).Handle", Method, 26, ""}, + {"(*MultiHandler).WithAttrs", Method, 26, ""}, + {"(*MultiHandler).WithGroup", Method, 26, ""}, {"(*Record).Add", Method, 21, ""}, {"(*Record).AddAttrs", Method, 21, ""}, {"(*TextHandler).Enabled", Method, 21, ""}, @@ -7279,12 +7700,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*TextHandler).WithGroup", Method, 21, ""}, {"(Attr).Equal", Method, 21, ""}, {"(Attr).String", Method, 21, ""}, + {"(Handler).Enabled", Method, 21, ""}, + {"(Handler).Handle", Method, 21, ""}, + {"(Handler).WithAttrs", Method, 21, ""}, + {"(Handler).WithGroup", Method, 21, ""}, {"(Kind).String", Method, 21, ""}, {"(Level).AppendText", Method, 24, ""}, {"(Level).Level", Method, 21, ""}, {"(Level).MarshalJSON", Method, 21, ""}, {"(Level).MarshalText", Method, 21, ""}, {"(Level).String", Method, 21, ""}, + {"(Leveler).Level", Method, 21, ""}, + {"(LogValuer).LogValue", Method, 21, ""}, {"(Record).Attrs", Method, 21, ""}, {"(Record).Clone", Method, 21, ""}, {"(Record).NumAttrs", Method, 21, ""}, @@ -7358,9 +7785,11 @@ var PackageSymbols = map[string][]Symbol{ {"LogValuer", Type, 21, ""}, {"Logger", Type, 21, ""}, {"MessageKey", Const, 21, ""}, + {"MultiHandler", Type, 26, ""}, {"New", Func, 21, "func(h Handler) *Logger"}, {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"}, {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, {"Record", Type, 21, ""}, @@ -7515,7 +7944,7 @@ var PackageSymbols = map[string][]Symbol{ {"MinInt64", Const, 0, ""}, {"MinInt8", Const, 0, ""}, {"Mod", Func, 0, "func(x float64, y float64) float64"}, - {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"}, {"NaN", Func, 0, "func() float64"}, {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, 
{"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, @@ -7811,6 +8240,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint32", Method, 0, ""}, {"(*Rand).Uint64", Method, 8, ""}, {"(*Zipf).Uint64", Method, 0, ""}, + {"(Source).Int63", Method, 0, ""}, + {"(Source).Seed", Method, 0, ""}, + {"(Source64).Int63", Method, 8, ""}, + {"(Source64).Seed", Method, 8, ""}, + {"(Source64).Uint64", Method, 8, ""}, {"ExpFloat64", Func, 0, "func() float64"}, {"Float32", Func, 0, "func() float32"}, {"Float64", Func, 0, "func() float64"}, @@ -7866,6 +8300,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint64N", Method, 22, ""}, {"(*Rand).UintN", Method, 22, ""}, {"(*Zipf).Uint64", Method, 22, ""}, + {"(Source).Uint64", Method, 22, ""}, {"ChaCha8", Type, 22, ""}, {"ExpFloat64", Func, 22, "func() float64"}, {"Float32", Func, 22, "func() float32"}, @@ -7929,6 +8364,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).FormDataContentType", Method, 0, ""}, {"(*Writer).SetBoundary", Method, 1, ""}, {"(*Writer).WriteField", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).ReadAt", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, {"ErrMessageTooLarge", Var, 9, ""}, {"File", Type, 0, ""}, {"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"}, @@ -7972,6 +8411,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Unwrap", Method, 23, ""}, {"(*Dialer).Dial", Method, 1, ""}, {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).DialIP", Method, 26, ""}, + {"(*Dialer).DialTCP", Method, 26, ""}, + {"(*Dialer).DialUDP", Method, 26, ""}, + {"(*Dialer).DialUnix", Method, 26, ""}, {"(*Dialer).MultipathTCP", Method, 21, ""}, {"(*Dialer).SetMultipathTCP", Method, 21, ""}, {"(*IP).UnmarshalText", Method, 2, ""}, @@ -8109,6 +8552,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SetDeadline", Method, 0, ""}, {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""}, {"(*UnixListener).SyscallConn", Method, 10, ""}, + {"(Addr).Network", Method, 0, ""}, + {"(Addr).String", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).LocalAddr", Method, 0, ""}, + {"(Conn).Read", Method, 0, ""}, + {"(Conn).RemoteAddr", Method, 0, ""}, + {"(Conn).SetDeadline", Method, 0, ""}, + {"(Conn).SetReadDeadline", Method, 0, ""}, + {"(Conn).SetWriteDeadline", Method, 0, ""}, + {"(Conn).Write", Method, 0, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).Temporary", Method, 0, ""}, + {"(Error).Timeout", Method, 0, ""}, {"(Flags).String", Method, 0, ""}, {"(HardwareAddr).String", Method, 0, ""}, {"(IP).AppendText", Method, 24, ""}, @@ -8132,6 +8588,16 @@ var PackageSymbols = map[string][]Symbol{ {"(InvalidAddrError).Error", Method, 0, ""}, {"(InvalidAddrError).Temporary", Method, 0, ""}, {"(InvalidAddrError).Timeout", Method, 0, ""}, + {"(Listener).Accept", Method, 0, ""}, + {"(Listener).Addr", Method, 0, ""}, + {"(Listener).Close", Method, 0, ""}, + {"(PacketConn).Close", Method, 0, ""}, + {"(PacketConn).LocalAddr", Method, 0, ""}, + {"(PacketConn).ReadFrom", Method, 0, ""}, + {"(PacketConn).SetDeadline", Method, 0, ""}, + {"(PacketConn).SetReadDeadline", Method, 0, ""}, + {"(PacketConn).SetWriteDeadline", Method, 0, ""}, + {"(PacketConn).WriteTo", Method, 0, ""}, {"(UnknownNetworkError).Error", Method, 0, ""}, {"(UnknownNetworkError).Temporary", Method, 0, ""}, {"(UnknownNetworkError).Timeout", Method, 0, ""}, @@ -8307,6 +8773,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).Head", Method, 
0, ""}, {"(*Client).Post", Method, 0, ""}, {"(*Client).PostForm", Method, 0, ""}, + {"(*ClientConn).Available", Method, 26, ""}, + {"(*ClientConn).Close", Method, 26, ""}, + {"(*ClientConn).Err", Method, 26, ""}, + {"(*ClientConn).InFlight", Method, 26, ""}, + {"(*ClientConn).Release", Method, 26, ""}, + {"(*ClientConn).Reserve", Method, 26, ""}, + {"(*ClientConn).RoundTrip", Method, 26, ""}, + {"(*ClientConn).SetStateHook", Method, 26, ""}, {"(*Cookie).String", Method, 0, ""}, {"(*Cookie).Valid", Method, 18, ""}, {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""}, @@ -8366,10 +8840,22 @@ var PackageSymbols = map[string][]Symbol{ {"(*Transport).CancelRequest", Method, 1, ""}, {"(*Transport).Clone", Method, 13, ""}, {"(*Transport).CloseIdleConnections", Method, 0, ""}, + {"(*Transport).NewClientConn", Method, 26, ""}, {"(*Transport).RegisterProtocol", Method, 0, ""}, {"(*Transport).RoundTrip", Method, 0, ""}, + {"(CloseNotifier).CloseNotify", Method, 1, ""}, {"(ConnState).String", Method, 3, ""}, + {"(CookieJar).Cookies", Method, 0, ""}, + {"(CookieJar).SetCookies", Method, 0, ""}, {"(Dir).Open", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).Readdir", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, + {"(File).Stat", Method, 0, ""}, + {"(FileSystem).Open", Method, 0, ""}, + {"(Flusher).Flush", Method, 0, ""}, + {"(Handler).ServeHTTP", Method, 0, ""}, {"(HandlerFunc).ServeHTTP", Method, 0, ""}, {"(Header).Add", Method, 0, ""}, {"(Header).Clone", Method, 13, ""}, @@ -8379,10 +8865,16 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14, ""}, {"(Header).Write", Method, 0, ""}, {"(Header).WriteSubset", Method, 0, ""}, + {"(Hijacker).Hijack", Method, 0, ""}, {"(Protocols).HTTP1", Method, 24, ""}, {"(Protocols).HTTP2", Method, 24, ""}, {"(Protocols).String", Method, 24, ""}, {"(Protocols).UnencryptedHTTP2", Method, 24, ""}, + {"(Pusher).Push", Method, 8, ""}, + {"(ResponseWriter).Header", Method, 0, ""}, + {"(ResponseWriter).Write", Method, 0, ""}, + {"(ResponseWriter).WriteHeader", Method, 0, ""}, + {"(RoundTripper).RoundTrip", Method, 0, ""}, {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"}, {"CanonicalHeaderKey", Func, 0, "func(s string) string"}, {"Client", Type, 0, ""}, @@ -8390,6 +8882,7 @@ var PackageSymbols = map[string][]Symbol{ {"Client.Jar", Field, 0, ""}, {"Client.Timeout", Field, 3, ""}, {"Client.Transport", Field, 0, ""}, + {"ClientConn", Type, 26, ""}, {"CloseNotifier", Type, 1, ""}, {"ConnState", Type, 3, ""}, {"Cookie", Type, 0, ""}, @@ -8457,6 +8950,7 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, {"HTTP2Config.PingTimeout", Field, 24, ""}, {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""}, {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, {"Handle", Func, 0, "func(pattern string, handler Handler)"}, {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, @@ -8699,6 +9193,8 @@ var PackageSymbols = map[string][]Symbol{ "net/http/cookiejar": { {"(*Jar).Cookies", Method, 1, ""}, {"(*Jar).SetCookies", Method, 1, ""}, + {"(PublicSuffixList).PublicSuffix", Method, 1, ""}, + {"(PublicSuffixList).String", Method, 1, ""}, {"Jar", Type, 1, ""}, {"New", Func, 1, "func(o *Options) (*Jar, error)"}, {"Options", Type, 1, ""}, @@ -8792,6 +9288,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ServerConn).Pending", Method, 0, ""}, 
{"(*ServerConn).Read", Method, 0, ""}, {"(*ServerConn).Write", Method, 0, ""}, + {"(BufferPool).Get", Method, 6, ""}, + {"(BufferPool).Put", Method, 6, ""}, {"BufferPool", Type, 6, ""}, {"ClientConn", Type, 0, ""}, {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"}, @@ -8904,6 +9402,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Prefix).AppendText", Method, 24, ""}, {"(Prefix).AppendTo", Method, 18, ""}, {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Compare", Method, 26, ""}, {"(Prefix).Contains", Method, 18, ""}, {"(Prefix).IsSingleIP", Method, 18, ""}, {"(Prefix).IsValid", Method, 18, ""}, @@ -8944,6 +9443,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Server).ServeConn", Method, 0, ""}, {"(*Server).ServeHTTP", Method, 0, ""}, {"(*Server).ServeRequest", Method, 0, ""}, + {"(ClientCodec).Close", Method, 0, ""}, + {"(ClientCodec).ReadResponseBody", Method, 0, ""}, + {"(ClientCodec).ReadResponseHeader", Method, 0, ""}, + {"(ClientCodec).WriteRequest", Method, 0, ""}, + {"(ServerCodec).Close", Method, 0, ""}, + {"(ServerCodec).ReadRequestBody", Method, 0, ""}, + {"(ServerCodec).ReadRequestHeader", Method, 0, ""}, + {"(ServerCodec).WriteResponse", Method, 0, ""}, {"(ServerError).Error", Method, 0, ""}, {"Accept", Func, 0, "func(lis net.Listener)"}, {"Call", Type, 0, ""}, @@ -9002,6 +9509,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).StartTLS", Method, 0, ""}, {"(*Client).TLSConnectionState", Method, 5, ""}, {"(*Client).Verify", Method, 0, ""}, + {"(Auth).Next", Method, 0, ""}, + {"(Auth).Start", Method, 0, ""}, {"Auth", Type, 0, ""}, {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"}, {"Client", Type, 0, ""}, @@ -9177,6 +9686,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Process).Release", Method, 0, ""}, {"(*Process).Signal", Method, 0, ""}, {"(*Process).Wait", Method, 0, ""}, + {"(*Process).WithHandle", Method, 26, ""}, {"(*ProcessState).ExitCode", Method, 12, ""}, {"(*ProcessState).Exited", Method, 0, ""}, {"(*ProcessState).Pid", Method, 0, ""}, @@ -9212,10 +9722,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyscallError).Error", Method, 0, ""}, {"(*SyscallError).Timeout", Method, 10, ""}, {"(*SyscallError).Unwrap", Method, 13, ""}, + {"(FileInfo).IsDir", Method, 0, ""}, + {"(FileInfo).ModTime", Method, 0, ""}, + {"(FileInfo).Mode", Method, 0, ""}, + {"(FileInfo).Name", Method, 0, ""}, + {"(FileInfo).Size", Method, 0, ""}, + {"(FileInfo).Sys", Method, 0, ""}, {"(FileMode).IsDir", Method, 0, ""}, {"(FileMode).IsRegular", Method, 1, ""}, {"(FileMode).Perm", Method, 0, ""}, {"(FileMode).String", Method, 0, ""}, + {"(Signal).Signal", Method, 0, ""}, + {"(Signal).String", Method, 0, ""}, {"Args", Var, 0, ""}, {"Chdir", Func, 0, "func(dir string) error"}, {"Chmod", Func, 0, "func(name string, mode FileMode) error"}, @@ -9234,6 +9752,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrExist", Var, 0, ""}, {"ErrInvalid", Var, 0, ""}, {"ErrNoDeadline", Var, 10, ""}, + {"ErrNoHandle", Var, 26, ""}, {"ErrNotExist", Var, 0, ""}, {"ErrPermission", Var, 0, ""}, {"ErrProcessDone", Var, 16, ""}, @@ -9461,7 +9980,7 @@ var PackageSymbols = map[string][]Symbol{ {"ListSeparator", Const, 0, ""}, {"Localize", Func, 23, "func(path string) (string, error)"}, {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, - {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Rel", Func, 0, "func(basePath string, targPath string) (string, error)"}, {"Separator", Const, 0, ""}, {"SkipAll", 
Var, 20, ""}, {"SkipDir", Var, 0, ""}, @@ -9491,6 +10010,45 @@ var PackageSymbols = map[string][]Symbol{ {"(StructField).IsExported", Method, 17, ""}, {"(StructTag).Get", Method, 0, ""}, {"(StructTag).Lookup", Method, 7, ""}, + {"(Type).Align", Method, 0, ""}, + {"(Type).AssignableTo", Method, 0, ""}, + {"(Type).Bits", Method, 0, ""}, + {"(Type).CanSeq", Method, 23, ""}, + {"(Type).CanSeq2", Method, 23, ""}, + {"(Type).ChanDir", Method, 0, ""}, + {"(Type).Comparable", Method, 4, ""}, + {"(Type).ConvertibleTo", Method, 1, ""}, + {"(Type).Elem", Method, 0, ""}, + {"(Type).Field", Method, 0, ""}, + {"(Type).FieldAlign", Method, 0, ""}, + {"(Type).FieldByIndex", Method, 0, ""}, + {"(Type).FieldByName", Method, 0, ""}, + {"(Type).FieldByNameFunc", Method, 0, ""}, + {"(Type).Fields", Method, 26, ""}, + {"(Type).Implements", Method, 0, ""}, + {"(Type).In", Method, 0, ""}, + {"(Type).Ins", Method, 26, ""}, + {"(Type).IsVariadic", Method, 0, ""}, + {"(Type).Key", Method, 0, ""}, + {"(Type).Kind", Method, 0, ""}, + {"(Type).Len", Method, 0, ""}, + {"(Type).Method", Method, 0, ""}, + {"(Type).MethodByName", Method, 0, ""}, + {"(Type).Methods", Method, 26, ""}, + {"(Type).Name", Method, 0, ""}, + {"(Type).NumField", Method, 0, ""}, + {"(Type).NumIn", Method, 0, ""}, + {"(Type).NumMethod", Method, 0, ""}, + {"(Type).NumOut", Method, 0, ""}, + {"(Type).Out", Method, 0, ""}, + {"(Type).Outs", Method, 26, ""}, + {"(Type).OverflowComplex", Method, 23, ""}, + {"(Type).OverflowFloat", Method, 23, ""}, + {"(Type).OverflowInt", Method, 23, ""}, + {"(Type).OverflowUint", Method, 23, ""}, + {"(Type).PkgPath", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"(Value).Addr", Method, 0, ""}, {"(Value).Bool", Method, 0, ""}, {"(Value).Bytes", Method, 0, ""}, @@ -9517,6 +10075,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).FieldByIndexErr", Method, 18, ""}, {"(Value).FieldByName", Method, 0, ""}, {"(Value).FieldByNameFunc", Method, 0, ""}, + {"(Value).Fields", Method, 26, ""}, {"(Value).Float", Method, 0, ""}, {"(Value).Grow", Method, 20, ""}, {"(Value).Index", Method, 0, ""}, @@ -9533,6 +10092,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).MapRange", Method, 12, ""}, {"(Value).Method", Method, 0, ""}, {"(Value).MethodByName", Method, 0, ""}, + {"(Value).Methods", Method, 26, ""}, {"(Value).NumField", Method, 0, ""}, {"(Value).NumMethod", Method, 0, ""}, {"(Value).OverflowComplex", Method, 0, ""}, @@ -9648,7 +10208,6 @@ var PackageSymbols = map[string][]Symbol{ {"StructOf", Func, 7, "func(fields []StructField) Type"}, {"StructTag", Type, 0, ""}, {"Swapper", Func, 8, "func(slice any) func(i int, j int)"}, - {"Type", Type, 0, ""}, {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"}, {"TypeFor", Func, 22, "func[T any]() Type"}, {"TypeOf", Func, 0, "func(i any) Type"}, @@ -9850,6 +10409,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeAssertionError).Error", Method, 0, ""}, {"(*TypeAssertionError).RuntimeError", Method, 0, ""}, {"(Cleanup).Stop", Method, 24, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).RuntimeError", Method, 0, ""}, {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"}, {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"}, {"BlockProfileRecord", Type, 1, ""}, @@ -9932,7 +10493,7 @@ var PackageSymbols = map[string][]Symbol{ {"PanicNilError", Type, 21, ""}, {"Pinner", Type, 21, ""}, {"ReadMemStats", Func, 0, "func(m *MemStats)"}, - {"ReadTrace", Func, 5, "func() []byte"}, + 
{"ReadTrace", Func, 5, "func() (buf []byte)"}, {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, @@ -10124,6 +10685,9 @@ var PackageSymbols = map[string][]Symbol{ {"(IntSlice).Search", Method, 0, ""}, {"(IntSlice).Sort", Method, 0, ""}, {"(IntSlice).Swap", Method, 0, ""}, + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"(StringSlice).Len", Method, 0, ""}, {"(StringSlice).Less", Method, 0, ""}, {"(StringSlice).Search", Method, 0, ""}, @@ -10315,6 +10879,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*WaitGroup).Done", Method, 0, ""}, {"(*WaitGroup).Go", Method, 25, ""}, {"(*WaitGroup).Wait", Method, 0, ""}, + {"(Locker).Lock", Method, 0, ""}, + {"(Locker).Unlock", Method, 0, ""}, {"Cond", Type, 0, ""}, {"Cond.L", Field, 0, ""}, {"Locker", Type, 0, ""}, @@ -10456,10 +11022,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Timeval).Nano", Method, 0, ""}, {"(*Timeval).Nanoseconds", Method, 0, ""}, {"(*Timeval).Unix", Method, 0, ""}, + {"(Conn).SyscallConn", Method, 9, ""}, {"(Errno).Error", Method, 0, ""}, {"(Errno).Is", Method, 13, ""}, {"(Errno).Temporary", Method, 0, ""}, {"(Errno).Timeout", Method, 0, ""}, + {"(RawConn).Control", Method, 9, ""}, + {"(RawConn).Read", Method, 9, ""}, + {"(RawConn).Write", Method, 9, ""}, {"(Signal).Signal", Method, 0, ""}, {"(Signal).String", Method, 0, ""}, {"(Token).Close", Method, 0, ""}, @@ -14379,7 +14949,7 @@ var PackageSymbols = map[string][]Symbol{ {"RouteMessage.Data", Field, 0, ""}, {"RouteMessage.Header", Field, 0, ""}, {"RouteRIB", Func, 0, ""}, - {"RoutingMessage", Type, 0, ""}, + {"RoutingMessage", Type, 14, ""}, {"RtAttr", Type, 0, ""}, {"RtAttr.Len", Field, 0, ""}, {"RtAttr.Type", Field, 0, ""}, @@ -15865,7 +16435,6 @@ var PackageSymbols = map[string][]Symbol{ {"SockFprog.Filter", Field, 0, ""}, {"SockFprog.Len", Field, 0, ""}, {"SockFprog.Pad_cgo_0", Field, 0, ""}, - {"Sockaddr", Type, 0, ""}, {"SockaddrDatalink", Type, 0, ""}, {"SockaddrDatalink.Alen", Field, 0, ""}, {"SockaddrDatalink.Data", Field, 0, ""}, @@ -16679,6 +17248,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, }, "testing": { + {"(*B).ArtifactDir", Method, 26, ""}, {"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, @@ -16713,6 +17283,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).ArtifactDir", Method, 26, ""}, {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, @@ -16738,6 +17309,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).ArtifactDir", Method, 26, ""}, {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, @@ -16768,6 +17340,29 @@ var PackageSymbols = map[string][]Symbol{ {"(BenchmarkResult).MemString", Method, 1, ""}, {"(BenchmarkResult).NsPerOp", Method, 0, ""}, {"(BenchmarkResult).String", Method, 0, ""}, + {"(TB).ArtifactDir", Method, 26, ""}, + {"(TB).Attr", Method, 25, ""}, + {"(TB).Chdir", Method, 24, ""}, + {"(TB).Cleanup", Method, 14, ""}, + {"(TB).Context", Method, 24, ""}, + {"(TB).Error", Method, 2, ""}, + {"(TB).Errorf", Method, 2, ""}, + {"(TB).Fail", 
Method, 2, ""}, + {"(TB).FailNow", Method, 2, ""}, + {"(TB).Failed", Method, 2, ""}, + {"(TB).Fatal", Method, 2, ""}, + {"(TB).Fatalf", Method, 2, ""}, + {"(TB).Helper", Method, 9, ""}, + {"(TB).Log", Method, 2, ""}, + {"(TB).Logf", Method, 2, ""}, + {"(TB).Name", Method, 8, ""}, + {"(TB).Output", Method, 25, ""}, + {"(TB).Setenv", Method, 17, ""}, + {"(TB).Skip", Method, 2, ""}, + {"(TB).SkipNow", Method, 2, ""}, + {"(TB).Skipf", Method, 2, ""}, + {"(TB).Skipped", Method, 2, ""}, + {"(TB).TempDir", Method, 15, ""}, {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"}, {"B", Type, 0, ""}, {"B.N", Field, 0, ""}, @@ -16818,10 +17413,12 @@ var PackageSymbols = map[string][]Symbol{ {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"}, {"Short", Func, 0, "func() bool"}, {"T", Type, 0, ""}, - {"TB", Type, 2, ""}, {"Testing", Func, 21, "func() bool"}, {"Verbose", Func, 1, "func() bool"}, }, + "testing/cryptotest": { + {"SetGlobalRandom", Func, 26, "func(t *testing.T, seed uint64)"}, + }, "testing/fstest": { {"(MapFS).Glob", Method, 16, ""}, {"(MapFS).Lstat", Method, 25, ""}, @@ -16854,6 +17451,7 @@ var PackageSymbols = map[string][]Symbol{ "testing/quick": { {"(*CheckEqualError).Error", Method, 0, ""}, {"(*CheckError).Error", Method, 0, ""}, + {"(Generator).Generate", Method, 0, ""}, {"(SetupError).Error", Method, 0, ""}, {"Check", Func, 0, "func(f any, config *Config) error"}, {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"}, @@ -17060,6 +17658,10 @@ var PackageSymbols = map[string][]Symbol{ {"(ListNode).Position", Method, 1, ""}, {"(ListNode).Type", Method, 0, ""}, {"(NilNode).Position", Method, 1, ""}, + {"(Node).Copy", Method, 0, ""}, + {"(Node).Position", Method, 1, ""}, + {"(Node).String", Method, 0, ""}, + {"(Node).Type", Method, 0, ""}, {"(NodeType).Type", Method, 0, ""}, {"(NumberNode).Position", Method, 1, ""}, {"(NumberNode).Type", Method, 0, ""}, diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index e223e0f34..59a5de36a 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -39,7 +39,7 @@ const ( Var // "EOF" Const // "Pi" Field // "Point.X" - Method // "(*Buffer).Grow" + Method // "(*Buffer).Grow" or "(Reader).Read" ) func (kind Kind) String() string { diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index f49802b8e..8d13f1214 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -160,8 +160,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in // The term set of an interface is the intersection of the term sets of its // embedded types. tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) + for embedded := range u.EmbeddedTypes() { if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } @@ -174,8 +173,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in case *types.Union: // The term set of a union is the union of term sets of its terms. 
tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) + for t := range u.Terms() { var terms termlist switch t.Type().Underlying().(type) { case *types.Interface: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go index 3db2a135b..7ebe9768b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go @@ -8,7 +8,7 @@ import ( "fmt" "go/ast" "go/types" - _ "unsafe" + _ "unsafe" // for go:linkname hack ) // CallKind describes the function position of an [*ast.CallExpr]. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go index 4957f0216..5fe4d8abc 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/element.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -35,8 +35,8 @@ func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T type // Recursion over signatures of each method. tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) + for method := range tmset.Methods() { + sig := method.Type().(*types.Signature) // It is tempting to call visit(sig, false) // but, as noted in golang.org/cl/65450043, // the Signature.Recv field is ignored by diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go new file mode 100644 index 000000000..c846a53d5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -0,0 +1,88 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/token" + "go/types" +) + +// NoEffects reports whether the expression has no side effects, i.e., it +// does not modify the memory state. This function is conservative: it may +// return false even when the expression has no effect. +func NoEffects(info *types.Info, expr ast.Expr) bool { + noEffects := true + ast.Inspect(expr, func(n ast.Node) bool { + switch v := n.(type) { + case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, + *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. + return false + + case *ast.UnaryExpr: + // Channel send <-ch has effects. + if v.Op == token.ARROW { + noEffects = false + } + + case *ast.CallExpr: + // Type conversion has no effects. + if !info.Types[v.Fun].IsType() { + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } + } + + case *ast.FuncLit: + // A FuncLit has no effects, but do not descend into it. 
+ return false + + default: + // All other expressions have effects + noEffects = false + } + + return noEffects + }) + return noEffects +} + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). +func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go new file mode 100644 index 000000000..e0d63c46c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + "slices" +) + +// IsTypeNamed reports whether t is (or is an alias for) a +// package-level defined type with the given package path and one of +// the given names. It returns false if t is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { + if named, ok := types.Unalias(t).(*types.Named); ok { + tname := named.Obj() + return tname != nil && + IsPackageLevel(tname) && + tname.Pkg().Path() == pkgPath && + slices.Contains(names, tname.Name()) + } + return false +} + +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a +// package-level defined type with the given package path and one of the given +// names. It returns false if t is not a pointer type. +func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { + r := Unpointer(t) + if r == t { + return false + } + return IsTypeNamed(r, pkgPath, names...) +} + +// IsFunctionNamed reports whether obj is a package-level function +// defined in the given package and has one of the given names. +// It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { + f, ok := obj.(*types.Func) + return ok && + IsPackageLevel(obj) && + f.Pkg().Path() == pkgPath && + f.Signature().Recv() == nil && + slices.Contains(names, f.Name()) +} + +// IsMethodNamed reports whether obj is a method defined on a +// package-level type with the given package and type name, and has +// one of the given names. It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.TypeName.Name", +// which is important for the performance of syntax matching. 
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { + if fn, ok := obj.(*types.Func); ok { + if recv := fn.Signature().Recv(); recv != nil { + _, T := ReceiverNamed(recv) + return T != nil && + IsTypeNamed(T, pkgPath, typeName) && + slices.Contains(names, fn.Name()) + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go index b64f714eb..4e2756fc4 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -15,6 +15,14 @@ import ( // file. // If the same package is imported multiple times, the last appearance is // recorded. +// +// TODO(adonovan): this function ignores the effect of shadowing. It +// should accept a [token.Pos] and a [types.Info] and compute only the +// set of imports that are not shadowed at that point, analogous to +// [analysis.AddImport]. It could also compute (as a side +// effect) the set of additional imports required to ensure that there +// is an accessible import for each necessary package, making it +// converge even more closely with AddImport. func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { // Construct mapping of import paths to their defined names. // It is only necessary to look at renaming imports. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go b/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go new file mode 100644 index 000000000..01ad7b9cf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go @@ -0,0 +1,261 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeindex provides an [Index] of type information for a +// package, allowing efficient lookup of, say, whether a given symbol +// is referenced and, if so, where from; or of the [inspector.Cursor] for +// the declaration of a particular [types.Object] symbol. +package typeindex + +import ( + "encoding/binary" + "go/ast" + "go/types" + "iter" + + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typesinternal" +) + +// New constructs an Index for the package of type-annotated syntax +// +// TODO(adonovan): accept a FileSet too? +// We regret not requiring one in inspector.New. +func New(inspect *inspector.Inspector, pkg *types.Package, info *types.Info) *Index { + ix := &Index{ + inspect: inspect, + info: info, + packages: make(map[string]*types.Package), + def: make(map[types.Object]inspector.Cursor), + uses: make(map[types.Object]*uses), + } + + addPackage := func(pkg2 *types.Package) { + if pkg2 != nil && pkg2 != pkg { + ix.packages[pkg2.Path()] = pkg2 + } + } + + for cur := range inspect.Root().Preorder((*ast.ImportSpec)(nil), (*ast.Ident)(nil)) { + switch n := cur.Node().(type) { + case *ast.ImportSpec: + // Index direct imports, including blank ones. + if pkgname := info.PkgNameOf(n); pkgname != nil { + addPackage(pkgname.Imported()) + } + + case *ast.Ident: + // Index all defining and using identifiers. + if obj := info.Defs[n]; obj != nil { + ix.def[obj] = cur + } + + if obj := info.Uses[n]; obj != nil { + // Index indirect dependencies (via fields and methods). 
+				if !typesinternal.IsPackageLevel(obj) {
+					addPackage(obj.Pkg())
+				}
+
+				for {
+					us, ok := ix.uses[obj]
+					if !ok {
+						us = &uses{}
+						us.code = us.initial[:0]
+						ix.uses[obj] = us
+					}
+					delta := cur.Index() - us.last
+					if delta < 0 {
+						panic("non-monotonic")
+					}
+					us.code = binary.AppendUvarint(us.code, uint64(delta))
+					us.last = cur.Index()
+
+					// If n is a selection of a field or method of an instantiated
+					// type, also record a use of the generic field or method.
+					obj, ok = objectOrigin(obj)
+					if !ok {
+						break
+					}
+				}
+			}
+		}
+	}
+	return ix
+}
+
+// objectOrigin returns the generic object for obj if it is a field or
+// method of an instantiated type; zero otherwise.
+//
+// (This operation is appropriate only for selections.
+// Lexically resolved references always resolve to the generic.
+// Although Named and Alias types also use Origin to express
+// an instance/generic distinction, that's in the domain
+// of Types; their TypeName objects always refer to the generic.)
+func objectOrigin(obj types.Object) (types.Object, bool) {
+	var origin types.Object
+	switch obj := obj.(type) {
+	case *types.Func:
+		if obj.Signature().Recv() != nil {
+			origin = obj.Origin() // G[int].method -> G[T].method
+		}
+	case *types.Var:
+		if obj.IsField() {
+			origin = obj.Origin() // G[int].field -> G[T].field
+		}
+	}
+	if origin != nil && origin != obj {
+		return origin, true
+	}
+	return nil, false
+}
+
+// An Index holds an index mapping [types.Object] symbols to their syntax.
+// In effect, it is the inverse of [types.Info].
+type Index struct {
+	inspect  *inspector.Inspector
+	info     *types.Info
+	packages map[string]*types.Package          // packages of all symbols referenced from this package
+	def      map[types.Object]inspector.Cursor  // Cursor of *ast.Ident that defines the Object
+	uses     map[types.Object]*uses             // Cursors of *ast.Idents that use the Object
+}
+
+// A uses holds the list of Cursors of Idents that use a given symbol.
+//
+// The Uses map of [types.Info] is substantial, so it pays to compress
+// its inverse mapping here, both in space and in CPU due to reduced
+// allocation. A Cursor is 2 words; a Cursor.Index is 4 bytes; but
+// since Cursors are naturally delivered in ascending order, we can
+// use varint-encoded deltas at a cost of only ~1.7-2.2 bytes per use.
+//
+// Many variables have only one or two uses, so their encoded uses may
+// fit in the 4 bytes of initial, saving further CPU and space
+// essentially for free since the struct's size class is 4 words.
+type uses struct {
+	code    []byte  // varint-encoded deltas of successive Cursor.Index values
+	last    int32   // most recent Cursor.Index value; used during encoding
+	initial [4]byte // use slack in size class as initial space for code
+}
+
+// Uses returns the sequence of Cursors of [*ast.Ident]s in this package
+// that refer to obj. If obj is nil, the sequence is empty.
+//
+// Uses, unlike the Uses field of [types.Info], records additional
+// entries mapping fields and methods of generic types to references
+// through their corresponding instantiated objects.
+func (ix *Index) Uses(obj types.Object) iter.Seq[inspector.Cursor] { + return func(yield func(inspector.Cursor) bool) { + if uses := ix.uses[obj]; uses != nil { + var last int32 + for code := uses.code; len(code) > 0; { + delta, n := binary.Uvarint(code) + last += int32(delta) + if !yield(ix.inspect.At(last)) { + return + } + code = code[n:] + } + } + } +} + +// Used reports whether any of the specified objects are used, in +// other words, obj != nil && Uses(obj) is non-empty for some obj in objs. +// +// (This treatment of nil allows Used to be called directly on the +// result of [Index.Object] so that analyzers can conveniently skip +// packages that don't use a symbol of interest.) +func (ix *Index) Used(objs ...types.Object) bool { + for _, obj := range objs { + if obj != nil && ix.uses[obj] != nil { + return true + } + } + return false +} + +// Def returns the Cursor of the [*ast.Ident] in this package +// that declares the specified object, if any. +func (ix *Index) Def(obj types.Object) (inspector.Cursor, bool) { + cur, ok := ix.def[obj] + return cur, ok +} + +// Package returns the package of the specified path, +// or nil if it is not referenced from this package. +func (ix *Index) Package(path string) *types.Package { + return ix.packages[path] +} + +// Object returns the package-level symbol name within the package of +// the specified path, or nil if the package or symbol does not exist +// or is not visible from this package. +func (ix *Index) Object(path, name string) types.Object { + if pkg := ix.Package(path); pkg != nil { + return pkg.Scope().Lookup(name) + } + return nil +} + +// Selection returns the named method or field belonging to the +// package-level type returned by Object(path, typename). +func (ix *Index) Selection(path, typename, name string) types.Object { + if obj := ix.Object(path, typename); obj != nil { + if tname, ok := obj.(*types.TypeName); ok { + obj, _, _ := types.LookupFieldOrMethod(tname.Type(), true, obj.Pkg(), name) + return obj + } + } + return nil +} + +// Calls returns the sequence of cursors for *ast.CallExpr nodes that +// call the specified callee, as defined by [typeutil.Callee]. +// If callee is nil, the sequence is empty. +func (ix *Index) Calls(callee types.Object) iter.Seq[inspector.Cursor] { + return func(yield func(inspector.Cursor) bool) { + for cur := range ix.Uses(callee) { + ek, _ := cur.ParentEdge() + + // The call may be of the form f() or x.f(), + // optionally with parens; ascend from f to call. + // + // It is tempting but wrong to use the first + // CallExpr ancestor: we have to make sure the + // ident is in the CallExpr.Fun position, otherwise + // f(f, f) would have two spurious matches. + // Avoiding Enclosing is also significantly faster. 
+
+			// inverse unparen: f -> (f)
+			for ek == edge.ParenExpr_X {
+				cur = cur.Parent()
+				ek, _ = cur.ParentEdge()
+			}
+
+			// ascend selector: f -> x.f
+			if ek == edge.SelectorExpr_Sel {
+				cur = cur.Parent()
+				ek, _ = cur.ParentEdge()
+			}
+
+			// inverse unparen again
+			for ek == edge.ParenExpr_X {
+				cur = cur.Parent()
+				ek, _ = cur.ParentEdge()
+			}
+
+			// ascend from f or x.f to call
+			if ek == edge.CallExpr_Fun {
+				curCall := cur.Parent()
+				call := curCall.Node().(*ast.CallExpr)
+				if typeutil.Callee(ix.info, call) == callee {
+					if !yield(curCall) {
+						return
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8db..51001666e 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+//   - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+//   - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+//   - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+//   - access to internal go/types APIs that are not yet
+//     exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+//   - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+//   - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+//   - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+//   - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
 package typesinternal
 
 import (
@@ -11,8 +23,8 @@ import (
 	"go/token"
 	"go/types"
 	"reflect"
-	"unsafe"
 
+	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/internal/aliases"
 )
@@ -27,8 +39,7 @@ func SetUsesCgo(conf *types.Config) bool {
 		}
 	}
 
-	addr := unsafe.Pointer(f.UnsafeAddr())
-	*(*bool)(addr) = true
+	*(*bool)(f.Addr().UnsafePointer()) = true
 
 	return true
 }
@@ -60,6 +71,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
 // which is often excessive.)
 //
 // If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
 func NameRelativeTo(pkg *types.Package) types.Qualifier {
 	return func(other *types.Package) string {
 		if pkg != nil && pkg == other {
@@ -153,3 +167,31 @@ func NewTypesInfo() *types.Info {
 		FileVersions: map[*ast.File]string{},
 	}
 }
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+	for cur := range cur.Enclosing() {
+		n := cur.Node()
+		// A function's Scope is associated with its FuncType.
+		switch f := n.(type) {
+		case *ast.FuncDecl:
+			n = f.Type
+		case *ast.FuncLit:
+			n = f.Type
+		}
+		if b := info.Scopes[n]; b != nil {
+			return b
+		}
+	}
+	panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go index e5da04951..26499cdd2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -2,39 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. +package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. -func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 000000000..17b1804b4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. 
+func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index d272949c1..d612a7102 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { } } -// IsZeroExpr uses simple syntactic heuristics to report whether expr -// is a obvious zero value, such as 0, "", nil, or false. -// It cannot do better without type information. -func IsZeroExpr(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - // TypeExpr returns syntax for the specified type. References to named types // are qualified by an appropriate (optional) qualifier function. // It may panic for types such as Tuple or Union. +// +// See also https://go.dev/issues/75604, which will provide a robust +// Type-to-valid-Go-syntax formatter. func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { switch t := t.(type) { case *types.Basic: @@ -269,12 +258,12 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { case *types.Signature: var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { + for v := range t.Params().Variables() { params = append(params, &ast.Field{ - Type: TypeExpr(t.Params().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), Names: []*ast.Ident{ { - Name: t.Params().At(i).Name(), + Name: v.Name(), }, }, }) @@ -284,9 +273,9 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} } var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { + for v := range t.Results().Variables() { returns = append(returns, &ast.Field{ - Type: TypeExpr(t.Results().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), }) } return &ast.FuncType{ @@ -326,8 +315,8 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { var indices []ast.Expr - for i := range typeArgs.Len() { - indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + for t0 := range typeArgs.Types() { + indices = append(indices, TypeExpr(t0, qual)) } expr = &ast.IndexListExpr{ X: expr, diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index b53f17861..cdd36c388 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -7,13 +7,18 @@ package versions // This file contains predicates for working with file versions to // decide when a tool should consider a language feature enabled. -// GoVersions that features in x/tools can be gated to. +// named constants, to avoid misspelling const ( + Go1_17 = "go1.17" Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" Go1_21 = "go1.21" Go1_22 = "go1.22" + Go1_23 = "go1.23" + Go1_24 = "go1.24" + Go1_25 = "go1.25" + Go1_26 = "go1.26" ) // Future is an invalid unknown Go version sometime in the future. 
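The two varkind files above form a build-tagged shim: on go1.25 and later, VarKind aliases types.VarKind and GetVarKind/SetVarKind forward to the real API, while on older toolchains GetVarKind returns the zero VarKind and SetVarKind is a no-op. A minimal sketch of how a caller might use the shim uniformly across toolchains; illustrative only, since typesinternal is an internal package and this compiles only from within the x/tools module:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal" // internal: importable only within x/tools
)

const src = `package p

var global int

func f(param string) (result bool) {
	local := param != ""
	return local
}`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := typesinternal.NewTypesInfo() // allocates every Info map, per the types.go hunk above
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	for id, obj := range info.Defs {
		if v, ok := obj.(*types.Var); ok {
			// On go1.25+ this prints PackageVar/LocalVar/ParamVar/ResultVar;
			// on older toolchains the stub always reports VarKind(0).
			fmt.Printf("%s: %v\n", id.Name, typesinternal.GetVarKind(v))
		}
	}
}
```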
diff --git a/vendor/golang.org/x/tools/refactor/satisfy/find.go b/vendor/golang.org/x/tools/refactor/satisfy/find.go
new file mode 100644
index 000000000..bb3837553
--- /dev/null
+++ b/vendor/golang.org/x/tools/refactor/satisfy/find.go
@@ -0,0 +1,725 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package satisfy inspects the type-checked ASTs of Go packages and
+// reports the set of discovered type constraints of the form (lhs, rhs
+// Type) where lhs is a non-trivial interface, rhs satisfies this
+// interface, and this fact is necessary for the package to be
+// well-typed.
+//
+// It requires well-typed inputs.
+package satisfy // import "golang.org/x/tools/refactor/satisfy"
+
+// NOTES:
+//
+// We don't care about numeric conversions, so we don't descend into
+// types or constant expressions. This is unsound because
+// constant expressions can contain arbitrary statements, e.g.
+// const x = len([1]func(){func() {
+// 	...
+// }})
+//
+// Assignability conversions are possible in the following places:
+// - in assignments y = x, y := x, var y = x.
+// - from call argument types to formal parameter types
+// - in append and delete calls
+// - from return operands to result parameter types
+// - in composite literal T{k:v}, from k and v to T's field/element/key type
+// - in map[key] from key to the map's key type
+// - in comparisons x==y and switch x { case y: }.
+// - in explicit conversions T(x)
+// - in sends ch <- x, from x to the channel element type
+// - in type assertions x.(T) and switch x.(type) { case T: }
+//
+// The results of this pass provide information equivalent to the
+// ssa.MakeInterface and ssa.ChangeInterface instructions.
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+
+	"golang.org/x/tools/go/types/typeutil"
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// A Constraint records the fact that the RHS type does and must
+// satisfy the LHS type, which is an interface.
+// The names are suggestive of an assignment statement LHS = RHS.
+//
+// The constraint is implicitly universally quantified over any type
+// parameters appearing within the two types.
+type Constraint struct {
+	LHS, RHS types.Type
+}
+
+// A Finder inspects the type-checked ASTs of Go packages and
+// accumulates the set of type constraints (x, y) such that x is
+// assignable to y, y is an interface, and both x and y have methods.
+//
+// In other words, it returns the subset of the "implements" relation
+// that is checked during compilation of a package. Refactoring tools
+// will need to preserve at least this part of the relation to ensure
+// continued compilation.
+type Finder struct {
+	Result    map[Constraint]bool
+	msetcache typeutil.MethodSetCache
+
+	// per-Find state
+	info *types.Info
+	sig  *types.Signature
+}
+
+// Find inspects a single package, populating Result with its pairs of
+// constrained types.
+//
+// The result is non-canonical and thus may contain duplicates (but this
+// tends to preserve names of interface types better).
+//
+// The package must be free of type errors, and
+// info.{Defs,Uses,Selections,Types} must have been populated by the
+// type-checker.
+func (f *Finder) Find(info *types.Info, files []*ast.File) {
+	if info.Defs == nil || info.Uses == nil || info.Selections == nil || info.Types == nil {
+		panic("Finder.Find: one of info.{Defs,Uses,Selections,Types} is not populated")
+	}
+	if f.Result == nil {
+		f.Result = make(map[Constraint]bool)
+	}
+
+	f.info = info
+	for _, file := range files {
+		for _, d := range file.Decls {
+			switch d := d.(type) {
+			case *ast.GenDecl:
+				if d.Tok == token.VAR { // ignore consts
+					for _, spec := range d.Specs {
+						f.valueSpec(spec.(*ast.ValueSpec))
+					}
+				}
+
+			case *ast.FuncDecl:
+				if d.Body != nil {
+					f.sig = f.info.Defs[d.Name].Type().(*types.Signature)
+					f.stmt(d.Body)
+					f.sig = nil
+				}
+			}
+		}
+	}
+	f.info = nil
+}
+
+var (
+	tInvalid     = types.Typ[types.Invalid]
+	tUntypedBool = types.Typ[types.UntypedBool]
+	tUntypedNil  = types.Typ[types.UntypedNil]
+)
+
+// exprN visits an expression in a multi-value context.
+func (f *Finder) exprN(e ast.Expr) types.Type {
+	typ := f.info.Types[e].Type.(*types.Tuple)
+	switch e := e.(type) {
+	case *ast.ParenExpr:
+		return f.exprN(e.X)
+
+	case *ast.CallExpr:
+		// x, err := f(args)
+		sig := typeparams.CoreType(f.expr(e.Fun)).(*types.Signature)
+		f.call(sig, e.Args)
+
+	case *ast.IndexExpr:
+		// y, ok := x[i]
+		x := f.expr(e.X)
+		f.assign(f.expr(e.Index), typeparams.CoreType(x).(*types.Map).Key())
+
+	case *ast.TypeAssertExpr:
+		// y, ok := x.(T)
+		f.typeAssert(f.expr(e.X), typ.At(0).Type())
+
+	case *ast.UnaryExpr: // must be receive <-
+		// y, ok := <-x
+		f.expr(e.X)
+
+	default:
+		panic(e)
+	}
+	return typ
+}
+
+func (f *Finder) call(sig *types.Signature, args []ast.Expr) {
+	if len(args) == 0 {
+		return
+	}
+
+	// Ellipsis call?  e.g. f(x, y, z...)
+	if _, ok := args[len(args)-1].(*ast.Ellipsis); ok {
+		for i, arg := range args {
+			// The final arg is a slice, and so is the final param.
+			f.assign(sig.Params().At(i).Type(), f.expr(arg))
+		}
+		return
+	}
+
+	var argtypes []types.Type
+
+	// Gather the effective actual parameter types.
+	if tuple, ok := f.info.Types[args[0]].Type.(*types.Tuple); ok {
+		// f(g()) call where g has multiple results?
+		f.expr(args[0])
+		// unpack the tuple
+		for v := range tuple.Variables() {
+			argtypes = append(argtypes, v.Type())
+		}
+	} else {
+		for _, arg := range args {
+			argtypes = append(argtypes, f.expr(arg))
+		}
+	}
+
+	// Assign the actuals to the formals.
+	if !sig.Variadic() {
+		for i, argtype := range argtypes {
+			f.assign(sig.Params().At(i).Type(), argtype)
+		}
+	} else {
+		// The first n-1 parameters are assigned normally.
+		nnormals := sig.Params().Len() - 1
+		for i, argtype := range argtypes[:nnormals] {
+			f.assign(sig.Params().At(i).Type(), argtype)
+		}
+		// Remaining args are assigned to elements of varargs slice.
+		tElem := sig.Params().At(nnormals).Type().(*types.Slice).Elem()
+		for i := nnormals; i < len(argtypes); i++ {
+			f.assign(tElem, argtypes[i])
+		}
+	}
+}
+
+// builtin visits the arguments of a call to the built-in function obj, which has signature sig.
+func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr) {
+	switch obj.Name() {
+	case "make", "new":
+		for i, arg := range args {
+			if i == 0 && f.info.Types[arg].IsType() {
+				continue // skip the type operand
+			}
+			f.expr(arg)
+		}
+
+	case "append":
+		s := f.expr(args[0])
+		if _, ok := args[len(args)-1].(*ast.Ellipsis); ok && len(args) == 2 {
+			// append(x, y...) including append([]byte, "foo"...)
+ f.expr(args[1]) + } else { + // append(x, y, z) + tElem := typeparams.CoreType(s).(*types.Slice).Elem() + for _, arg := range args[1:] { + f.assign(tElem, f.expr(arg)) + } + } + + case "delete": + m := f.expr(args[0]) + k := f.expr(args[1]) + f.assign(typeparams.CoreType(m).(*types.Map).Key(), k) + + default: + // ordinary call + f.call(sig, args) + } +} + +func (f *Finder) extract(tuple types.Type, i int) types.Type { + if tuple, ok := tuple.(*types.Tuple); ok && i < tuple.Len() { + return tuple.At(i).Type() + } + return tInvalid +} + +func (f *Finder) valueSpec(spec *ast.ValueSpec) { + var T types.Type + if spec.Type != nil { + T = f.info.Types[spec.Type].Type + } + switch len(spec.Values) { + case len(spec.Names): // e.g. var x, y = f(), g() + for _, value := range spec.Values { + v := f.expr(value) + if T != nil { + f.assign(T, v) + } + } + + case 1: // e.g. var x, y = f() + tuple := f.exprN(spec.Values[0]) + for i := range spec.Names { + if T != nil { + f.assign(T, f.extract(tuple, i)) + } + } + } +} + +// assign records pairs of distinct types that are related by +// assignability, where the left-hand side is an interface and both +// sides have methods. +// +// It should be called for all assignability checks, type assertions, +// explicit conversions and comparisons between two types, unless the +// types are uninteresting (e.g. lhs is a concrete type, or the empty +// interface; rhs has no methods). +func (f *Finder) assign(lhs, rhs types.Type) { + if types.Identical(lhs, rhs) { + return + } + if !types.IsInterface(lhs) { + return + } + + if f.msetcache.MethodSet(lhs).Len() == 0 { + return + } + if f.msetcache.MethodSet(rhs).Len() == 0 { + return + } + // record the pair + f.Result[Constraint{lhs, rhs}] = true +} + +// typeAssert must be called for each type assertion x.(T) where x has +// interface type I. +func (f *Finder) typeAssert(I, T types.Type) { + // Type assertions are slightly subtle, because they are allowed + // to be "impossible", e.g. + // + // var x interface{f()} + // _ = x.(interface{f()int}) // legal + // + // (In hindsight, the language spec should probably not have + // allowed this, but it's too late to fix now.) + // + // This means that a type assert from I to T isn't exactly a + // constraint that T is assignable to I, but for a refactoring + // tool it is a conditional constraint that, if T is assignable + // to I before a refactoring, it should remain so after. + + if types.AssignableTo(T, I) { + f.assign(I, T) + } +} + +// compare must be called for each comparison x==y. +func (f *Finder) compare(x, y types.Type) { + if types.AssignableTo(x, y) { + f.assign(y, x) + } else if types.AssignableTo(y, x) { + f.assign(x, y) + } +} + +// expr visits a true expression (not a type or defining ident) +// and returns its type. +func (f *Finder) expr(e ast.Expr) types.Type { + tv := f.info.Types[e] + if tv.Value != nil { + return tv.Type // prune the descent for constants + } + + // tv.Type may be nil for an ast.Ident. + + switch e := e.(type) { + case *ast.BadExpr, *ast.BasicLit: + // no-op + + case *ast.Ident: + // (referring idents only) + if obj, ok := f.info.Uses[e]; ok { + return obj.Type() + } + if e.Name == "_" { // e.g. 
"for _ = range x" + return tInvalid + } + panic("undefined ident: " + e.Name) + + case *ast.Ellipsis: + if e.Elt != nil { + f.expr(e.Elt) + } + + case *ast.FuncLit: + saved := f.sig + f.sig = tv.Type.(*types.Signature) + f.stmt(e.Body) + f.sig = saved + + case *ast.CompositeLit: + switch T := typeparams.CoreType(typeparams.Deref(tv.Type)).(type) { + case *types.Struct: + for i, elem := range e.Elts { + if kv, ok := elem.(*ast.KeyValueExpr); ok { + f.assign(f.info.Uses[kv.Key.(*ast.Ident)].Type(), f.expr(kv.Value)) + } else { + f.assign(T.Field(i).Type(), f.expr(elem)) + } + } + + case *types.Map: + for _, elem := range e.Elts { + elem := elem.(*ast.KeyValueExpr) + f.assign(T.Key(), f.expr(elem.Key)) + f.assign(T.Elem(), f.expr(elem.Value)) + } + + case *types.Array, *types.Slice: + tElem := T.(interface { + Elem() types.Type + }).Elem() + for _, elem := range e.Elts { + if kv, ok := elem.(*ast.KeyValueExpr); ok { + // ignore the key + f.assign(tElem, f.expr(kv.Value)) + } else { + f.assign(tElem, f.expr(elem)) + } + } + + default: + panic(fmt.Sprintf("unexpected composite literal type %T: %v", tv.Type, tv.Type.String())) + } + + case *ast.ParenExpr: + f.expr(e.X) + + case *ast.SelectorExpr: + if _, ok := f.info.Selections[e]; ok { + f.expr(e.X) // selection + } else { + return f.info.Uses[e.Sel].Type() // qualified identifier + } + + case *ast.IndexExpr: + if instance(f.info, e.X) { + // f[T] or C[T] -- generic instantiation + } else { + // x[i] or m[k] -- index or lookup operation + x := f.expr(e.X) + i := f.expr(e.Index) + if ux, ok := typeparams.CoreType(x).(*types.Map); ok { + f.assign(ux.Key(), i) + } + } + + case *ast.IndexListExpr: + // f[X, Y] -- generic instantiation + + case *ast.SliceExpr: + f.expr(e.X) + if e.Low != nil { + f.expr(e.Low) + } + if e.High != nil { + f.expr(e.High) + } + if e.Max != nil { + f.expr(e.Max) + } + + case *ast.TypeAssertExpr: + x := f.expr(e.X) + f.typeAssert(x, f.info.Types[e.Type].Type) + + case *ast.CallExpr: + if tvFun := f.info.Types[e.Fun]; tvFun.IsType() { + // conversion + arg0 := f.expr(e.Args[0]) + f.assign(tvFun.Type, arg0) + } else { + // function call + + // unsafe call. Treat calls to functions in unsafe like ordinary calls, + // except that their signature cannot be determined by their func obj. + // Without this special handling, f.expr(e.Fun) would fail below. 
+ if s, ok := ast.Unparen(e.Fun).(*ast.SelectorExpr); ok { + if obj, ok := f.info.Uses[s.Sel].(*types.Builtin); ok && obj.Pkg().Path() == "unsafe" { + sig := f.info.Types[e.Fun].Type.(*types.Signature) + f.call(sig, e.Args) + return tv.Type + } + } + + // builtin call + if id, ok := ast.Unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := f.info.Uses[id].(*types.Builtin); ok { + sig := f.info.Types[id].Type.(*types.Signature) + f.builtin(obj, sig, e.Args) + return tv.Type + } + } + + // ordinary call + f.call(typeparams.CoreType(f.expr(e.Fun)).(*types.Signature), e.Args) + } + + case *ast.StarExpr: + f.expr(e.X) + + case *ast.UnaryExpr: + f.expr(e.X) + + case *ast.BinaryExpr: + x := f.expr(e.X) + y := f.expr(e.Y) + if e.Op == token.EQL || e.Op == token.NEQ { + f.compare(x, y) + } + + case *ast.KeyValueExpr: + f.expr(e.Key) + f.expr(e.Value) + + case *ast.ArrayType, + *ast.StructType, + *ast.FuncType, + *ast.InterfaceType, + *ast.MapType, + *ast.ChanType: + panic(e) + } + + if tv.Type == nil { + panic(fmt.Sprintf("no type for %T", e)) + } + + return tv.Type +} + +func (f *Finder) stmt(s ast.Stmt) { + switch s := s.(type) { + case *ast.BadStmt, + *ast.EmptyStmt, + *ast.BranchStmt: + // no-op + + case *ast.DeclStmt: + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { // ignore consts + for _, spec := range d.Specs { + f.valueSpec(spec.(*ast.ValueSpec)) + } + } + + case *ast.LabeledStmt: + f.stmt(s.Stmt) + + case *ast.ExprStmt: + f.expr(s.X) + + case *ast.SendStmt: + ch := f.expr(s.Chan) + val := f.expr(s.Value) + f.assign(typeparams.CoreType(ch).(*types.Chan).Elem(), val) + + case *ast.IncDecStmt: + f.expr(s.X) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + // y := x or y = x + var rhsTuple types.Type + if len(s.Lhs) != len(s.Rhs) { + rhsTuple = f.exprN(s.Rhs[0]) + } + for i := range s.Lhs { + var lhs, rhs types.Type + if rhsTuple == nil { + rhs = f.expr(s.Rhs[i]) // 1:1 assignment + } else { + rhs = f.extract(rhsTuple, i) // n:1 assignment + } + + if id, ok := s.Lhs[i].(*ast.Ident); ok { + if id.Name != "_" { + if obj, ok := f.info.Defs[id]; ok { + lhs = obj.Type() // definition + } + } + } + if lhs == nil { + lhs = f.expr(s.Lhs[i]) // assignment + } + f.assign(lhs, rhs) + } + + default: + // y op= x + f.expr(s.Lhs[0]) + f.expr(s.Rhs[0]) + } + + case *ast.GoStmt: + f.expr(s.Call) + + case *ast.DeferStmt: + f.expr(s.Call) + + case *ast.ReturnStmt: + formals := f.sig.Results() + switch len(s.Results) { + case formals.Len(): // 1:1 + for i, result := range s.Results { + f.assign(formals.At(i).Type(), f.expr(result)) + } + + case 1: // n:1 + tuple := f.exprN(s.Results[0]) + for i := 0; i < formals.Len(); i++ { + f.assign(formals.At(i).Type(), f.extract(tuple, i)) + } + } + + case *ast.SelectStmt: + f.stmt(s.Body) + + case *ast.BlockStmt: + for _, s := range s.List { + f.stmt(s) + } + + case *ast.IfStmt: + if s.Init != nil { + f.stmt(s.Init) + } + f.expr(s.Cond) + f.stmt(s.Body) + if s.Else != nil { + f.stmt(s.Else) + } + + case *ast.SwitchStmt: + if s.Init != nil { + f.stmt(s.Init) + } + var tag types.Type = tUntypedBool + if s.Tag != nil { + tag = f.expr(s.Tag) + } + for _, cc := range s.Body.List { + cc := cc.(*ast.CaseClause) + for _, cond := range cc.List { + f.compare(tag, f.info.Types[cond].Type) + } + for _, s := range cc.Body { + f.stmt(s) + } + } + + case *ast.TypeSwitchStmt: + if s.Init != nil { + f.stmt(s.Init) + } + var I types.Type + switch ass := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + I = 
f.expr(ast.Unparen(ass.X).(*ast.TypeAssertExpr).X)
+		case *ast.AssignStmt: // y := x.(type)
+			I = f.expr(ast.Unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X)
+		}
+		for _, cc := range s.Body.List {
+			cc := cc.(*ast.CaseClause)
+			for _, cond := range cc.List {
+				tCase := f.info.Types[cond].Type
+				if tCase != tUntypedNil {
+					f.typeAssert(I, tCase)
+				}
+			}
+			for _, s := range cc.Body {
+				f.stmt(s)
+			}
+		}
+
+	case *ast.CommClause:
+		if s.Comm != nil {
+			f.stmt(s.Comm)
+		}
+		for _, s := range s.Body {
+			f.stmt(s)
+		}
+
+	case *ast.ForStmt:
+		if s.Init != nil {
+			f.stmt(s.Init)
+		}
+		if s.Cond != nil {
+			f.expr(s.Cond)
+		}
+		if s.Post != nil {
+			f.stmt(s.Post)
+		}
+		f.stmt(s.Body)
+
+	case *ast.RangeStmt:
+		x := f.expr(s.X)
+		// No conversions are involved when Tok==DEFINE.
+		if s.Tok == token.ASSIGN {
+			if s.Key != nil {
+				k := f.expr(s.Key)
+				var xelem types.Type
+				// Keys of array, *array, slice, string aren't interesting
+				// since the RHS key type is just an int.
+				switch ux := typeparams.CoreType(x).(type) {
+				case *types.Chan:
+					xelem = ux.Elem()
+				case *types.Map:
+					xelem = ux.Key()
+				}
+				if xelem != nil {
+					f.assign(k, xelem)
+				}
+			}
+			if s.Value != nil {
+				val := f.expr(s.Value)
+				var xelem types.Type
+				// Values of type string aren't interesting because
+				// the RHS value type is just a rune.
+				switch ux := typeparams.CoreType(x).(type) {
+				case *types.Array:
+					xelem = ux.Elem()
+				case *types.Map:
+					xelem = ux.Elem()
+				case *types.Pointer: // *array
+					xelem = typeparams.CoreType(typeparams.Deref(ux)).(*types.Array).Elem()
+				case *types.Slice:
+					xelem = ux.Elem()
+				}
+				if xelem != nil {
+					f.assign(val, xelem)
+				}
+			}
+		}
+		f.stmt(s.Body)
+
+	default:
+		panic(s)
+	}
+}
+
+// -- Plundered from golang.org/x/tools/go/ssa -----------------
+
+func instance(info *types.Info, expr ast.Expr) bool {
+	var id *ast.Ident
+	switch x := expr.(type) {
+	case *ast.Ident:
+		id = x
+	case *ast.SelectorExpr:
+		id = x.Sel
+	default:
+		return false
+	}
+	_, ok := info.Instances[id]
+	return ok
+}
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
index 28e351693..86fefd5bf 100644
--- a/vendor/gopkg.in/evanphx/json-patch.v4/README.md
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
@@ -4,7 +4,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
 
 [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
-[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
+[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml)
 [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
 
 # Get It!
@@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie
 go get -u github.com/evanphx/json-patch/v5
 ```
 
-**Stable Versions**:
-* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
-* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4`
 
 (previous versions below `v3` are unavailable)
@@ -314,4 +312,4 @@ go test -cover ./...
 ```
 
 Builds for pull requests are tested automatically
-using [TravisCI](https://travis-ci.org/evanphx/json-patch).
+using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go index dc2b7e51e..95136681b 100644 --- a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go @@ -3,11 +3,10 @@ package jsonpatch import ( "bytes" "encoding/json" + "errors" "fmt" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) } // From reads the "from" field of the Operation. @@ -294,7 +293,7 @@ func (o Operation) From() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) } func (o Operation) value() *lazyNode { @@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) { return v, nil } - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) } func isArray(buf []byte) bool { @@ -359,7 +358,7 @@ func findObject(pd *container, path string) (container, string) { next, ok := doc.get(decodePatchKey(part)) - if next == nil || ok != nil { + if next == nil || ok != nil || next.raw == nil { return nil, "" } @@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing) } delete(*d, key) @@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error { if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } @@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) } sz := len(*d) + 1 @@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(ary) } @@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) { if idx < 0 { if !SupportNegativeIndices { - return nil, 
errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } return (*d)[idx], nil @@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(cur) } @@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error { func (p Patch) add(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.add(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) + return fmt.Errorf("error in add for path: '%s': %w", path, err) } return nil @@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error { func (p Patch) remove(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error { func (p Patch) replace(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") + return fmt.Errorf("replace operation failed to decode path: %w", err) } if path == "" { @@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error { if val.which == eRaw { if !val.tryDoc() { if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") + return fmt.Errorf("replace operation value must be object or array: %w", err) } } } @@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) 
error { case eDoc: *doc = &val.doc case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") + return fmt.Errorf("replace operation hit impossible case: %w", err) } return nil @@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error { con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) } _, ok := con.get(key) if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) } err = con.set(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -613,39 +612,39 @@ func (p Patch) replace(doc *container, op Operation) error { func (p Patch) move(doc *container, op Operation) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") + return fmt.Errorf("move operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") + return fmt.Errorf("move operation failed to decode path: %w", err) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } err = con.add(key, val) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) + return fmt.Errorf("error in move for path: '%s': %w", path, err) } return nil @@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error { func (p Patch) test(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") + return fmt.Errorf("test operation failed to decode path: %w", err) } if path == "" { @@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) + return fmt.Errorf("error in test for path: '%s': %w", path, err) } if val == nil { - if op.value().raw == nil 
{ + if op.value() == nil || op.value().raw == nil { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } if val.equal(op.value()) { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") + return fmt.Errorf("copy operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) + return fmt.Errorf("error in copy for from: '%s': %w", from, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } valCopy, sz, err := deepCopy(val) if err != nil { - return errors.Wrapf(err, "error while performing deep copy") + return fmt.Errorf("error while performing deep copy: %w", err) } (*accumulatedCopySize) += int64(sz) @@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er err = con.add(key, valCopy) if err != nil { - return errors.Wrapf(err, "error while adding value during copy") + return fmt.Errorf("error while adding value during copy: %w", err) } return nil diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 5f983b6b6..e07c04e62 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -596,16 +596,7 @@ func newInformer(clientState Store, options InformerOptions) Controller { // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
- var fifo Queue - if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { - fifo = NewRealFIFO(MetaNamespaceKeyFunc, clientState, options.Transform) - } else { - fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: clientState, - EmitDeltaTypeReplaced: true, - Transformer: options.Transform, - }) - } + fifo := newQueueFIFO(clientState, options.Transform) cfg := &Config{ Queue: fifo, @@ -623,3 +614,15 @@ func newInformer(clientState Store, options InformerOptions) Controller { } return New(cfg) } + +func newQueueFIFO(clientState Store, transform TransformFunc) Queue { + if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { + return NewRealFIFO(MetaNamespaceKeyFunc, clientState, transform) + } else { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + EmitDeltaTypeReplaced: true, + Transformer: transform, + }) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 9d9e238cc..a0d7a834a 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -270,7 +270,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { } var ( - _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue + _ = TransformingStore(&DeltaFIFO{}) // DeltaFIFO implements TransformingStore to allow memory optimizations ) var ( diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index ee9be7727..6fd43375f 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -80,7 +80,7 @@ type ReflectorStore interface { // TransformingStore is an optional interface that can be implemented by the provided store. // If implemented on the provided store reflector will use the same transformer in its internal stores. type TransformingStore interface { - Store + ReflectorStore Transformer() TransformFunc } @@ -726,9 +726,11 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) { return false } + var transformer TransformFunc storeOpts := []StoreOption{} if tr, ok := r.store.(TransformingStore); ok && tr.Transformer() != nil { - storeOpts = append(storeOpts, WithTransformer(tr.Transformer())) + transformer = tr.Transformer() + storeOpts = append(storeOpts, WithTransformer(transformer)) } initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) @@ -788,7 +790,7 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) { // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. 
- checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, temporaryStore.List) + checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, transformer, temporaryStore.List) if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %w", err) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go index a7e0d9c43..4119c78a6 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -33,11 +33,11 @@ import ( // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. -func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], listItemTransformFunc func(interface{}) (interface{}, error), retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } // for informers we pass an empty ListOptions because // listFn might be wrapped for filtering during informer construction. - consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) + consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, listItemTransformFunc, metav1.ListOptions{}, retrieveItemsFn) } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 99e5fcd18..1c12aa2d6 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -539,16 +539,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) { s.startedLock.Lock() defer s.startedLock.Unlock() - var fifo Queue - if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) { - fifo = NewRealFIFO(MetaNamespaceKeyFunc, s.indexer, s.transform) - } else { - fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: s.indexer, - EmitDeltaTypeReplaced: true, - Transformer: s.transform, - }) - } + fifo := newQueueFIFO(s.indexer, s.transform) cfg := &Config{ Queue: fifo, diff --git a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go index ef322bea8..b907410dc 100644 --- a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go @@ -61,7 +61,8 @@ type RealFIFO struct { } var ( - _ = Queue(&RealFIFO{}) // RealFIFO is a Queue + _ = Queue(&RealFIFO{}) // RealFIFO is a Queue + _ = TransformingStore(&RealFIFO{}) // RealFIFO implements TransformingStore to allow memory optimizations ) // Close the queue. 
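The client-go changes above consolidate the InOrderInformers feature-gate branch into newQueueFIFO and widen TransformingStore to embed ReflectorStore, so the reflector can discover a store's transformer; both DeltaFIFO and RealFIFO now assert that they implement it. A minimal sketch of the detection pattern, assuming a client-go version that includes these changes (ReflectorStore and TransformingStore are recent additions):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// A no-op transformer standing in for the usual "strip managed fields"
	// transform that informers apply to reduce memory use.
	var transform cache.TransformFunc = func(obj interface{}) (interface{}, error) {
		return obj, nil
	}

	// The DeltaFIFO branch of newQueueFIFO (the RealFIFO branch is taken
	// when the InOrderInformers feature gate is enabled).
	known := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var fifo cache.ReflectorStore = cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KnownObjects:          known,
		EmitDeltaTypeReplaced: true,
		Transformer:           transform,
	})

	// The reflector-side check from the diff: if the store also implements
	// TransformingStore, the reflector reuses its transformer internally.
	if tr, ok := fifo.(cache.TransformingStore); ok && tr.Transformer() != nil {
		fmt.Println("reflector will reuse the store's transformer")
	}
}
```

Threading the transformer through the store this way is what lets watchList apply the same transform to the temporary store it builds, and what the checkWatchListDataConsistencyIfRequested signature change passes along for the consistency check.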
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 5d2054155..79a748b74 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -77,6 +77,9 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) if ll.Labels != nil { + if ll.lease.Labels == nil { + ll.lease.Labels = map[string]string{} + } // Only overwrite the labels that are specifically set for k, v := range ll.Labels { ll.lease.Labels[k] = v diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 122046126..48c78b595 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro CommonName: cfg.CommonName, Organization: cfg.Organization, }, - DNSNames: []string{cfg.CommonName}, NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } + if len(cfg.CommonName) > 0 { + tmpl.DNSNames = []string{cfg.CommonName} + } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go index 06f172d82..72c0124a0 100644 --- a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -45,16 +45,28 @@ func IsDataConsistencyDetectionForWatchListEnabled() bool { return dataConsistencyDetectionForWatchListEnabled } +// SetDataConsistencyDetectionForWatchListEnabledForTest allows to enable/disable data consistency detection for testing purposes. +// It returns a function that restores the original value. +func SetDataConsistencyDetectionForWatchListEnabledForTest(enabled bool) func() { + original := dataConsistencyDetectionForWatchListEnabled + dataConsistencyDetectionForWatchListEnabled = enabled + return func() { + dataConsistencyDetectionForWatchListEnabled = original + } +} + type RetrieveItemsFunc[U any] func() []U type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) +type TransformFunc func(interface{}) (interface{}, error) + // CheckDataConsistency exists solely for testing purposes. // we cannot use checkWatchListDataConsistencyIfRequested because // it is guarded by an environmental variable. // we cannot manipulate the environmental variable because // it will affect other tests in this package. 
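The LeaseLock.Update hunk above is a plain nil-map guard: a Lease created without labels has a nil Labels map, and the selective-overwrite loop would panic on its first assignment. A standalone illustration of the failure mode:

```go
package main

import "fmt"

func main() {
	var labels map[string]string // nil, like Lease.Labels on an unlabeled Lease

	// labels["app"] = "leader-election" // panics: assignment to entry in nil map

	if labels == nil { // the guard the patch adds before the copy loop
		labels = map[string]string{}
	}
	labels["app"] = "leader-election" // safe
	fmt.Println(labels)
}
```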
-func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listItemTransformFunc TransformFunc, listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity) return @@ -84,6 +96,15 @@ func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity if err != nil { panic(err) // this should never happen } + if listItemTransformFunc != nil { + for i := range rawListItems { + obj, err := listItemTransformFunc(rawListItems[i]) + if err != nil { + panic(err) + } + rawListItems[i] = obj.(runtime.Object) + } + } listItems := toMetaObjectSliceOrDie(rawListItems) sort.Sort(byUID(listItems)) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go index 6eee935b2..830ec3ca0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -92,10 +92,21 @@ type OpenAPICanonicalTypeNamer interface { OpenAPICanonicalTypeName() string } +// OpenAPIModelNamer is an interface Go types may implement to provide an OpenAPI model name. +// +// This takes precedence over OpenAPICanonicalTypeNamer, and should be used when a Go type has a model +// name that differs from its canonical type name as determined by Go package name reflection. 
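CheckDataConsistency now runs the informer's transform over the freshly listed items before diffing, so the (already transformed) store contents are compared against equally transformed LIST results. The exported test toggle pairs with it; a hedged sketch of the intended test usage (the test body is hypothetical):

```go
package consistency_test

import (
	"testing"

	"k8s.io/client-go/util/consistencydetector"
)

func TestWatchListConsistency(t *testing.T) {
	// Enable the detector for this test only; the returned func restores
	// the previous value on exit.
	restore := consistencydetector.SetDataConsistencyDetectionForWatchListEnabledForTest(true)
	defer restore()

	// ... drive an informer through a watch-list sync here. With the
	// detector on, a follow-up LIST is fetched, passed through the same
	// TransformFunc the informer used, and compared item by item against
	// what the reflector stored; any mismatch panics so CI notices.
}
```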
+type OpenAPIModelNamer interface { + OpenAPIModelName() string +} + // GetCanonicalTypeName will find the canonical type name of a sample object, removing // the "vendor" part of the path func GetCanonicalTypeName(model interface{}) string { - if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { + switch namer := model.(type) { + case OpenAPIModelNamer: + return namer.OpenAPIModelName() + case OpenAPICanonicalTypeNamer: return namer.OpenAPICanonicalTypeName() } t := reflect.TypeOf(model) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go index 97b2f989e..23109816e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go @@ -17,7 +17,6 @@ package strfmt import ( "encoding/base64" "encoding/json" - "fmt" "net/mail" "regexp" "strings" @@ -247,29 +246,6 @@ func (b *Base64) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (b *Base64) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v))) - n, err := base64.StdEncoding.Decode(dbuf, v) - if err != nil { - return err - } - *b = dbuf[:n] - case string: - vv, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err - } - *b = Base64(vv) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v", v) - } - - return nil -} - func (b Base64) String() string { return base64.StdEncoding.EncodeToString([]byte(b)) } @@ -324,20 +300,6 @@ func (u *URI) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *URI) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = URI(string(v)) - case string: - *u = URI(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return nil -} - func (u URI) String() string { return string(u) } @@ -388,20 +350,6 @@ func (e *Email) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (e *Email) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *e = Email(string(v)) - case string: - *e = Email(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v", v) - } - - return nil -} - func (e Email) String() string { return string(e) } @@ -452,20 +400,6 @@ func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *Hostname) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = Hostname(string(v)) - case string: - *h = Hostname(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v", v) - } - - return nil -} - func (h Hostname) String() string { return string(h) } @@ -516,20 +450,6 @@ func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *IPv4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv4(string(v)) - case string: - *u = IPv4(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u IPv4) String() string { return string(u) } @@ -580,20 +500,6 @@ func (u *IPv6) UnmarshalText(data []byte) error { // validation is 
performed lat return nil } -// Scan read a value from a database driver -func (u *IPv6) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv6(string(v)) - case string: - *u = IPv6(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v", v) - } - - return nil -} - func (u IPv6) String() string { return string(u) } @@ -644,20 +550,6 @@ func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *CIDR) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CIDR(string(v)) - case string: - *u = CIDR(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v", v) - } - - return nil -} - func (u CIDR) String() string { return string(u) } @@ -708,20 +600,6 @@ func (u *MAC) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *MAC) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = MAC(string(v)) - case string: - *u = MAC(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u MAC) String() string { return string(u) } @@ -772,20 +650,6 @@ func (u *UUID) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *UUID) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID(string(v)) - case string: - *u = UUID(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v", v) - } - - return nil -} - func (u UUID) String() string { return string(u) } @@ -839,20 +703,6 @@ func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID3) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID3(string(v)) - case string: - *u = UUID3(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v", v) - } - - return nil -} - func (u UUID3) String() string { return string(u) } @@ -906,20 +756,6 @@ func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID4(string(v)) - case string: - *u = UUID4(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v", v) - } - - return nil -} - func (u UUID4) String() string { return string(u) } @@ -973,20 +809,6 @@ func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID5) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID5(string(v)) - case string: - *u = UUID5(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v", v) - } - - return nil -} - func (u UUID5) String() string { return string(u) } @@ -1040,20 +862,6 @@ func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *ISBN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN(string(v)) - case string: - *u = ISBN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v", v) - } - - return nil -} - func (u ISBN) String() string { return string(u) } @@ 
-1107,20 +915,6 @@ func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN10) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN10(string(v)) - case string: - *u = ISBN10(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v", v) - } - - return nil -} - func (u ISBN10) String() string { return string(u) } @@ -1174,20 +968,6 @@ func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN13) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN13(string(v)) - case string: - *u = ISBN13(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v", v) - } - - return nil -} - func (u ISBN13) String() string { return string(u) } @@ -1241,20 +1021,6 @@ func (u *CreditCard) UnmarshalText(data []byte) error { // validation is perform return nil } -// Scan read a value from a database driver -func (u *CreditCard) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CreditCard(string(v)) - case string: - *u = CreditCard(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v", v) - } - - return nil -} - func (u CreditCard) String() string { return string(u) } @@ -1308,20 +1074,6 @@ func (u *SSN) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *SSN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = SSN(string(v)) - case string: - *u = SSN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v", v) - } - - return nil -} - func (u SSN) String() string { return string(u) } @@ -1375,20 +1127,6 @@ func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *HexColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = HexColor(string(v)) - case string: - *h = HexColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v", v) - } - - return nil -} - func (h HexColor) String() string { return string(h) } @@ -1442,20 +1180,6 @@ func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *RGBColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = RGBColor(string(v)) - case string: - *r = RGBColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v", v) - } - - return nil -} - func (r RGBColor) String() string { return string(r) } @@ -1510,20 +1234,6 @@ func (r *Password) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *Password) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = Password(string(v)) - case string: - *r = Password(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v", v) - } - - return nil -} - func (r Password) String() string { return string(r) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go index 8fbeb635f..04545296b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go +++ 
b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go @@ -119,23 +119,6 @@ func ParseDuration(cand string) (time.Duration, error) { return 0, fmt.Errorf("unable to parse %s as duration", cand) } -// Scan reads a Duration value from database driver type. -func (d *Duration) Scan(raw interface{}) error { - switch v := raw.(type) { - // TODO: case []byte: // ? - case int64: - *d = Duration(v) - case float64: - *d = Duration(int64(v)) - case nil: - *d = Duration(0) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v", v) - } - - return nil -} - // String converts this duration to a string func (d Duration) String() string { return time.Duration(d).String() diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go index b2324db05..d0fd31a9d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go @@ -16,7 +16,6 @@ package strfmt import ( "encoding/json" - "fmt" "regexp" "strings" "time" @@ -114,25 +113,6 @@ func (t *DateTime) UnmarshalText(text []byte) error { return nil } -// Scan scans a DateTime value from database driver type. -func (t *DateTime) Scan(raw interface{}) error { - // TODO: case int64: and case float64: ? - switch v := raw.(type) { - case []byte: - return t.UnmarshalText(v) - case string: - return t.UnmarshalText([]byte(v)) - case time.Time: - *t = DateTime(v) - case nil: - *t = DateTime{} - default: - return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v", v) - } - - return nil -} - // MarshalJSON returns the DateTime as JSON func (t DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(time.Time(t).Format(MarshalFormat)) diff --git a/vendor/k8s.io/utils/buffer/ring_fixed.go b/vendor/k8s.io/utils/buffer/ring_fixed.go new file mode 100644 index 000000000..a104e12a3 --- /dev/null +++ b/vendor/k8s.io/utils/buffer/ring_fixed.go @@ -0,0 +1,120 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +import ( + "errors" + "io" +) + +// Compile-time check that *TypedRingFixed[byte] implements io.Writer. +var _ io.Writer = (*TypedRingFixed[byte])(nil) + +// ErrInvalidSize indicates size must be > 0 +var ErrInvalidSize = errors.New("size must be positive") + +// TypedRingFixed is a fixed-size circular buffer for elements of type T. +// Writes overwrite older data, keeping only the last N elements. +// Not thread safe. +type TypedRingFixed[T any] struct { + data []T + size int + writeCursor int + written int64 +} + +// NewTypedRingFixed creates a circular buffer with the given capacity (must be > 0). +func NewTypedRingFixed[T any](size int) (*TypedRingFixed[T], error) { + if size <= 0 { + return nil, ErrInvalidSize + } + return &TypedRingFixed[T]{ + data: make([]T, size), + size: size, + }, nil +} + +// Write writes p to the buffer, overwriting old data if needed. 
+func (r *TypedRingFixed[T]) Write(p []T) (int, error) { + originalLen := len(p) + r.written += int64(originalLen) + + // If the input is larger than our buffer, only keep the last 'size' elements + if originalLen > r.size { + p = p[originalLen-r.size:] + } + + // Copy data, handling wrap-around + n := len(p) + remain := r.size - r.writeCursor + if n <= remain { + copy(r.data[r.writeCursor:], p) + } else { + copy(r.data[r.writeCursor:], p[:remain]) + copy(r.data, p[remain:]) + } + + r.writeCursor = (r.writeCursor + n) % r.size + return originalLen, nil +} + +// Slice returns buffer contents in write order. Don't modify the returned slice. +func (r *TypedRingFixed[T]) Slice() []T { + if r.written == 0 { + return nil + } + + // Buffer hasn't wrapped yet + if r.written < int64(r.size) { + return r.data[:r.writeCursor] + } + + // Buffer has wrapped - need to return data in correct order + // Data from writeCursor to end is oldest, data from 0 to writeCursor is newest + if r.writeCursor == 0 { + return r.data + } + + out := make([]T, r.size) + copy(out, r.data[r.writeCursor:]) + copy(out[r.size-r.writeCursor:], r.data[:r.writeCursor]) + return out +} + +// Size returns the buffer capacity. +func (r *TypedRingFixed[T]) Size() int { + return r.size +} + +// Len returns how many elements are currently in the buffer. +func (r *TypedRingFixed[T]) Len() int { + if r.written < int64(r.size) { + return int(r.written) + } + return r.size +} + +// TotalWritten returns total elements ever written (including overwritten ones). +func (r *TypedRingFixed[T]) TotalWritten() int64 { + return r.written +} + +// Reset clears the buffer. +func (r *TypedRingFixed[T]) Reset() { + r.writeCursor = 0 + r.written = 0 +} diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go index d9c91e3ca..b7cde7fd8 100644 --- a/vendor/k8s.io/utils/exec/exec.go +++ b/vendor/k8s.io/utils/exec/exec.go @@ -18,6 +18,7 @@ package exec import ( "context" + "errors" "io" "io/fs" osexec "os/exec" @@ -97,6 +98,21 @@ func New() Interface { return &executor{} } +// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19 +// specifically set the Err field to nil (LookPath returns a new error when the file +// is resolved to the current directory. +func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + cmd.Err = maskErrDot(cmd.Err) + return cmd +} + +func maskErrDot(err error) error { + if err != nil && errors.Is(err, osexec.ErrDot) { + return nil + } + return err +} + // Command is part of the Interface interface. func (executor *executor) Command(cmd string, args ...string) Cmd { return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...))) diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go deleted file mode 100644 index acf45f1cd..000000000 --- a/vendor/k8s.io/utils/exec/fixup_go118.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !go1.19 -// +build !go1.19 - -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
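A usage sketch for the new TypedRingFixed: it retains only the last N elements, and the io.Writer assertion makes the byte instantiation drop-in for log tails and similar bounded capture. The printed values follow directly from the Write/Slice logic above:

```go
package main

import (
	"fmt"

	"k8s.io/utils/buffer"
)

func main() {
	ring, err := buffer.NewTypedRingFixed[byte](8)
	if err != nil {
		panic(err) // only possible for size <= 0
	}

	fmt.Fprintf(ring, "hello, ring buffer") // 18 bytes into an 8-byte ring
	fmt.Printf("%s\n", ring.Slice())        // "g buffer": the last 8 bytes

	fmt.Println(ring.Len(), ring.TotalWritten()) // 8 18
}
```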
-*/ - -package exec - -import ( - osexec "os/exec" -) - -func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { - return cmd -} - -func maskErrDot(err error) error { - return err -} diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go deleted file mode 100644 index 55874c929..000000000 --- a/vendor/k8s.io/utils/exec/fixup_go119.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build go1.19 -// +build go1.19 - -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package exec - -import ( - "errors" - osexec "os/exec" -) - -// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19 -// specifically set the Err field to nil (LookPath returns a new error when the file -// is resolved to the current directory. -func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { - cmd.Err = maskErrDot(cmd.Err) - return cmd -} - -func maskErrDot(err error) error { - if err != nil && errors.Is(err, osexec.ErrDot) { - return nil - } - return err -} diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go index 7cb7795be..e5d508055 100644 --- a/vendor/k8s.io/utils/net/multi_listen.go +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" ) // connErrPair pairs conn and error which is returned by accept on sub-listeners. @@ -38,6 +39,7 @@ type multiListener struct { connCh chan connErrPair // stopCh communicates from parent to child listeners. stopCh chan struct{} + closed atomic.Bool } // compile time check to ensure *multiListener implements net.Listener @@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) { // the go-routines to exit. func (ml *multiListener) Close() error { // Make sure this can be called repeatedly without explosions. - select { - case <-ml.stopCh: + if !ml.closed.CompareAndSwap(false, true) { return fmt.Errorf("use of closed network connection") - default: } // Tell all sub-listeners to stop. diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go deleted file mode 100644 index 8e21838f2..000000000 --- a/vendor/k8s.io/utils/strings/slices/slices.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slices defines various functions useful with slices of string type. -// The goal is to be as close as possible to -// https://github.com/golang/go/issues/45955. 
Ideal would be if we can just -// replace "stringslices" if the "slices" package becomes standard. -package slices - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in index order, and the -// comparison stops at the first unequal pair. -func Equal(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - for i, n := range s1 { - if n != s2[i] { - return false - } - } - return true -} - -// Filter appends to d each element e of s for which keep(e) returns true. -// It returns the modified d. d may be s[:0], in which case the kept -// elements will be stored in the same slice. -// if the slices overlap in some other way, the results are unspecified. -// To create a new slice with the filtered results, pass nil for d. -func Filter(d, s []string, keep func(string) bool) []string { - for _, n := range s { - if keep(n) { - d = append(d, n) - } - } - return d -} - -// Contains reports whether v is present in s. -func Contains(s []string, v string) bool { - return Index(s, v) >= 0 -} - -// Index returns the index of the first occurrence of v in s, or -1 if -// not present. -func Index(s []string, v string) int { - // "Contains" may be replaced with "Index(s, v) >= 0": - // https://github.com/golang/go/issues/45955#issuecomment-873377947 - for i, n := range s { - if n == v { - return i - } - } - return -1 -} - -// Functions below are not in https://github.com/golang/go/issues/45955 - -// Clone returns a new clone of s. -func Clone(s []string) []string { - // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3eb1d928a..8224982eb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -31,7 +31,7 @@ github.com/Antonboom/testifylint/internal/testify ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c +# github.com/BurntSushi/toml v1.5.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -201,8 +201,8 @@ github.com/containerd/typeurl/v2 # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd/v22 v22.5.0 -## explicit; go 1.12 +# github.com/coreos/go-systemd/v22 v22.6.0 +## explicit; go 1.23 github.com/coreos/go-systemd/v22/daemon github.com/coreos/go-systemd/v22/dbus github.com/coreos/go-systemd/v22/journal @@ -210,9 +210,19 @@ github.com/coreos/go-systemd/v22/journal ## explicit; go 1.21 github.com/curioswitch/go-reassign github.com/curioswitch/go-reassign/internal/analyzer -# github.com/cyphar/filepath-securejoin v0.4.1 +# github.com/cyphar/filepath-securejoin v0.6.1 => github.com/cyphar/filepath-securejoin v0.5.2 ## explicit; go 1.18 github.com/cyphar/filepath-securejoin +github.com/cyphar/filepath-securejoin/internal/consts +github.com/cyphar/filepath-securejoin/pathrs-lite +github.com/cyphar/filepath-securejoin/pathrs-lite/internal +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/assert +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/fd +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/gocompat +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/kernelversion +github.com/cyphar/filepath-securejoin/pathrs-lite/internal/linux 
+github.com/cyphar/filepath-securejoin/pathrs-lite/internal/procfs +github.com/cyphar/filepath-securejoin/pathrs-lite/procfs # github.com/daixiang0/gci v0.13.5 ## explicit; go 1.21 github.com/daixiang0/gci/pkg/config @@ -640,8 +650,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 -## explicit; go 1.23 +# github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 +## explicit; go 1.24.0 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit @@ -889,7 +899,7 @@ github.com/nunnatsa/ginkgolinter/version # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/onsi/ginkgo/v2 v2.27.2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 +# github.com/onsi/ginkgo/v2 v2.28.1 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 ## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -911,8 +921,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.38.2 -## explicit; go 1.23.0 +# github.com/onsi/gomega v1.39.1 +## explicit; go 1.24.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gcustom @@ -952,10 +962,10 @@ github.com/opencontainers/runc/libcontainer/cgroups github.com/opencontainers/runc/libcontainer/configs github.com/opencontainers/runc/libcontainer/devices github.com/opencontainers/runc/libcontainer/utils -# github.com/opencontainers/runtime-spec v1.2.0 +# github.com/opencontainers/runtime-spec v1.3.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/selinux v1.11.1 +# github.com/opencontainers/selinux v1.13.0 ## explicit; go 1.19 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label @@ -976,7 +986,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo github.com/openshift-eng/openshift-tests-extension/pkg/junit github.com/openshift-eng/openshift-tests-extension/pkg/util/sets github.com/openshift-eng/openshift-tests-extension/pkg/version -# github.com/openshift/api v0.0.0-20260114133223-6ab113cb7368 +# github.com/openshift/api v0.0.0-20260213155647-8fe9fe363807 ## explicit; go 1.24.0 github.com/openshift/api github.com/openshift/api/annotations @@ -999,6 +1009,8 @@ github.com/openshift/api/config/v1alpha1 github.com/openshift/api/config/v1alpha2 github.com/openshift/api/console github.com/openshift/api/console/v1 +github.com/openshift/api/etcd +github.com/openshift/api/etcd/v1alpha1 github.com/openshift/api/features github.com/openshift/api/helm github.com/openshift/api/helm/v1beta1 @@ -1053,7 +1065,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20251202151200-fb4471581cf8 +# github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 ## explicit; go 1.24.0 github.com/openshift/client-go/config/applyconfigurations github.com/openshift/client-go/config/applyconfigurations/config/v1 @@ -1110,7 +1122,10 @@ github.com/openshift/cluster-api-actuator-pkg/testutils/resourcebuilder/machine/ 
github.com/openshift/cluster-control-plane-machine-set-operator/pkg/machineproviders/providers/openshift/machine/v1beta1/failuredomain github.com/openshift/cluster-control-plane-machine-set-operator/pkg/machineproviders/providers/openshift/machine/v1beta1/providerconfig github.com/openshift/cluster-control-plane-machine-set-operator/test/e2e/framework -# github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5 +# github.com/openshift/controller-runtime-common v0.0.0-20260213175913-767fef058eca +## explicit; go 1.24.0 +github.com/openshift/controller-runtime-common/pkg/tls +# github.com/openshift/library-go v0.0.0-20260213153706-03f1709971c5 ## explicit; go 1.24.0 github.com/openshift/library-go/pkg/apiserver/jsonpatch github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer @@ -1557,13 +1572,13 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# go.yaml.in/yaml/v2 v2.4.2 +# go.yaml.in/yaml/v2 v2.4.3 ## explicit; go 1.15 go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.43.0 +# golang.org/x/crypto v0.47.0 ## explicit; go 1.24.0 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -1586,13 +1601,13 @@ golang.org/x/exp/slices # golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.28.0 +# golang.org/x/mod v0.32.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.46.0 +# golang.org/x/net v0.49.0 ## explicit; go 1.24.0 golang.org/x/net/context golang.org/x/net/html @@ -1612,12 +1627,12 @@ golang.org/x/net/websocket ## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.17.0 +# golang.org/x/sync v0.19.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.37.0 +# golang.org/x/sys v0.40.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -1626,10 +1641,10 @@ golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/mgr -# golang.org/x/term v0.36.0 +# golang.org/x/term v0.39.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.30.0 +# golang.org/x/text v0.33.0 ## explicit; go 1.24.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -1663,7 +1678,7 @@ golang.org/x/text/width # golang.org/x/time v0.14.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.37.0 +# golang.org/x/tools v0.41.0 ## explicit; go 1.24.0 golang.org/x/tools/cover golang.org/x/tools/go/analysis @@ -1689,7 +1704,6 @@ golang.org/x/tools/go/analysis/passes/framepointer golang.org/x/tools/go/analysis/passes/httpresponse golang.org/x/tools/go/analysis/passes/ifaceassert golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/analysis/passes/internal/analysisutil golang.org/x/tools/go/analysis/passes/loopclosure golang.org/x/tools/go/analysis/passes/lostcancel golang.org/x/tools/go/analysis/passes/nilfunc @@ -1730,7 +1744,8 @@ golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/imports golang.org/x/tools/internal/aliases -golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/analysis/analyzerutil +golang.org/x/tools/internal/analysis/typeindex 
golang.org/x/tools/internal/astutil golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -1743,16 +1758,18 @@ golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports golang.org/x/tools/internal/modindex golang.org/x/tools/internal/moreiters +golang.org/x/tools/internal/packagepath golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/refactor golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/typesinternal/typeindex golang.org/x/tools/internal/versions +golang.org/x/tools/refactor/satisfy # golang.org/x/tools/go/expect v0.1.1-deprecated ## explicit; go 1.23.0 -# golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated -## explicit; go 1.23.0 # gomodules.xyz/jsonpatch/v2 v2.5.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 @@ -1876,7 +1893,7 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/evanphx/json-patch.v4 v4.12.0 +# gopkg.in/evanphx/json-patch.v4 v4.13.0 ## explicit gopkg.in/evanphx/json-patch.v4 # gopkg.in/gcfg.v1 v1.2.3 @@ -2085,7 +2102,7 @@ honnef.co/go/tools/stylecheck/st1021 honnef.co/go/tools/stylecheck/st1022 honnef.co/go/tools/stylecheck/st1023 honnef.co/go/tools/unused -# k8s.io/api v0.34.1 +# k8s.io/api v0.34.3 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -2147,7 +2164,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.1 +# k8s.io/apiextensions-apiserver v0.34.3 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -2197,7 +2214,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.34.1 +# k8s.io/apimachinery v0.34.3 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/apitesting/fuzzer @@ -2278,7 +2295,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 +# k8s.io/apiserver v0.34.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -2452,7 +2469,7 @@ k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.34.1 +# k8s.io/client-go v0.34.3 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -2846,7 +2863,7 @@ k8s.io/cloud-provider-vsphere/pkg/common/config # k8s.io/cluster-bootstrap v0.33.3 ## explicit; go 1.24.0 k8s.io/cluster-bootstrap/token/api -# k8s.io/component-base v0.34.1 +# k8s.io/component-base v0.34.3 ## explicit; go 1.24.0 
k8s.io/component-base/cli/flag k8s.io/component-base/codec @@ -2947,7 +2964,7 @@ k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/internal/verbosity k8s.io/klog/v2/textlogger -# k8s.io/kms v0.34.1 +# k8s.io/kms v0.34.3 ## explicit; go 1.24.0 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -2962,8 +2979,8 @@ k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1 -# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b -## explicit; go 1.23 +# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 +## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/aggregator k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 @@ -3453,8 +3470,8 @@ k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1 k8s.io/sample-apiserver/pkg/generated/applyconfiguration/wardle/v1alpha1 k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1 -# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 -## explicit; go 1.18 +# k8s.io/utils v0.0.0-20260108192941-914a6e750570 +## explicit; go 1.23 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/cpuset @@ -3471,7 +3488,6 @@ k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings -k8s.io/utils/strings/slices k8s.io/utils/trace # mvdan.cc/gofumpt v0.7.0 ## explicit; go 1.22 @@ -3497,7 +3513,7 @@ sigs.k8s.io/cluster-api/api/ipam/v1beta1 sigs.k8s.io/cluster-api/api/ipam/v1beta2 sigs.k8s.io/cluster-api/errors sigs.k8s.io/cluster-api/util/conversion -# sigs.k8s.io/controller-runtime v0.22.3 +# sigs.k8s.io/controller-runtime v0.22.5 ## explicit; go 1.24.0 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder @@ -3541,6 +3557,7 @@ sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/metrics/filters sigs.k8s.io/controller-runtime/pkg/metrics/server sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/reconcile @@ -3561,7 +3578,7 @@ sigs.k8s.io/controller-runtime/tools/setup-envtest/remote sigs.k8s.io/controller-runtime/tools/setup-envtest/store sigs.k8s.io/controller-runtime/tools/setup-envtest/versions sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows -# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 +# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json @@ -3653,7 +3670,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk ## explicit; go 1.18 sigs.k8s.io/randfill sigs.k8s.io/randfill/bytesource -# sigs.k8s.io/structured-merge-diff/v6 v6.3.0 +# sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 ## explicit; go 1.23 sigs.k8s.io/structured-merge-diff/v6/fieldpath sigs.k8s.io/structured-merge-diff/v6/merge @@ -3664,6 +3681,7 @@ sigs.k8s.io/structured-merge-diff/v6/value ## explicit; go 1.22 sigs.k8s.io/yaml sigs.k8s.io/yaml/kyaml +# github.com/cyphar/filepath-securejoin => github.com/cyphar/filepath-securejoin v0.5.2 # github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 # k8s.io/apiserver => 
github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 # k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index a7e491855..a94ec6cc3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -308,6 +308,42 @@ type ByObject struct { // // Defaults to true. EnableWatchBookmarks *bool + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // There will be a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A periodic sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + // + // SyncPeriod will locally trigger an artificial Update event with the same + // object in both ObjectOld and ObjectNew for everything that is in the + // cache. + // + // Predicates or Handlers that expect ObjectOld and ObjectNew to be different + // (such as GenerationChangedPredicate) will filter out this event, preventing + // it from triggering a reconciliation. + // SyncPeriod does not sync between the local cache and the server. + SyncPeriod *time.Duration } // Config describes all potential options for a given watch. @@ -343,6 +379,42 @@ type Config struct { // // Defaults to true. EnableWatchBookmarks *bool + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // There will be a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A periodic sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2.
To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + // + // SyncPeriod will locally trigger an artificial Update event with the same + // object in both ObjectOld and ObjectNew for everything that is in the + // cache. + // + // Predicates or Handlers that expect ObjectOld and ObjectNew to be different + // (such as GenerationChangedPredicate) will filter out this event, preventing + // it from triggering a reconciliation. + // SyncPeriod does not sync between the local cache and the server. + SyncPeriod *time.Duration } // NewCacheFunc - Function for creating a new cache from the options and a rest config. @@ -413,6 +485,7 @@ func optionDefaultsToConfig(opts *Options) Config { Transform: opts.DefaultTransform, UnsafeDisableDeepCopy: opts.DefaultUnsafeDisableDeepCopy, EnableWatchBookmarks: opts.DefaultEnableWatchBookmarks, + SyncPeriod: opts.SyncPeriod, } } @@ -423,6 +496,7 @@ func byObjectToConfig(byObject ByObject) Config { Transform: byObject.Transform, UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, EnableWatchBookmarks: byObject.EnableWatchBookmarks, + SyncPeriod: byObject.SyncPeriod, } } @@ -436,7 +510,7 @@ func newCache(restConfig *rest.Config, opts Options) newCacheFunc { HTTPClient: opts.HTTPClient, Scheme: opts.Scheme, Mapper: opts.Mapper, - ResyncPeriod: *opts.SyncPeriod, + ResyncPeriod: ptr.Deref(config.SyncPeriod, defaultSyncPeriod), Namespace: namespace, Selector: internal.Selector{ Label: config.LabelSelector, @@ -534,6 +608,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { byObject.Transform = defaultedConfig.Transform byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy byObject.EnableWatchBookmarks = defaultedConfig.EnableWatchBookmarks + byObject.SyncPeriod = defaultedConfig.SyncPeriod } opts.ByObject[obj] = byObject @@ -555,10 +630,6 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { opts.DefaultNamespaces[namespace] = cfg } - // Default the resync period to 10 hours if unset - if opts.SyncPeriod == nil { - opts.SyncPeriod = &defaultSyncPeriod - } return opts, nil } @@ -578,6 +649,9 @@ func defaultConfig(toDefault, defaultFrom Config) Config { if toDefault.EnableWatchBookmarks == nil { toDefault.EnableWatchBookmarks = defaultFrom.EnableWatchBookmarks } + if toDefault.SyncPeriod == nil { + toDefault.SyncPeriod = defaultFrom.SyncPeriod + } return toDefault } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index cacba4a9c..d4223eda2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -213,7 +213,12 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, o // List implements client.Client. 
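The cache changes above move SyncPeriod from a single Options-level default into per-config defaulting, which is what enables the new ByObject.SyncPeriod field. A hedged sketch of the knob (type and durations illustrative):

```go
package cachecfg

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func cacheOptions() cache.Options {
	nodeResync := 1 * time.Hour
	return cache.Options{
		// Leaving Options.SyncPeriod nil now falls back to the package
		// default (10h) per config, instead of mutating Options itself.
		ByObject: map[client.Object]cache.ByObject{
			// Nodes alone resync hourly; everything else keeps the default.
			&corev1.Node{}: {SyncPeriod: &nodeResync},
		},
	}
}
```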
func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - if n.namespace != "" { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped && n.namespace != "" { opts = append(opts, InNamespace(n.namespace)) } return n.client.List(ctx, obj, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go index 98df84c56..71363f0d1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go @@ -124,8 +124,8 @@ type priorityqueue[T comparable] struct { get chan item[T] // waiters is the number of routines blocked in Get, we use it to determine - // if we can push items. - waiters atomic.Int64 + // if we can push items. Every manipulation has to be protected with the lock. + waiters int64 // Configurable for testing now func() time.Time @@ -269,7 +269,7 @@ func (w *priorityqueue[T]) spin() { } } - if w.waiters.Load() == 0 { + if w.waiters == 0 { // Have to keep iterating here to ensure we update metrics // for further items that became ready and set nextReady. return true @@ -277,7 +277,7 @@ func (w *priorityqueue[T]) spin() { w.metrics.get(item.Key, item.Priority) w.locked.Insert(item.Key) - w.waiters.Add(-1) + w.waiters-- delete(w.items, item.Key) toDelete = append(toDelete, item) w.becameReady.Delete(item.Key) @@ -316,7 +316,9 @@ func (w *priorityqueue[T]) GetWithPriority() (_ T, priority int, shutdown bool) return zero, 0, true } - w.waiters.Add(1) + w.lock.Lock() + w.waiters++ + w.lock.Unlock() w.notifyItemOrWaiterAdded() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index 9bb81ed2a..c9f19da97 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -109,7 +109,11 @@ var ( // Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and // install extension APIs. type Environment struct { - // ControlPlane is the ControlPlane including the apiserver and etcd + // ControlPlane is the ControlPlane including the apiserver and etcd. + // Binary paths (APIServer.Path, Etcd.Path, KubectlPath) can be pre-configured in ControlPlane. + // If DownloadBinaryAssets is true, the downloaded paths will always be used. + // If DownloadBinaryAssets is false and paths are not pre-configured (default is empty), they will be + // automatically resolved using BinaryAssetsDirectory. ControlPlane controlplane.ControlPlane // Scheme is used to determine if conversion webhooks should be enabled @@ -211,6 +215,40 @@ func (te *Environment) Stop() error { return te.ControlPlane.Stop() } +// configureBinaryPaths configures the binary paths for the API server, etcd, and kubectl. +// If DownloadBinaryAssets is true, it downloads and uses those paths. +// If DownloadBinaryAssets is false, it only sets paths that are not already configured (empty). 
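The namespacedClient.List change above makes the wrapper scope-aware: it asks the RESTMapper whether the object is namespaced before injecting InNamespace, so cluster-scoped lists are no longer forced through a namespace. A hedged sketch of the call pattern this fixes (namespace value illustrative):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func listNodes(ctx context.Context, c client.Client) (*corev1.NodeList, error) {
	nsClient := client.NewNamespacedClient(c, "openshift-machine-api")

	nodes := &corev1.NodeList{}
	// Nodes are cluster-scoped: previously InNamespace(...) was appended
	// unconditionally; with this change the option is only added for
	// namespace-scoped types, so this call now behaves like a plain List.
	if err := nsClient.List(ctx, nodes); err != nil {
		return nil, err
	}
	return nodes, nil
}
```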
+func (te *Environment) configureBinaryPaths() error { + apiServer := te.ControlPlane.GetAPIServer() + + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } + + if te.DownloadBinaryAssets { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(), + te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL) + if err != nil { + return err + } + + apiServer.Path = apiServerPath + te.ControlPlane.Etcd.Path = etcdPath + te.ControlPlane.KubectlPath = kubectlPath + } else { + if apiServer.Path == "" { + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + } + if te.ControlPlane.Etcd.Path == "" { + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + } + if te.ControlPlane.KubectlPath == "" { + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + } + } + return nil +} + // Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. func (te *Environment) Start() (*rest.Config, error) { if te.useExistingCluster() { @@ -229,10 +267,6 @@ func (te *Environment) Start() (*rest.Config, error) { } else { apiServer := te.ControlPlane.GetAPIServer() - if te.ControlPlane.Etcd == nil { - te.ControlPlane.Etcd = &controlplane.Etcd{} - } - if os.Getenv(envAttachOutput) == "true" { te.AttachControlPlaneOutput = true } @@ -243,6 +277,9 @@ func (te *Environment) Start() (*rest.Config, error) { if apiServer.Err == nil { apiServer.Err = os.Stderr } + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } if te.ControlPlane.Etcd.Out == nil { te.ControlPlane.Etcd.Out = os.Stdout } @@ -251,20 +288,8 @@ func (te *Environment) Start() (*rest.Config, error) { } } - if te.DownloadBinaryAssets { - apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(), - te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL) - if err != nil { - return nil, err - } - - apiServer.Path = apiServerPath - te.ControlPlane.Etcd.Path = etcdPath - te.ControlPlane.KubectlPath = kubectlPath - } else { - apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) - te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) - te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + if err := te.configureBinaryPaths(); err != nil { + return nil, fmt.Errorf("failed to configure binary paths: %w", err) } if err := te.defaultTimeouts(); err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/filters/filters.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/filters/filters.go new file mode 100644 index 000000000..1659502bc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/filters/filters.go @@ -0,0 +1,122 @@ +package filters + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + "k8s.io/client-go/rest" + + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +// 
WithAuthenticationAndAuthorization provides a metrics.Filter for authentication and authorization. +// Metrics will be authenticated (via TokenReviews) and authorized (via SubjectAccessReviews) with the +// kube-apiserver. +// For authentication and authorization, the controller needs a ClusterRole +// with the following rules: +// * apiGroups: authentication.k8s.io, resources: tokenreviews, verbs: create +// * apiGroups: authorization.k8s.io, resources: subjectaccessreviews, verbs: create +// +// To scrape metrics, e.g. via Prometheus, the client needs a ClusterRole +// with the following rule: +// * nonResourceURLs: "/metrics", verbs: get +// +// Note: configuring this metrics provider introduces a dependency on "k8s.io/apiserver" +// in your go module. +func WithAuthenticationAndAuthorization(config *rest.Config, httpClient *http.Client) (metricsserver.Filter, error) { + authenticationV1Client, err := authenticationv1.NewForConfigAndClient(config, httpClient) + if err != nil { + return nil, err + } + authorizationV1Client, err := authorizationv1.NewForConfigAndClient(config, httpClient) + if err != nil { + return nil, err + } + + authenticatorConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: &apiserver.AnonymousAuthConfig{Enabled: false}, // Require authentication. + CacheTTL: 1 * time.Minute, + TokenAccessReviewClient: authenticationV1Client, + TokenAccessReviewTimeout: 10 * time.Second, + // wait.Backoff is copied from: https://github.com/kubernetes/apiserver/blob/v0.29.0/pkg/server/options/authentication.go#L43-L50 + // options.DefaultAuthWebhookRetryBackoff is not used to avoid a dependency on "k8s.io/apiserver/pkg/server/options". + WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, + } + delegatingAuthenticator, _, err := authenticatorConfig.New() + if err != nil { + return nil, fmt.Errorf("failed to create authenticator: %w", err) + } + + authorizerConfig := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: authorizationV1Client, + AllowCacheTTL: 5 * time.Minute, + DenyCacheTTL: 30 * time.Second, + // wait.Backoff is copied from: https://github.com/kubernetes/apiserver/blob/v0.29.0/pkg/server/options/authentication.go#L43-L50 + // options.DefaultAuthWebhookRetryBackoff is not used to avoid a dependency on "k8s.io/apiserver/pkg/server/options".
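For orientation, this filter is consumed through the metrics server's FilterProvider hook rather than called directly. A hedged sketch of that wiring with controller-runtime's manager (the bind address and cert directory are illustrative, not taken from this diff):

package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func newManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Metrics: metricsserver.Options{
			BindAddress:   ":8443",
			SecureServing: true,
			CertDir:       "/var/run/metrics-certs", // illustrative
			// The manager invokes FilterProvider with its rest.Config and HTTP
			// client; each /metrics request is then authenticated via TokenReview
			// and authorized via SubjectAccessReview, as implemented below.
			FilterProvider: filters.WithAuthenticationAndAuthorization,
		},
	})
}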
+ WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, + } + delegatingAuthorizer, err := authorizerConfig.New() + if err != nil { + return nil, fmt.Errorf("failed to create authorizer: %w", err) + } + + return func(log logr.Logger, handler http.Handler) (http.Handler, error) { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + res, ok, err := delegatingAuthenticator.AuthenticateRequest(req) + if err != nil { + log.Error(err, "Authentication failed") + http.Error(w, "Authentication failed", http.StatusInternalServerError) + return + } + if !ok { + log.V(4).Info("Authentication failed") + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + + attributes := authorizer.AttributesRecord{ + User: res.User, + Verb: strings.ToLower(req.Method), + Path: req.URL.Path, + } + + authorized, reason, err := delegatingAuthorizer.Authorize(ctx, attributes) + if err != nil { + msg := fmt.Sprintf("Authorization for user %s failed", attributes.User.GetName()) + log.Error(err, msg) + http.Error(w, msg, http.StatusInternalServerError) + return + } + if authorized != authorizer.DecisionAllow { + msg := fmt.Sprintf("Authorization denied for user %s", attributes.User.GetName()) + log.V(4).Info(fmt.Sprintf("%s: %s", msg, reason)) + http.Error(w, msg, http.StatusForbidden) + return + } + + handler.ServeHTTP(w, req) + }), nil + }, nil +} diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index d538ac119..3fe528bbf 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -52,8 +52,8 @@ import ( // - bool, for JSON booleans // - float64, for JSON numbers // - string, for JSON strings -// - []interface{}, for JSON arrays -// - map[string]interface{}, for JSON objects +// - []any, for JSON arrays +// - map[string]any, for JSON objects // - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length @@ -117,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. -// -// By convention, to approximate the behavior of [Unmarshal] itself, -// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. 
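Returning to the envtest refactor above: because configureBinaryPaths only fills in paths that are still empty, callers can now pin individual binaries while letting the rest be resolved from BinaryAssetsDirectory. A sketch, assuming envtest's re-exported ControlPlane and Etcd type aliases (all paths are invented for illustration):

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func newTestEnv() *envtest.Environment {
	return &envtest.Environment{
		ControlPlane: envtest.ControlPlane{
			// Pre-configured path: left untouched by configureBinaryPaths.
			Etcd: &envtest.Etcd{Path: "/opt/testbins/etcd"},
		},
		// kube-apiserver and kubectl remain unset, so they are still resolved
		// from this directory via process.BinPathFinder.
		BinaryAssetsDirectory: "/opt/testbins",
	}
}

With DownloadBinaryAssets set to true, the downloaded paths still win, matching the doc comment on configureBinaryPaths.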
type Unmarshaler interface { UnmarshalJSON([]byte) error } @@ -132,7 +129,7 @@ type UnmarshalTypeError struct { Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes Struct string // name of the struct type containing the field - Field string // the full path from root node to the field + Field string // the full path from root node to the field, including embedded structs } func (e *UnmarshalTypeError) Error() string { @@ -281,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() - err.Field = strings.Join(d.errorContext.FieldStack, ".") + fieldStack := d.errorContext.FieldStack + if err.Field != "" { + fieldStack = append(fieldStack, err.Field) + } + err.Field = strings.Join(fieldStack, ".") } } return err @@ -492,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm } // Prevent infinite loop if v is an interface pointing to its own address: - // var v interface{} + // var v any // v = &v - if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) { v = v.Elem() break } @@ -784,7 +785,10 @@ func (d *decodeState) object(v reflect.Value) error { } subv = v destring = f.quoted - for _, i := range f.index { + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + for i, ind := range f.index { if subv.Kind() == reflect.Pointer { if subv.IsNil() { // If a struct embeds a pointer to an unexported type, @@ -804,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Elem() } - subv = subv.Field(i) - } - if d.errorContext == nil { - d.errorContext = new(errorContext) + if i < len(f.index)-1 { + d.errorContext.FieldStack = append( + d.errorContext.FieldStack, + subv.Type().Field(ind).Name, + ) + } + subv = subv.Field(ind) } - d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.appendStrictFieldStackKey(f.name) } else if d.disallowUnknownFields { d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key))) @@ -1118,7 +1125,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. -// valueInterface is like value but returns interface{} +// valueInterface is like value but returns any. func (d *decodeState) valueInterface() (val any) { switch d.opcode { default: @@ -1135,7 +1142,7 @@ return } -// arrayInterface is like array but returns []interface{}. +// arrayInterface is like array but returns []any. func (d *decodeState) arrayInterface() []any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { @@ -1170,7 +1177,7 @@ return v } -// objectInterface is like object but returns map[string]interface{}. +// objectInterface is like object but returns map[string]any.
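To make the FieldStack changes above concrete: type errors on fields promoted from embedded structs now report the embedded struct in the error path. A small illustration against the standard library's encoding/json, which this vendored fork tracks (the type names are invented, and the exact path string assumes a Go toolchain containing the matching upstream fix):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type Inner struct {
	Count int `json:"count"`
}

type Outer struct {
	Inner // embedded; its "count" field is promoted to Outer
}

func main() {
	var o Outer
	err := json.Unmarshal([]byte(`{"count":"oops"}`), &o)
	var typeErr *json.UnmarshalTypeError
	if errors.As(err, &typeErr) {
		// With the fix, the path names the embedded struct,
		// e.g. "Inner.count" rather than just "count".
		fmt.Println(typeErr.Field)
	}
}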
func (d *decodeState) objectInterface() map[string]any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index eb73bff58..4e3a1a2f1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -71,8 +71,8 @@ import ( // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as -// false, 0, a nil pointer, a nil interface value, and any empty array, -// slice, map, or string. +// false, 0, a nil pointer, a nil interface value, and any array, +// slice, map, or string of length zero. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". @@ -98,6 +98,17 @@ import ( // // Field appears in JSON as key "-". // Field int `json:"-,"` // +// The "omitzero" option specifies that the field should be omitted +// from the encoding if the field has a zero value, according to rules: +// +// 1) If the field type has an "IsZero() bool" method, that will be used to +// determine whether the value is zero. +// +// 2) Otherwise, the value is zero if it is the zero value for its type. +// +// If both "omitempty" and "omitzero" are specified, the field will be omitted +// if the value is either empty or zero (or both). +// // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used @@ -690,7 +701,8 @@ FieldLoop: fv = fv.Field(i) } - if f.omitEmpty && isEmptyValue(fv) { + if (f.omitEmpty && isEmptyValue(fv)) || + (f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) { continue } e.WriteByte(next) @@ -808,7 +820,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe + ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe len int }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { @@ -1039,11 +1051,19 @@ type field struct { index []int typ reflect.Type omitEmpty bool + omitZero bool + isZero func(reflect.Value) bool quoted bool encoder encoderFunc } +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeFor[isZeroer]() + // typeFields returns a list of fields that JSON should recognize for the given type. // The algorithm is breadth-first search over the set of structs to include - the top struct // and then any reachable anonymous structs. @@ -1135,6 +1155,7 @@ func typeFields(t reflect.Type) structFields { index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), + omitZero: opts.Contains("omitzero"), quoted: quoted, } field.nameBytes = []byte(field.name) @@ -1144,6 +1165,40 @@ func typeFields(t reflect.Type) structFields { field.nameEscHTML = `"` + string(nameEscBuf) + `":` field.nameNonEsc = `"` + field.name + `":` + if field.omitZero { + t := sf.Type + // Provide a function that uses a type's IsZero method. 
+ switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return v.IsNil() || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return v.IsNil() || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. + v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + } + } + fields = append(fields, field) if count[f.typ] > 1 { // If there were multiple instances, add a second, diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 48fc4d945..cc2108b92 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -31,8 +31,8 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// [Number] instead of as a float64. +// UseNumber causes the Decoder to unmarshal a number into an +// interface value as a [Number] instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go index 5d3707a5b..c8138a654 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go @@ -18,6 +18,7 @@ package schema import ( "sync" + "sync/atomic" ) // Schema is a list of named types. @@ -28,7 +29,7 @@ type Schema struct { Types []TypeDef `yaml:"types,omitempty"` once sync.Once - m map[string]TypeDef + m atomic.Pointer[map[string]TypeDef] lock sync.Mutex // Cached results of resolving type references to atoms. Only stores @@ -144,26 +145,28 @@ type Map struct { ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` once sync.Once - m map[string]StructField + m atomic.Pointer[map[string]StructField] } // FindField is a convenience function that returns the referenced StructField, // if it exists, or (nil, false) if it doesn't. func (m *Map) FindField(name string) (StructField, bool) { m.once.Do(func() { - m.m = make(map[string]StructField, len(m.Fields)) + mm := make(map[string]StructField, len(m.Fields)) for _, field := range m.Fields { - m.m[field.Name] = field + mm[field.Name] = field } + m.m.Store(&mm) }) - sf, ok := m.m[name] + sf, ok := (*m.m.Load())[name] return sf, ok } -// CopyInto this instance of Map into the other -// If other is nil this method does nothing. -// If other is already initialized, overwrites it with this instance -// Warning: Not thread safe +// CopyInto clones this instance of Map into dst +// +// If dst is nil this method does nothing. 
+// If dst is already initialized, overwrites it with this instance. +// Warning: Not thread safe. Only use dst after this function returns. func (m *Map) CopyInto(dst *Map) { if dst == nil { return @@ -175,12 +178,13 @@ dst.Unions = m.Unions dst.ElementRelationship = m.ElementRelationship - if m.m != nil { + mm := m.m.Load() + if mm != nil { // If cache is non-nil then the once token had been consumed. // Must reset token and use it again to ensure same semantics. dst.once = sync.Once{} dst.once.Do(func() { - dst.m = m.m + dst.m.Store(mm) }) } } @@ -274,12 +278,13 @@ type List struct { // if it exists, or (nil, false) if it doesn't. func (s *Schema) FindNamedType(name string) (TypeDef, bool) { s.once.Do(func() { - s.m = make(map[string]TypeDef, len(s.Types)) + sm := make(map[string]TypeDef, len(s.Types)) for _, t := range s.Types { - s.m[t.Name] = t + sm[t.Name] = t } + s.m.Store(&sm) }) - t, ok := s.m[name] + t, ok := (*s.m.Load())[name] return t, ok } @@ -352,10 +357,11 @@ func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { return result, true } -// Clones this instance of Schema into the other -// If other is nil this method does nothing. -// If other is already initialized, overwrites it with this instance -// Warning: Not thread safe +// CopyInto clones this instance of Schema into dst +// +// If dst is nil this method does nothing. +// If dst is already initialized, overwrites it with this instance. +// Warning: Not thread safe. Only use dst after this function returns. func (s *Schema) CopyInto(dst *Schema) { if dst == nil { return @@ -364,12 +370,13 @@ // Schema type is considered immutable so sharing references dst.Types = s.Types - if s.m != nil { + sm := s.m.Load() + if sm != nil { // If cache is non-nil then the once token had been consumed. // Must reset token and use it again to ensure same semantics. dst.once = sync.Once{} dst.once.Do(func() { - dst.m = s.m + dst.m.Store(sm) }) } } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go index 86de5105d..0db1734f9 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go @@ -58,6 +58,10 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { defer w.allocator.Free(l) // If list is null or empty just return if l == nil || l.Length() == 0 { + // For extraction, return the value as is, preserving whether it was nil or empty; that distinction matters for the extracted result. + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -71,6 +75,7 @@ } var newItems []interface{} + hadMatches := false iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { @@ -80,24 +85,40 @@ path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e.
!w.shouldExtract) - if w.toRemove.Has(path) { - if w.shouldExtract { - newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured()) - } else { - continue + isExactPathMatch := w.toRemove.Has(path) + isPrefixMatch := !w.toRemove.WithPrefix(pe).Empty() + if w.shouldExtract { + if isPrefixMatch { + item = removeItemsWithSchema(item, w.toRemove.WithPrefix(pe), w.schema, t.ElementType, w.shouldExtract) + } + if isExactPathMatch || isPrefixMatch { + newItems = append(newItems, item.Unstructured()) } - } - if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { - item = removeItemsWithSchema(item, subset, w.schema, t.ElementType, w.shouldExtract) } else { - // don't save items not on the path when we shouldExtract. - if w.shouldExtract { + if isExactPathMatch { continue } + if isPrefixMatch { + // Remove nested items within this list item, preserving the item if it becomes empty + hadMatches = true + wasMap := item.IsMap() + wasList := item.IsList() + item = removeItemsWithSchema(item, w.toRemove.WithPrefix(pe), w.schema, t.ElementType, w.shouldExtract) + // If item returned null but we're removing items within the structure (not the item itself), + // preserve the empty container structure + if item.IsNull() && !w.shouldExtract { + if wasMap { + item = value.NewValueInterface(map[string]interface{}{}) + } else if wasList { + item = value.NewValueInterface([]interface{}{}) + } + } + } + newItems = append(newItems, item.Unstructured()) } - newItems = append(newItems, item.Unstructured()) } - if len(newItems) > 0 { + // Preserve empty lists (non-nil) instead of converting to null when items were matched and removed + if len(newItems) > 0 || (hadMatches && !w.shouldExtract) { w.out = newItems } return nil @@ -113,6 +134,10 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { } // If map is null or empty just return if m == nil || m.Empty() { + // For extraction, return the value as is, preserving whether it was nil or empty; that distinction matters for the extracted result. + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -131,6 +156,7 @@ } newMap := map[string]interface{}{} + hadMatches := false m.Iterate(func(k string, val value.Value) bool { pe := fieldpath.PathElement{FieldName: &k} path, _ := fieldpath.MakePath(pe) @@ -148,7 +174,19 @@ return true } if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + hadMatches = true + wasMap := val.IsMap() + wasList := val.IsList() val = removeItemsWithSchema(val, subset, w.schema, fieldType, w.shouldExtract) + // If val returned null but we're removing items within the structure (not the field itself), + // preserve the empty container structure + if val.IsNull() && !w.shouldExtract { + if wasMap { + val = value.NewValueInterface(map[string]interface{}{}) + } else if wasList { + val = value.NewValueInterface([]interface{}{}) + } + } } else { // don't save values not on the path when we shouldExtract.
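Aside on the elements.go changes above: the lazily built lookup caches are now published through atomic.Pointer, so concurrent FindField/FindNamedType calls, including ones racing with CopyInto, no longer trip the race detector. A minimal sketch of the access pattern this protects (the field name is illustrative):

package example

import (
	"sync"

	"sigs.k8s.io/structured-merge-diff/v6/schema"
)

func concurrentLookups(m *schema.Map) {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The first caller builds the name-to-StructField map inside
			// sync.Once; later callers only load the atomic pointer.
			_, _ = m.FindField("spec")
		}()
	}
	wg.Wait()
}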
if w.shouldExtract { @@ -158,7 +196,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { newMap[k] = val.Unstructured() return true }) - if len(newMap) > 0 { + // Preserve empty maps (non-nil) instead of converting to null when items were matched and removed + if len(newMap) > 0 || (hadMatches && !w.shouldExtract) { w.out = newMap } return nil diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go index 3b4a402ee..75b7085c3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/value/reflectcache.go @@ -84,6 +84,10 @@ func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { // field might be nested within 'inline' structs for _, elem := range f.fieldPath { + if safeIsNil(structVal) { + // if any part of the path is nil, return the zero value for the field type + return reflect.Zero(f.fieldType) + } structVal = dereference(structVal).FieldByIndex(elem) } return structVal
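Finally, the typed/remove.go changes above alter what RemoveItems leaves behind: a container emptied by removal is now preserved as an empty map or list instead of collapsing to null. A hedged sketch using the typed API (the schema, type name, and fields are invented; the commented output is indicative):

package example

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v6/typed"
)

const schemaYAML = `types:
- name: example
  map:
    fields:
    - name: spec
      type:
        map:
          fields:
          - name: replicas
            type:
              scalar: numeric
`

func demo() error {
	parser, err := typed.NewParser(typed.YAMLObject(schemaYAML))
	if err != nil {
		return err
	}
	obj, err := parser.Type("example").FromYAML(typed.YAMLObject(`{"spec": {"replicas": 1}}`))
	if err != nil {
		return err
	}
	pruned := obj.RemoveItems(fieldpath.NewSet(fieldpath.MakePathOrDie("spec", "replicas")))
	// Before this change "spec" collapsed to null once its last field was
	// removed; now it survives as an empty map, roughly {"spec": {}}.
	fmt.Println(pruned.AsValue().Unstructured())
	return nil
}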