diff --git a/.github/ISSUE_TEMPLATE/kubernetes_bump.md b/.github/ISSUE_TEMPLATE/kubernetes_bump.md index 12ffe6062bff..e03b905e93ac 100644 --- a/.github/ISSUE_TEMPLATE/kubernetes_bump.md +++ b/.github/ISSUE_TEMPLATE/kubernetes_bump.md @@ -25,6 +25,8 @@ changes should be cherry-picked to all release series that will support the new * [ ] Modify CAPD to use the new Kubernetes release after it is GA: * Bump the Kubernetes version in `test/*` except for `test/infrastructure/kind/*`. * Prior art: https://github.com/kubernetes-sigs/cluster-api/pull/11030 +* [ ] Start testing with next Kubernetes release on main by bumping `KUBERNETES_VERSION_LATEST_CI` in `docker.yaml` + * Prior art: https://github.com/kubernetes-sigs/cluster-api/pull/12709 * [ ] Ensure the jobs are adjusted to provide test coverage according to our [support policy](https://cluster-api.sigs.k8s.io/reference/versions.html#supported-kubernetes-versions): * At the `.versions` section in the `cluster-api-prowjob-gen.yaml` file in [test-infra](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-sigs/cluster-api/): diff --git a/.github/workflows/pr-gh-workflow-approve.yaml b/.github/workflows/pr-gh-workflow-approve.yaml index f493fd40032d..28be4dac7151 100644 --- a/.github/workflows/pr-gh-workflow-approve.yaml +++ b/.github/workflows/pr-gh-workflow-approve.yaml @@ -19,7 +19,7 @@ jobs: actions: write steps: - name: Update PR - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 continue-on-error: true with: github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr-md-link-check.yaml b/.github/workflows/pr-md-link-check.yaml index 36c64b54c11a..d5e4f43f1eb1 100644 --- a/.github/workflows/pr-md-link-check.yaml +++ b/.github/workflows/pr-md-link-check.yaml @@ -14,7 +14,7 @@ jobs: name: Broken Links runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 - uses: gaurav-nelson/github-action-markdown-link-check@3c3b66f1f7d0900e37b71eca45b63ea9eedfce31 # tag=1.0.17 with: use-quiet-mode: 'yes' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d1a0e5041b08..cfde4a2efa38 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,12 +17,12 @@ jobs: release_tag: ${{ steps.release-version.outputs.release_version }} steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 with: fetch-depth: 0 - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # tag=v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # tag=v47.0.0 - name: Get release version id: release-version run: | @@ -88,14 +88,14 @@ jobs: env: RELEASE_TAG: ${{needs.push_release_tags.outputs.release_tag}} - name: checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 with: fetch-depth: 0 ref: ${{ env.RELEASE_TAG }} - name: Calculate go version run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + uses: 
actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0 with: go-version: ${{ env.go_version }} - name: generate release artifacts @@ -106,7 +106,7 @@ jobs: curl -L "https://raw.githubusercontent.com/${{ github.repository }}/main/CHANGELOG/${{ env.RELEASE_TAG }}.md" \ -o "${{ env.RELEASE_TAG }}.md" - name: Release - uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # tag=v2.3.2 + uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # tag=v2.5.0 with: draft: true files: out/* diff --git a/.github/workflows/weekly-md-link-check.yaml b/.github/workflows/weekly-md-link-check.yaml index 2a57872a0921..5ce68914de2a 100644 --- a/.github/workflows/weekly-md-link-check.yaml +++ b/.github/workflows/weekly-md-link-check.yaml @@ -14,10 +14,10 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, release-1.10, release-1.9, release-1.8 ] + branch: [ main, release-1.11, release-1.10, release-1.9 ] runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 with: ref: ${{ matrix.branch }} - uses: gaurav-nelson/github-action-markdown-link-check@3c3b66f1f7d0900e37b71eca45b63ea9eedfce31 # tag=1.0.17 diff --git a/.github/workflows/weekly-security-scan.yaml b/.github/workflows/weekly-security-scan.yaml index b2fbc9a88aaf..16d30b6a26e7 100644 --- a/.github/workflows/weekly-security-scan.yaml +++ b/.github/workflows/weekly-security-scan.yaml @@ -13,19 +13,19 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, release-1.10, release-1.9, release-1.8 ] + branch: [ main, release-1.11, release-1.10, release-1.9 ] name: Trivy runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 with: ref: ${{ matrix.branch }} - name: Calculate go version id: vars run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0 with: go-version: ${{ steps.vars.outputs.go_version }} - name: Run verify security target diff --git a/.github/workflows/weekly-test-release.yaml b/.github/workflows/weekly-test-release.yaml index 3ac6a6daf54d..7d02df535ad6 100644 --- a/.github/workflows/weekly-test-release.yaml +++ b/.github/workflows/weekly-test-release.yaml @@ -17,10 +17,10 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, release-1.10, release-1.9, release-1.8 ] + branch: [ main, release-1.11, release-1.10, release-1.9 ] runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # tag=v6.0.0 with: ref: ${{ matrix.branch }} fetch-depth: 0 @@ -32,7 +32,7 @@ jobs: - name: Calculate go version run: echo "go_version=$(make go-version)" >> $GITHUB_ENV - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # tag=v6.1.0 with: go-version: ${{ env.go_version }} - name: Test release diff --git a/.gitignore b/.gitignore index eef96f2d5d1d..a3299e3e025d 100644 --- a/.gitignore +++ b/.gitignore @@ -12,13 +12,13 @@ hack/tools/bin # E2E test templates 
test/e2e/data/infrastructure-docker/**/cluster-template*.yaml -!test/e2e/data/infrastructure-docker/**/clusterclass-quick-start.yaml !test/e2e/data/infrastructure-docker/**/clusterclass-quick-start-runtimesdk.yaml !test/e2e/data/infrastructure-docker/**/clusterclass-quick-start-runtimesdk-v1beta1.yaml !test/e2e/data/infrastructure-docker/**/cluster-template-in-memory.yaml !test/e2e/data/infrastructure-docker/**/clusterclass-in-memory.yaml test/e2e/data/infrastructure-docker/**/clusterclass-*.yaml test/e2e/data/infrastructure-inmemory/**/cluster-template*.yaml +!test/e2e/data/infrastructure-docker/**/clusterclass-quick-start.yaml # Output of Makefile targets using sed on MacOS systems *.yaml-e diff --git a/.golangci-kal.yml b/.golangci-kal.yml index bae5742288ea..eea02298655b 100644 --- a/.golangci-kal.yml +++ b/.golangci-kal.yml @@ -18,12 +18,15 @@ linters: - "conditions" # Ensure conditions have the correct json tags and markers. - "conflictingmarkers" - "duplicatemarkers" # Ensure there are no exact duplicate markers for types and fields. + - "forbiddenmarkers" # Ensure that types and fields do not contain any markers that are forbidden. - "integers" # Ensure only int32 and int64 are used for integers. - "jsontags" # Ensure every field has a json tag. - "maxlength" # Ensure all strings and arrays have maximum lengths/maximum items. - "nobools" # Bools do not evolve over time, should use enums instead. + - "nodurations" # Prevents usage of `Duration` types. - "nofloats" # Ensure floats are not used. - "nomaps" # Ensure maps are not used. + - "nonullable" # Ensure that types and fields do not have the nullable marker. - "notimestamp" # Prevents usage of 'Timestamp' fields - "optionalfields" # Ensure that all fields marked as optional adhere to being pointers and # having the `omitempty` value in their `json` tag where appropriate. @@ -50,6 +53,12 @@ linters: - ["default", "kubebuilder:default"] - ["required", "kubebuilder:validation:Required", "k8s:required"] description: "A field with a default value cannot be required" + forbiddenmarkers: + markers: + # We don't want to do any defaulting (including OpenAPI) anymore on API fields because we prefer + # to have a clear signal on user intent. This also allows us to easily change the default behavior if necessary. + - identifier: "kubebuilder:default" + - identifier: "default" conditions: isFirstField: Warn # Require conditions to be the first field in the status struct. usePatchStrategy: Forbid # Forbid patchStrategy markers on the Conditions field. @@ -83,15 +92,15 @@ linters: ## Excludes for current apiVersions that can be removed once v1beta1 is removed. # .status.deprecated.v1beta1.conditions fields are using v1beta1.Condition types. 
- path: "api/addons/v1beta2|api/bootstrap/kubeadm/v1beta2|api/controlplane/kubeadm/v1beta2|api/core/v1beta2|api/ipam/v1beta2|api/runtime/v1beta2" - text: "Conditions field must be a slice of metav1.Condition" + text: "Conditions field in .*V1Beta1DeprecatedStatus must be a slice of metav1.Condition" linters: - kubeapilinter - path: "api/addons/v1beta2|api/bootstrap/kubeadm/v1beta2|api/controlplane/kubeadm/v1beta2|api/core/v1beta2|api/ipam/v1beta2|api/runtime/v1beta2" - text: "ssatags: Conditions should have a listType marker for proper Server-Side Apply behavior" + text: "ssatags: .*Conditions should have a listType marker for proper Server-Side Apply behavior" linters: - kubeapilinter - path: "api/core/v1beta2" - text: "field Conditions type Conditions must have a maximum items, add kubebuilder:validation:MaxItems marker" + text: "field .*Conditions type Conditions must have a maximum items, add kubebuilder:validation:MaxItems marker" linters: - kubeapilinter - path: "api/core/v1beta2/condition_types.go" @@ -102,24 +111,18 @@ linters: ## Excludes for current clusterctl v1alpha3 and Runtime Hooks v1alpha1 apiVersions (can be fixed once we bump their apiVersion). # Note: The types in api/runtime/hooks/v1alpha1 are not CRDs, so e.g. SSA markers don't make sense there. - path: "cmd/clusterctl/api/v1alpha3|api/runtime/hooks/v1alpha1" - text: "optionalfields|requiredfields|maxlength|ssatags" + text: "maxlength|ssatags" linters: - kubeapilinter - - ## Excludes for JSONSchemaProps - # controller-gen does not allow to add MaxItems to Schemaless fields: https://github.com/kubernetes-sigs/kube-api-linter/issues/120 - - path: "api/core/v1beta2/clusterclass_types.go" - text: "maxlength: field (AllOf|OneOf|AnyOf) must have a maximum items, add kubebuilder:validation:MaxItems marker" - linters: - - kubeapilinter - # controller-gen does not allow to add listType to Schemaless fields: https://github.com/kubernetes-sigs/kube-api-linter/issues/120 - - path: "api/core/v1beta2/clusterclass_types.go" - text: "ssatags: (AllOf|OneOf|AnyOf) should have a listType marker for proper Server-Side Apply behavior" + - path: "cmd/clusterctl/api/v1alpha3|api/runtime/hooks/v1alpha1/(common_types.go|discovery_types.go|lifecyclehooks_types.go|topologymutation_types.go|topologymutation_variable_types.go)" + text: "optionalfields|requiredfields" linters: - kubeapilinter + + ## Excludes for JSONSchemaProps # We want to align to the JSON tags of the CustomResourceDefinition fields. - path: "api/core/v1beta2/clusterclass_types" - text: "field (XPreserveUnknownFields|XPreserveUnknownFields|XValidations|XMetadata|XIntOrString) json tag does not match pattern" + text: "field JSONSchemaProps.(XPreserveUnknownFields|XPreserveUnknownFields|XValidations|XMetadata|XIntOrString) json tag does not match pattern" linters: - kubeapilinter # We want to align Properties to the corresponding field in CustomResourceDefinitions. @@ -142,7 +145,7 @@ linters: ## Excludes for kubeadm types # We want to align the FeatureGates field to the FeatureGates field in kubeadm. 
- path: "api/bootstrap/kubeadm/v1beta2/kubeadm_types.go" - text: "nomaps: FeatureGates should not use a map type, use a list type with a unique name/identifier instead" + text: "nomaps: ClusterConfiguration.FeatureGates should not use a map type, use a list type with a unique name/identifier instead" linters: - kubeapilinter @@ -161,6 +164,16 @@ linters: linters: - kubeapilinter + # Excludes for existing default markers + - path: "api/core/v1beta2/clusterclass_types.go" + text: 'forbiddenmarkers: field ValidationRule.Reason has forbidden marker "kubebuilder:default=FieldValueInvalid"' + linters: + - kubeapilinter + - path: "api/core/v1beta2/clusterclass_types.go" + text: 'forbiddenmarkers: field ValidationRule.Reason has forbidden marker "default=ref\(sigs.k8s.io/cluster-api/api/core/v1beta2.FieldValueInvalid\)"' + linters: + - kubeapilinter + # TODO: Excludes that should be removed once the corresponding issues in KAL are fixed # KAL incorrectly reports that the Taints field doesn't have to be a pointer (it has to be to preserve []). # See: https://github.com/kubernetes-sigs/kube-api-linter/issues/116 diff --git a/.golangci.yml b/.golangci.yml index bc313cbadf91..4d7e78db2b8a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,9 +20,11 @@ linters: - durationcheck # multiplying two durations - errcheck # unchecked errors - errchkjson # invalid types passed to json encoder + - forbidigo # allows to block usage of funcs - ginkgolinter # ginkgo and gomega - gocritic # bugs, performance, style (we could add custom ones to this one) - godot # checks that comments end in a period + - godox # block FIXMEs - goprintffuncname # printft-like functions should be named with f at the end - gosec # potential security problems - govet # basically 'go vet' @@ -51,6 +53,10 @@ linters: # TODO: It will be dropped when the Go version migration is done. - usetesting settings: + forbidigo: + forbid: + - pattern: ctrl.NewControllerManagedBy + msg: Use capicontrollerutil.NewControllerManagedBy instead ginkgolinter: forbid-focus-container: true gocritic: @@ -83,6 +89,9 @@ linters: - ^ \+.* - ^ ANCHOR.* - '^ (alpha|beta|GA): v.*' + godox: + keywords: + - FIXME # FIXME's should be removed before merging PRs gosec: excludes: # integer overflow conversion int -> int32 @@ -174,6 +183,10 @@ linters: alias: "" - pkg: sigs.k8s.io/cluster-api/internal/topology/names alias: topologynames + - pkg: sigs.k8s.io/cluster-api/internal/util/client + alias: "clientutil" + - pkg: sigs.k8s.io/cluster-api/internal/util/controller + alias: "capicontrollerutil" # CAPD - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3 alias: infrav1alpha3 @@ -257,19 +270,11 @@ linters: - linters: - staticcheck text: 'SA1019: (clusterv1alpha3.*|clusterv1alpha4.*) is deprecated: This type will be removed in one of the next releases.' - # Specific exclude rules for deprecated feature flags - - linters: - - staticcheck - text: 'SA1019: feature.ClusterResourceSet is deprecated: ClusterResourceSet feature is now GA and the corresponding feature flag will be removed in 1.12 release.' 
# v1Beta1 deprecated fields - linters: - staticcheck text: 'SA1019: .*\.Deprecated\.V1Beta1.* is deprecated' - # CR v0.21 deprecated Result.Requeue, will be fixed incrementally and tracked via https://github.com/kubernetes-sigs/cluster-api/issues/12272 - - linters: - - staticcheck - text: 'SA1019: .*(res|result|i|j)\.Requeue is deprecated: Use `RequeueAfter` instead' - # TODO: var-naming: avoid meaningless package names by revive + # TODO: var-naming: avoid meaningless package names by revive # * test/infrastructure/docker/internal/docker/types/ # * bootstrap/kubeadm/types/ # * internal/webhooks/util/ @@ -402,6 +407,22 @@ linters: - staticcheck path: (.+)\.go$ text: 'QF1008: could remove embedded field' + - linters: + - revive + path: errors/.*\.go$ + text: 'var-naming: avoid package names that conflict with Go standard library package names' + - linters: + - revive + path: internal/util/hash/.*\.go$ + text: 'var-naming: avoid package names that conflict with Go standard library package names' + - linters: + - revive + path: internal/controllers/topology/cluster/patches/api/.*\.go$ + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: test/infrastructure/inmemory/pkg/server/api/.*\.go$ + text: 'var-naming: avoid meaningless package names' issues: max-issues-per-linter: 0 max-same-issues: 0 diff --git a/CHANGELOG/v1.10.5.md b/CHANGELOG/v1.10.5.md new file mode 100644 index 000000000000..05c8fea9a6f0 --- /dev/null +++ b/CHANGELOG/v1.10.5.md @@ -0,0 +1,37 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.28.x -> v1.33.x +- Workload Cluster: v1.26.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.10.4 +## :chart_with_upwards_trend: Overview +- 6 new commits merged +- 2 feature additions ✨ +- 1 bug fixed 🐛 + +## :sparkles: New Features +- KCP: Bump corefile-migration to v1.0.27 (#12637) +- Runtime SDK: Add mTLS support to runtime extension server and client (#12519) + +## :bug: Bug Fixes +- MachineDeployment: Fix second rolling update for MD rolloutAfter (#12555) + +## :seedling: Others +- Dependency: Bump Go to v1.23.11 (#12530) +- Dependency: Bump Go to v1.23.12 (#12612) +- Testing: Skipping test that is failing because of infra issues (#12566) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.26 → v1.0.27](https://github.com/coredns/corefile-migration/compare/v1.0.26...v1.0.27) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.10.6.md b/CHANGELOG/v1.10.6.md new file mode 100644 index 000000000000..452488e9a18d --- /dev/null +++ b/CHANGELOG/v1.10.6.md @@ -0,0 +1,26 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.28.x -> v1.33.x +- Workload Cluster: v1.26.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.10.5 +## :chart_with_upwards_trend: Overview +- 1 new commit merged + +## :seedling: Others +- Dependency: Bump github.com/go-viper/mapstructure/v2 to fix CVE (#12681) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/go-viper/mapstructure/v2: [v2.3.0 → v2.4.0](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.10.7.md b/CHANGELOG/v1.10.7.md new file mode 
100644 index 000000000000..195ff7e07034 --- /dev/null +++ b/CHANGELOG/v1.10.7.md @@ -0,0 +1,30 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.28.x -> v1.33.x +- Workload Cluster: v1.26.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.10.6 +## :chart_with_upwards_trend: Overview +- 2 new commits merged +- 1 feature addition ✨ + +## :sparkles: New Features +- KCP: Bump coredns/corefile-migration to v1.0.28 (#12749) + +## :seedling: Others +- e2e: Fix flaky test in extensionconfig_controller_test.go (#12783) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.27 → v1.0.28](https://github.com/coredns/corefile-migration/compare/v1.0.27...v1.0.28) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.10.8.md b/CHANGELOG/v1.10.8.md new file mode 100644 index 000000000000..7d4dd6df5cc3 --- /dev/null +++ b/CHANGELOG/v1.10.8.md @@ -0,0 +1,33 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.28.x -> v1.33.x +- Workload Cluster: v1.26.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.10.7 +## :chart_with_upwards_trend: Overview +- 4 new commits merged +- 1 feature addition ✨ + +## :sparkles: New Features +- KCP: Bump coredns/corefile-migration to v1.0.29 (#12864) + +## :seedling: Others +- clusterctl: Bump cert-manager to v1.19.1 (#12876) +- clusterctl: Bump cert-manager version to v1.19.0 (#12834) + +:book: Additionally, there has been 1 contribution to our documentation and book. (#12838) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.28 → v1.0.29](https://github.com/coredns/corefile-migration/compare/v1.0.28...v1.0.29) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.11.0-rc.1.md b/CHANGELOG/v1.11.0-rc.1.md new file mode 100644 index 000000000000..6b7756cafe76 --- /dev/null +++ b/CHANGELOG/v1.11.0-rc.1.md @@ -0,0 +1,515 @@ +🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Highlights + +- Bumped to Go 1.24, controller-runtime v0.21, k8s.io/* v0.33, controller-gen v0.18 (also moved to `sigs.k8s.io/randfill`) (#12191) +- v1beta2 API version has been introduced and considering the awesome amount of improvements it marks an important + step in the journey towards graduating our API to v1. + - Accordingly there is now a new v1beta2 version of our contract for providers. + - Improve status: + - The transition to the new K8s aligned conditions using `metav1.Conditions` types and the new condition semantic + has been completed. + - Replica counters are now consistent with new conditions and across all resources; new replica counters have been added at cluster level. + - Semantic of contract fields in status have been improved and are now consistent across all resources. + - The confusing `FailureReason` and `FailureMessage` fields have been dropped. + - Support CC across namespaces: API changes planned for this feature have been implemented. + - Improve object references: + - Unnecessary fields have been dropped from object reference. 
+ - Object references are now GitOps friendly (API version is not overwritten anymore by controllers). + - KubeadmConfig and KubeadmControlPlane APIs have been aligned with kubeadm v1beta4 API. + - Additionally, fields inferred from top level objects have been removed, thus getting rid of a common source of confusion/issues. +- Compliance with K8s API guidelines: + - Thanks to the adoption of the [KAL linter](https://github.com/kubernetes-sigs/kube-api-linter) compliance with K8s API guidelines has been greatly improved. + - All Duration fields are now represented as `*int32` fields with units being part of the field name. + - All `bool` fields have been changed to `*bool` to preserve user intent. + - Extensive work has been done to ensure `required` and `optional` is explicitly set in the API, and that + both serialization and validation works accordingly: + - Stop rendering empty structs (review of all occurrences of `omitempty` and introduction of `omitzero`) + - Do not allow `""` when it is not semantically different from value not set (either you have to provide a non-empty string value or not set the field at all). + - Do not allow `0` when it is not semantically different from value not set (either you have to provide a non-0 int value or not set the field at all). + - Do not allow `{}` when it is not semantically different from value not set (either you have to set at least one property in the object or not set the field at all). + - Do not allow `[]` when it is not semantically different from value not set (either you have to set at least one item in the list or not set the field at all). + - Ensure validation for all enum types. + - Missing list markers have been added for SSA. + - Drop unnecessary pointers: + - After fixing `required` and `optional` according to K8s API guidelines, extensive work has been done to + drop unnecessary pointers thus improving the usability of the API's Go structs. + - Avoid embedding structs: Coupling between API types has been reduced by reducing the usage of embedded structs. + - Extensive work has been done to improve consistency across all resources, e.g.: + - Fields for Machine deletion are under a new `deletion` struct in all resources. + - Settings about `rollout` have been logically grouped in all resources. + - Settings about health checks and `remediation` have been logically grouped in all resources. + - Missing validations have been added where required. + - Tech debt has been reduced by dropping deprecated fields. 
+* ClusterClass: Fix continuous reconciles because of apiVersion differences in Cluster topology controller (#12341) +* KCP/CABPK: Add CertificateValidityPeriod and CACertificateValidityPeriod to KubeadmConfig (#12335) +* KCP: Fix timeout handling in GetAPIServerCertificateExpiry and DialContext (#12554) +* Machine: fallback to InfraMachine providerID during deletion if Machine providerID is not set (#11985) +* Runtime SDK: + * Optimize size of Runtime Hook requests (#12462) + * Add mTLS support to Runtime Extension server and client (#12517) +* Improved e2e test coverage, e.g.: + * additional checks that resourceVersion stays stable after tests and that conditions are healthy (#12546 #12111) + * test coverage for scaling from/to 0 with CAPD & cluster-autoscaler (#12572) +* New providers in clusterctl: Scaleway (#12357), cdk8s (#12332) + +See [Cluster API v1.10 compared to v1.11](https://main.cluster-api.sigs.k8s.io/developer/providers/migrations/v1.10-to-v1.11) for more details + +## Notes for workload cluster upgrade to Kubernetes v1.34 with KCP + +* Context: Kubernetes/kubeadm <=> etcd compatibility: + * kubeadm v1.33 only supports etcd v3.5 for Kubernetes v1.33 + * kubeadm v1.34 only supports etcd v3.6 for Kubernetes v1.34 +* The upgrade to etcd v3.6 requires etcd >= v3.5.20 (https://etcd.io/blog/2025/upgrade_from_3.5_to_3.6_issue/) +* Accordingly, when upgrading from Kubernetes v1.33 to v1.34: + * ensure etcd >= v3.5.20 is used with Kubernetes v1.33 before the upgrade + * upgrade to Kubernetes v1.34 and etcd v3.6 at the same time + +## Deprecation and Removals Warning + +- Cluster: Remove deprecated index ByClusterClassName, ClusterByClusterClassClassName and ClusterClassNameField (#12269) +- ClusterClass: Remove deprecated ClusterVariable.definitionFrom field (#12202) +- ClusterClass: Remove deprecated Cluster.spec.topology.rolloutAfter field (#12268) +- ClusterClass: Remove deprecated ClusterCacheTracker and corresponding types (#12270) +- clusterctl: Remove deprecated `clusterctl alpha topology plan` command (#12283) +- ClusterResourceSet: Remove deprecated ClusterResourceSetBinding.DeleteBinding method (#12267) +- MachineDeployment: Removed deprecated revisionHistory (#12274) +- MachineDeployment: Remove deprecated spec.progressDeadlineSeconds (#12232) +- KCP/CABPK: Remove deprecated KubeadmConfig useExperimentalRetryJoin (#12234) +- API: Deprecate v1alpha1 & v1beta1 API packages (#12254) + +:warning: **RELEASE CANDIDATE NOTES** :warning: +## 👌 Kubernetes version support + +- Management Cluster: v1.30.x -> v1.33.x +- Workload Cluster: v1.28.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.11.0-rc.0 +## :chart_with_upwards_trend: Overview +- 21 new commits merged +- 3 feature additions ✨ +- 9 bugs fixed 🐛 + +## :sparkles: New Features +- CAPD: Add scale from/to 0 support for CAPD (#12591) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12625) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12604) + +## :bug: Bug Fixes +- CAPD: Run CAPD conversion tests in CI (#12588) +- e2e: Fix autoscaler e2e test flake (#12627) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12624) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12622) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation 
(#12621) +- Runtime SDK: Export ExtensionConfig webhook (#12599) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12630) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12632) +- Testing: Fix flaky TestReconcileState unit test (#12633) + +## :seedling: Others +- Dependency: Bump Go to v1.24.6 (#12615) +- e2e: Add quickstart e2e test with v1beta1 with ClusterClass and RuntimeSDK (#12590) +- e2e: Improve check for Cluster Available condition in e2e tests (#12596) +- Logging: Reduce noisy logs (#12626) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12631) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + +
+More details about the release + +## Changes since v1.10.0 +## :chart_with_upwards_trend: Overview +- 359 new commits merged +- 88 breaking changes :warning: +- 28 feature additions ✨ +- 50 bugs fixed 🐛 + +## :memo: Proposals +- Core: Update autoscaling from zero enhancement proposal with support for platform-aware autoscale from zero (#11962) + +## :warning: Breaking Changes +- API: Add additional MinProperties & MinItems validation across multiple APIs (#12538) +- API: Add CAPD v1beta2 types (#12226) +- API: Add Minimum=0 marker to all MinReadySeconds fields (#12474) +- API: Add omitempty to required string without zero value (#12548) +- API: Add omitzero on struct without zero value (#12550) +- API: Add v1beta2 types (#12037) +- API: Align Spec fields to optionalfields API conventions (#12431) +- API: Align Status fields to optionalfields API conventions (#12435) +- API: Change .status.replicas fields to pointer + omitempty (#12250) +- API: Change all *metav1.Time fields to metav1.Time (#12518) +- API: Change bool to *bool for all API types (#12436) +- API: Change type of *string fields with invalid zero value to string (#12429) +- API: Change type of int32 fields with valid zero value to *int32 (#12424) +- API/Conditions: add V1Beta1 suffix and remove V1Beta2 suffix from condition types and reasons in v1beta2 packages (#12091) +- API: Drop unnecessary fields from contract-versioned object references (#12356) +- API: Fix required fields linter findings (#12558) +- API: Improve Cluster CRD Go type (#12489) +- API: Migrate API to use *Seconds instead of metav1.Duration fields (#12327) +- API: Move APIs to ./api (#12262) +- API/Partially revert: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (#12290) +- API: Promote v1beta2 conditions (#12066) +- API: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (#12231) +- API: Remove IPFamily from public APIs (move to CAPD/kind util) (#12215) +- API: Remove pointers from ClusterClass and Cluster healthCheck fields (#12525) +- API: Remove pointers from remaining pointer struct fields (#12545) +- API: Rename namingStrategy => naming, machineNamingStrategy => machineNaming (#12524) +- API: Restructure strategy fields (#12506) +- CABPK: Align KubeadmConfig to kubeadm v1beta4 types (#12282) +- CAPD: Align CAPD conversion to conversion of other providers (#12481) +- CAPD/Conditions: add V1Beta1 suffix and remove V1Beta2 suffix from condition types and reasons in CAPD v1beta2 packages (#12393) +- CAPD: Implement v1beta2 contract in CAPD (#12409) +- CAPD: Keep using v1beta1 condition in CAPD Docker backend (#12450) +- CAPD: Promote v1beta2 condition in CAPD (#12362) +- CAPD: Stop using v1beta1 status in CAPD controllers (#12438) +- CAPD: Update example manifests (#12480) +- Cluster: Remove deprecated index ByClusterClassName, ClusterByClusterClassClassName and ClusterClassNameField (#12269) +- ClusterClass: Drop unnecessary fields from ClusterClass template references (#12358) +- ClusterClass: Move infrastructure namingStrategy field in ClusterClass (#12216) +- ClusterClass: Remove ClusterVariable.DefinitionFrom field (#12202) +- ClusterClass: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (again) (#12404) +- ClusterClass: Remove deprecated Cluster.spec.topology.rolloutAfter field (#12268) +- ClusterClass: Remove deprecated ClusterCacheTracker and corresponding types (#12270) +- ClusterClass: Rename deprecated ClusterClass Metadata fields to DeprecatedV1Beta1Metadata (#12273) +- 
ClusterClass: Rename runtime extension fields in ClusterClass ExternalPatchDefinition (#12281) +- ClusterClass: Restructure classRef field in Cluster.spec.topology (#12235) +- clusterctl: Clusterctl describe defaults to v1beta2 (#12369) +- clusterctl: Remove clusterctl alpha topology plan (#12283) +- ClusterResourceSet: Change ClusterResourceSetBinding Bindings field from []*ResourceSetBinding to []ResourceSetBinding (#12476) +- ClusterResourceSet: Make clusterName field in ClusterResourceSetBinding required (#12276) +- ClusterResourceSet: Remove deprecated ClusterResourceSetBinding.DeleteBinding method (#12267) +- Conditions: Swap condition packages (#12086) +- Dependency: Bump to controller-runtime v0.21 / controller-tools v0.18 / k8s.io/* v0.33 / move to randfill (#12191) +- e2e: Migrate E2E tests to v1beta2 (#12451) +- e2e/Test: default to strict field validation & fix unknown field in ClusterClass YAML (#12501) +- IPAM: Refactor reference types for IPAM (#12365) +- KCP: KCP tolerates diff not leading to changes on machines (#12402) +- KCP: Rename LastRemediationStatus.Timestamp to Time in KCP (#12452) +- Machine: Drop unnecessary fields from Machine status.nodeRef (#12352) +- MachineDeployment: Drop revisionHistory in MachineDeployment (#12274) +- MachineDeployment: Remove MD spec.progressDeadlineSeconds (#12232) +- MachineHealthCheck: Drop unnecessary fields from remediationTemplate references (#12368) +- MachineHealthCheck: Rename MHC unhealthyConditions to unhealthyNodeConditions (#12245) +- MachineSet: Make Template in MachineSet & Spec in MachineTemplateSpec required (#12420) +- API/CAPD: Update ControlPlaneEndpoint InfraCluster contract, align CAPD to infra contracts (#12465) +- API/Cluster: Add initialization to Cluster status (#12098) +- API/Control-plane/Bootstrap/KCP/CABPK/Cluster: Implement v1beta2 contract in cluster controller, KCP, CABPK (#12094) +- API/KCP/CABPK/CI: Enable nomaps linter, Remove unused kubeadm ClusterStatus struct, Migrate Cluster.status.failureDomains to array (#12083) +- API/Machine: Add initialization to Machine Status (#12101) +- API/Machine: Move Machine deletion timeout fields into deletion group, move KCP machineTemplate spec fields to machineTemplate.spec (#12499) +- API/MachinePool: Add initialization to MachinePool Status (#12102) +- ClusterClass/MachineHealthCheck/Cluster: Restructure MHC fields in MHC, Cluster and ClusterClass CRDs (#12504) +- clusterctl/Documentation: Remove reference and configurations for Packet (Equinix Metal) (#12143) +- KCP/CABPK: Change BootstrapToken.Token from *BootstrapTokenString to BootstrapTokenString (#12565) +- KCP/CABPK: Change envVars fields from []EnvVar to *[]EnvVar (#12539) +- KCP/CABPK: Change User.PasswdFrom from *PasswdSource to PasswdSource + add omitzero, extend SSA patch helper to handle arrays (#12560) +- KCP/CABPK: Inline ControlPlaneComponent struct in APIServer / ControllerManager / Scheduler in CABPK (#12446) +- KCP/CABPK: Remove KubeadmConfig UseExperimentalRetryJoin (#12234) +- KCP/CABPK: Remove more defaulting from KubeadmConfig/KubeadmConfigTemplate/KCP/KCPTemplate (#12495) +- KCP/CABPK: Remove redundant fields from CABPK / KCP ClusterConfiguration (#12319) +- KCP/CABPK: Remove TypeMeta from KubeadmConfigSpec (#12350) +- KCP/MachineSet/CABPK/CAPD/e2e/Cluster: Cleanup version handling of unsupported Kubernetes releases (#12303) +- Machine/Cluster: Stop using FailureReason and FailureMessage in controllers (#12148) +- Machine/MachinePool/MachineSet/MachineDeployment: Add MinReadySeconds to 
Machine and remove it from MachineDeployment, MachineSet, MachinePool. (#12153) +- Machine/MachineSet/MachineDeployment/Cluster: Stop using deprecated replica counters in controllers (#12149) +- MachineSet/MachineDeployment: Use MachineSetDeletePolicy enum in MD & MS API (#12419) +- Runtime SDK/MachineDeployment: Make DeletePolicy & FailurePolicy enum fields non-pointers (#12453) +- Runtime SDK: Add v1beta2 API for ExtensionConfig (#12197) +- Runtime SDK: Change ExtensionConfig handler timeoutSeconds from *int32 to int32 & add Minimum=1 (#12475) + +## :sparkles: New Features +- API: Block imports to internal packages in our API + restructure import restrictions (#12302) +- API: Deprecate v1alpha1 & v1beta1 API packages (#12254) +- API: Remove pointer, add omitzero & MinProperties for initialization fields/structs (#12482) +- CAPD: Add scale from/to 0 support for CAPD (#12591) +- CI: Add conflicting markers linter (#12569) +- CI: Bump KAL & add the notimestamps linter (#12520) +- clusterctl: Add Scaleway infrastructure provider to clusterctl (#12357) +- clusterctl: Adding Addon Provider for cdk8s (CAAPC) to cluster-api (#12332) +- clusterctl: Clearer diagnostics when provider metadata is missing or repo URL is stale (#12238) +- clusterctl: Validate provider metadata (#12242) +- Dependency: Bump controller-tools v0.17.3, conversion-gen v0.33.0 (#12129) +- Dependency: Complete bump to Kubernetes v1.33 (#12206) +- Dependency: Update KUBEBUILDER_ENVTEST_KUBERNETES_VERSION (#12130) +- e2e: Bump Kubernetes version used for testing to v1.34.0-beta.0 (#12516) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12625) +- e2e: From 1.10 use GetStableReleaseOfMinor instead of GetLatestReleaseOfMinor (#12118) +- Machine: Implement v1beta2 contract in Machine controller (#12038) +- MachinePool/Feat: set new replica fields for machine pools (#12528) +- API/CI: Enable ssatags KAL linter (#12470) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12604) +- KCP/CABPK: Add CertificateValidityPeriod and CACertificateValidityPeriod to KubeadmConfig (#12335) +- KCP/CABPK: Reintroduce KCP/CABPK ClusterConfiguration controlPlaneEndpoint (#12423) +- KCP/CABPK: Stop requiring init or cluster configuration for first CP machine (#12540) +- Runtime SDK/ClusterClass: Extend Cluster builtin to include metadata (#12014) +- Runtime SDK/ClusterClass: Optimize size of runtime hook requests (#12462) +- Runtime SDK: Add mTLS support to runtime extension server and client (#12517) +- Runtime SDK: Extend cluster builtin to include classNamespace (#12050) +- Testing: Bump Kubernetes in tests to v1.33.0 and claim support for v1.33 (#12104) + +## :bug: Bug Fixes +- API: Ensure all pointer status fields are dereferenced correctly (#12412) +- Bootstrap: Make joinConfiguration.discovery.bootstrapToken.token optional (#12107) +- Bootstrap: Relax minLength for bootstrap.dataSecretName to 0 (#12164) +- CABPK: Fix rendering of .Append = false in CABPK (#12437) +- CABPK: Fix rendering of ntp.enabled & users.inactive *bool values in cloud init (#12394) +- CABPK: Increase ignition additionalConfig maxSize from 10 to 32 KB (#12222) +- CABPK: Make KubeadmConfig FileSystem.Label optional (#12019) +- CAPD: Fix IPv6 CAPD e2e test (#12488) +- CAPD: Fix worker machine count in CAPD template (#12028) +- CAPD: Run CAPD conversion tests in CI (#12588) +- CAPIM: Fix CAPD in-memory templates (#12013) +- CAPIM/Mux: fix error check (#12230) +- CI: Fix conversion-verifier and fix 
findings (#12349) +- CI: Fixing failed to install kind for e2e tests (#12361) +- ClusterClass: Fix continuous reconciles because of apiVersion differences in Cluster topology controller (#12341) +- clusterctl: Accept upper case version (#12237) +- clusterctl: Add missing API version to NS object (#12200) +- clusterctl: Clusterctl upgrade hangs for a time on CRD migration when new version contains a number of new CRDs (#11984) +- ClusterResourceSet: Fix potential panic if ClusterResourceSetStrategy is not defined or incorrect (#12096) +- Conditions: Fix condition handling during controller start (#12536) +- e2e: Bump cluster-autoscaler to v1.33, adjust RBAC, pin apiVersion to v1beta1 (#12502) +- e2e: Fix autoscaler e2e test flake (#12627) +- e2e: Fix Available/Ready checks on E2E test (#12549) +- e2e: Fix e2e tests by fixing the etcd tag (#12523) +- e2e: Stop overwriting ExtraPortMappings if WithDockerSockMount option is used (#12012) +- IPAM: Enable conversion in CRDs (#12198) +- IPAM: Revert condition func changes for IPAddressClaim v1beta1 (#12223) +- KCP: Allow transition of KubeadmControlPlaneTemplate from defaulted rolloutStrategy to unset (#12467) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12624) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12622) +- KCP: Fix nil pointer in conversion (#12292) +- KCP: Fix rollout when init configuration in KCP is empty (#12344) +- KCP: Fix timeout handling in GetAPIServerCertificateExpiry and DialContext (#12554) +- Machine/Machine deletion: fallback to InfraMachine providerID if Machine providerID is not set (#11985) +- MachineDeployment: Bug fix to set machinedeployment AvailableReplicas (#12410) +- MachineDeployment: Fix second rolling update for MD rolloutAfter (#12261) +- MachineSet: Fix v1beta1 MachinesReady condition on MachineSet (#12535) +- API/ClusterClass: Fix MaxLength of worker topology Name fields (#12072) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation (#12621) +- Dependency/CI: Upgrade golangci-lint to v2.1.0 (#12170) +- Testing/CI: Fix the condition to check whether cluster has v1beta2 conditions (#12100) +- Runtime SDK: Export ExtensionConfig webhook (#12599) +- Testing: Fix flakes in TestAPIAndWebhookChanges unit test (#12526) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12630) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12632) +- Testing: Fix flaky TestReconcileState unit test (#12633) +- Testing: Fix race condition in InMemoryMachine controller tests (#12347) +- Testing: Fix Test_ValidateCluster unit tests for mink8s (#12564) +- util/CRD migration: Fix cases where update validation fails (#11991) +- util: Fix typo for WithOwnedV1beta1Conditions to WithOwnedV1Beta1Conditions (#12218) + +## :seedling: Others +- API: Drop hardcoded v1beta1 references (#12027) +- API: Enable optionalfields linter and fix remaining findings (#12299) +- API: Move internal/apis to internal/api (#12296) +- API: Remove old godoc comment, remove unnecessary cast in KCP (#12479) +- API: Remove unused List conversion funcs (#12054) +- API: Set minimum=1 on ObservedGeneration and KubeadmConfig APIEndpoint bindPort (#12417) +- API: Set print columns for v1beta2 types (#12534) +- CAPD: Ensure CAPD v1beta1 API package only imports core v1beta1 (#12405) +- CAPIM/Mux: Ignore net.ErrClosed error during listener close & server shutdown (#12212) +- CI: Add govulncheck to ensure vulnerability 
(#12108) +- CI: Bump E2E to Kubernetes v1.33.0-rc.1 (#12099) +- CI: Bump golangci-lint v2 (#12088) +- CI: Bump KAL and remove enum exclude (#12500) +- CI: Bump KAL to 20250605073038, cleanup excludes, fix IPAM prefix field, add MaxItems to Machine.status.addresses (#12326) +- CI: Bump KAL to 20250626 + enable uniquemarkers linter (#12427) +- CI/Chore: Update golangci-lint to v2.3.0 (#12573) +- CI: Enable duplicatemarkers linter (#12228) +- CI: Enable statusoptional linter (#12229) +- CI: Fix `make generate-go-openapi` if parent directory name does not equal `cluster-api` (#12461) +- CI: Remove govulncheck from the verify target (#12348) +- CI: Restructure excludes in KAL linter config (#12445) +- CI: Switch plugin to kube-api-linter (#12089) +- CI: Update version matrix for github workflows for release-1.10 (#11992) +- CI: Use release artifacts for CAPI v1.10 (#12147) +- Cluster: Add validation for Cluster spec.controlPlaneRef, spec.infrastructureRef and spec.topology (#12454) +- Cluster: Ensure Cluster.status.failureDomains are alphabetically sorted (#12416) +- Cluster: Improve error message if rebase fails because target ClusterClass is not reconciled (#12415) +- ClusterClass: Add DropEmptyStruct to ssa patch helper (#12442) +- ClusterClass/Extend topology upgrade test: add bool removal test case (#12484) +- ClusterClass: Improve CC RefVersionsUpToDate condition message (#12472) +- ClusterClass: Improve validation of worker topology names in Cluster resource (#12561) +- ClusterClass: Improve webhook output to include the names of the clusters blocking a deletion (#12060) +- ClusterClass: Make infrastructure and controlPlane required in ClusterClass (#12444) +- clusterctl: Add filename to clusterctl error about bad YAML (#12189) +- clusterctl: Add support for compatible contracts to clusterctl (#12018) +- clusterctl: Bump cert-manager to v1.17.1 (#12044) +- clusterctl: Bump cert-manager to v1.17.2 (#12210) +- clusterctl: Bump cert-manager to v1.18.0 (#12342) +- clusterctl: Bump cert-manager to v1.18.1 (#12378) +- clusterctl: Bump cert-manager to v1.18.2 (#12478) +- clusterctl: Change k0smotron repo location (#12225) +- clusterctl: Cleanup clusterctl tests assets (#12510) +- clusterctl: Enforce skip upgrade policy in clusterctl (#12017) +- Community meeting: Add JoelSpeed to approvers (#12204) +- Conditions: Cleanup v1beta1 updateStatus functions (#12190) +- Conditions: Drop usage of v1beta1 conditions (#12109) +- Control-plane: Avoid large number of connection error traces in kubeadm controlplane controller (#12106) +- Dependency: Bump Go 1.24 (#12128) +- Dependency: Bump go to v1.23.8 (#12052) +- Dependency: Bump Go to v1.24.5 (#12509) +- Dependency: Bump Go to v1.24.6 (#12615) +- Dependency: Bump kustomize to v5.7.0 (#12432) +- Dependency: Bump several tool versions in Makefile (#12433) +- Dependency: Bump sigs.k8s.io/kind to v0.28.0 (#12243) +- Dependency: Bump sigs.k8s.io/kind to v0.29.0 (#12257) +- Dependency: Bump to Go v1.24.4, github.com/cloudflare/circl v1.6.1 (#12351) +- Dependency: Fix CVE-2025-54388 (#12574) +- Dependency: Update github.com/go-viper/mapstructure/v2 to v2.3.0 (#12421) +- Devtools: Add KubeVirt support to Tilt dev workflow (#11697) +- Devtools: Fix Tiltfile (#12541) +- Devtools/Metrics: use v1beta2 for condition metrics and add metrics for dockercluster devcluster dockermachine devmachine extensionconfig ipaddressclaim and crs (#12006) +- e2e: Add an option to override custom node image name for kind cluster (#12186) +- e2e: Add quickstart e2e test with v1beta1 
with ClusterClass and RuntimeSDK (#12590) +- e2e: Add resource version check to clusterctl upgrade tests (#12546) +- e2e: Add retry for SSA requests against Kubernetes < v1.29 in clusterctl upgrade tests (#12067) +- e2e: Bump clusterctl_upgrade_test.go main and 1.10 tests to k8s v1.33.0 (#12193) +- e2e: Bump Kubernetes version used for testing to v1.33.0-rc.0 (#12073) +- e2e: Fix ResourceVersion flake for MachinePools (#12552) +- e2e: Improve check for Cluster Available condition in e2e tests (#12596) +- e2e: Only run DescribeCluster if v1beta2 Cluster CRD is there (#12279) +- e2e: Print the entire object diff if resource versions are not stable in e2e tests (#12527) +- e2e: Remove redundant check in verifyV1Beta2ConditionsTrueV1Beta1 (#12477) +- e2e: Set extraArgs in test extension (#12557) +- e2e: Skipping test that is failing because of infra issues (#12496) +- KCP: Add --etcd-client-log-level flag to KCP (#12271) +- KCP: Allow unsetting etcd.local, etcd.external and dns (#12065) +- KCP: Bump corefile-migration library to v1.0.26 (#12058) +- KCP: Fix typo in forward etcd leadership error message (#12056) +- Logging: Reduce noisy logs (#12626) +- Misc: Remove jackfrancis from reviewers (#12134) +- KCP/CABPK/KCP: Set MinItems=1 on ExternalEtcd.Endpoints (#12411) +- KCP/CABPK: Remove unused updateClusterStatus (#12295) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12631) +- KCP/MachineSet/MachineHealthCheck: Remove explicit defaulting of MS deletePolicy, MHC maxUnhealthy, KCPTemplate rolloutStrategy (#12464) +- MachinePool/MachineSet/MachineDeployment: Add validation to ensure ClusterName fields are equal in MD/MS/MP (#12447) +- Testing/CI/e2e: Add checker to validate conditions for v1beta2 (#12111) +- Testing/CI: Fix e2e test capi-e2e-release-1.8 (#12379) +- Testing/CI: Fix flaky test in extensionconfig_controller_test.go (#12386) +- Release: Add validation for PREVIOUS_RELEASE_TAG in release-notes-tool (#12380) +- Release: Postpone v1.11 code freeze by one week (#12498) +- Release: Prepare main for v1.11 development (#12000) +- Release: Use github.base_ref in markdown-link-check (#12034) +- Runtime SDK: Block dependencies to internal packages for the RX implementation (#12297) +- Runtime SDK: Fix lifecycle hooks conversions (#12507) +- Runtime SDK: Stop registering API types in the runtime extension scheme (#12042) +- Testing: Add test/framework/* tests in CI (#12469) +- Testing/Framework: Watch logs from init containers (#12208) +- Testing: Release Notes Generator - Test cases for main.go and ref.go (#11882) +- Testing: Test changes planned to comply optionalrequired linter (#12414) +- util: Move contract version & GetCompatibleVersions to contract package (#12032) +- util: Recover v1.10 util packages for conditions, patch and paused to util/deprecated/v1beta1 for provider migrations (#12224) + +:book: Additionally, there have been 51 contributions to our documentation and book. 
(#11029, #11998, #12004, #12057, #12074, #12093, #12117, #12120, #12122, #12125, #12126, #12131, #12139, #12140, #12145, #12150, #12163, #12165, #12188, #12201, #12205, #12236, #12246, #12266, #12284, #12287, #12306, #12309, #12328, #12333, #12377, #12382, #12403, #12418, #12428, #12439, #12443, #12455, #12483, #12491, #12503, #12521, #12532, #12543, #12571, #12575, #12576, #12587, #12589, #12595, #12602) + +## Dependencies + +### Added +- github.com/envoyproxy/go-control-plane/envoy: [v1.32.4](https://github.com/envoyproxy/go-control-plane/tree/envoy/v1.32.4) +- github.com/envoyproxy/go-control-plane/ratelimit: [v0.1.0](https://github.com/envoyproxy/go-control-plane/tree/ratelimit/v0.1.0) +- github.com/klauspost/compress: [v1.18.0](https://github.com/klauspost/compress/tree/v1.18.0) +- github.com/kylelemons/godebug: [v1.1.0](https://github.com/kylelemons/godebug/tree/v1.1.0) +- github.com/prashantv/gostub: [v1.1.0](https://github.com/prashantv/gostub/tree/v1.1.0) +- go.opentelemetry.io/auto/sdk: v1.1.0 +- go.uber.org/automaxprocs: v1.6.0 +- go.yaml.in/yaml/v2: v2.4.2 +- go.yaml.in/yaml/v3: v3.0.3 +- gopkg.in/go-jose/go-jose.v2: v2.6.3 +- sigs.k8s.io/randfill: v1.0.0 + +### Changed +- cel.dev/expr: v0.18.0 → v0.19.1 +- github.com/cloudflare/circl: [v1.3.7 → v1.6.1](https://github.com/cloudflare/circl/compare/v1.3.7...v1.6.1) +- github.com/cncf/xds/go: [b4127c9 → cff3c89](https://github.com/cncf/xds/compare/b4127c9...cff3c89) +- github.com/coreos/go-oidc: [v2.2.1+incompatible → v2.3.0+incompatible](https://github.com/coreos/go-oidc/compare/v2.2.1...v2.3.0) +- github.com/envoyproxy/go-control-plane: [v0.13.1 → v0.13.4](https://github.com/envoyproxy/go-control-plane/compare/v0.13.1...v0.13.4) +- github.com/envoyproxy/protoc-gen-validate: [v1.1.0 → v1.2.1](https://github.com/envoyproxy/protoc-gen-validate/compare/v1.1.0...v1.2.1) +- github.com/go-logr/logr: [v1.4.2 → v1.4.3](https://github.com/go-logr/logr/compare/v1.4.2...v1.4.3) +- github.com/go-viper/mapstructure/v2: [v2.2.1 → v2.3.0](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.3.0) +- github.com/golang-jwt/jwt/v4: [v4.5.0 → v4.5.2](https://github.com/golang-jwt/jwt/compare/v4.5.0...v4.5.2) +- github.com/golang/glog: [v1.2.2 → v1.2.4](https://github.com/golang/glog/compare/v1.2.2...v1.2.4) +- github.com/google/cel-go: [v0.22.0 → v0.23.2](https://github.com/google/cel-go/compare/v0.22.0...v0.23.2) +- github.com/google/gnostic-models: [v0.6.8 → v0.6.9](https://github.com/google/gnostic-models/compare/v0.6.8...v0.6.9) +- github.com/google/pprof: [40e02aa → 27863c8](https://github.com/google/pprof/compare/40e02aa...27863c8) +- github.com/gorilla/websocket: [v1.5.3 → e064f32](https://github.com/gorilla/websocket/compare/v1.5.3...e064f32) +- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.20.0 → v2.24.0](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.20.0...v2.24.0) +- github.com/onsi/ginkgo/v2: [v2.23.3 → v2.23.4](https://github.com/onsi/ginkgo/compare/v2.23.3...v2.23.4) +- github.com/onsi/gomega: [v1.36.3 → v1.38.0](https://github.com/onsi/gomega/compare/v1.36.3...v1.38.0) +- github.com/pmezard/go-difflib: [5d4384e → v1.0.0](https://github.com/pmezard/go-difflib/compare/5d4384e...v1.0.0) +- github.com/prometheus/client_golang: [v1.19.1 → v1.22.0](https://github.com/prometheus/client_golang/compare/v1.19.1...v1.22.0) +- github.com/prometheus/common: [v0.55.0 → v0.62.0](https://github.com/prometheus/common/compare/v0.55.0...v0.62.0) +- github.com/rogpeppe/go-internal: [v1.12.0 → 
v1.13.1](https://github.com/rogpeppe/go-internal/compare/v1.12.0...v1.13.1) +- github.com/spf13/pflag: [v1.0.6 → v1.0.7](https://github.com/spf13/pflag/compare/v1.0.6...v1.0.7) +- github.com/spf13/viper: [v1.20.0 → v1.20.1](https://github.com/spf13/viper/compare/v1.20.0...v1.20.1) +- github.com/stretchr/objx: [v0.5.0 → v0.5.2](https://github.com/stretchr/objx/compare/v0.5.0...v0.5.2) +- go.etcd.io/etcd/api/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/client/pkg/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/client/v2: v2.305.16 → v2.305.21 +- go.etcd.io/etcd/client/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/pkg/v3: v3.5.16 → v3.5.21 +- go.etcd.io/etcd/raft/v3: v3.5.16 → v3.5.21 +- go.etcd.io/etcd/server/v3: v3.5.16 → v3.5.21 +- go.opentelemetry.io/contrib/detectors/gcp: v1.29.0 → v1.34.0 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.54.0 → v0.58.0 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp: v0.54.0 → v0.58.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.27.0 → v1.33.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.28.0 → v1.33.0 +- go.opentelemetry.io/otel/metric: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/sdk/metric: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/sdk: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/trace: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel: v1.29.0 → v1.34.0 +- go.opentelemetry.io/proto/otlp: v1.3.1 → v1.4.0 +- golang.org/x/crypto: v0.36.0 → v0.40.0 +- golang.org/x/mod: v0.23.0 → v0.25.0 +- golang.org/x/net: v0.37.0 → v0.42.0 +- golang.org/x/oauth2: v0.28.0 → v0.30.0 +- golang.org/x/sync: v0.12.0 → v0.16.0 +- golang.org/x/sys: v0.31.0 → v0.34.0 +- golang.org/x/term: v0.30.0 → v0.33.0 +- golang.org/x/text: v0.23.0 → v0.27.0 +- golang.org/x/time: v0.8.0 → v0.9.0 +- golang.org/x/tools: v0.30.0 → v0.34.0 +- google.golang.org/genproto/googleapis/api: e6fa225 → 5f5ef82 +- google.golang.org/genproto/googleapis/rpc: 3abc09e → 1a7da9e +- google.golang.org/grpc: v1.67.3 → v1.71.3 +- google.golang.org/protobuf: v1.36.5 → v1.36.6 +- k8s.io/api: v0.32.3 → v0.33.3 +- k8s.io/apiextensions-apiserver: v0.32.3 → v0.33.3 +- k8s.io/apimachinery: v0.32.3 → v0.33.3 +- k8s.io/apiserver: v0.32.3 → v0.33.3 +- k8s.io/client-go: v0.32.3 → v0.33.3 +- k8s.io/cluster-bootstrap: v0.32.3 → v0.33.3 +- k8s.io/code-generator: v0.32.3 → v0.33.3 +- k8s.io/component-base: v0.32.3 → v0.33.3 +- k8s.io/gengo/v2: 2b36238 → 1244d31 +- k8s.io/kms: v0.32.3 → v0.33.3 +- k8s.io/kube-openapi: 32ad38e → c8a335a +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.31.0 → v0.31.2 +- sigs.k8s.io/controller-runtime: v0.20.4 → v0.21.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.4.2 → v4.6.0 +- sigs.k8s.io/yaml: v1.4.0 → v1.6.0 + +### Removed +- github.com/asaskevich/govalidator: [f61b66f](https://github.com/asaskevich/govalidator/tree/f61b66f) +- github.com/go-kit/log: [v0.2.1](https://github.com/go-kit/log/tree/v0.2.1) +- github.com/go-logfmt/logfmt: [v0.5.1](https://github.com/go-logfmt/logfmt/tree/v0.5.1) +- gopkg.in/square/go-jose.v2: v2.6.0 + +
+
+
_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.11.0.md b/CHANGELOG/v1.11.0.md new file mode 100644 index 000000000000..cfe13edb494b --- /dev/null +++ b/CHANGELOG/v1.11.0.md @@ -0,0 +1,469 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.30.x -> v1.33.x +- Workload Cluster: v1.28.x -> v1.33.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights + +- Bumped to Go 1.24, controller-runtime v0.21, k8s.io/* v0.33, controller-gen v0.18 (also moved to `sigs.k8s.io/randfill`) (#12191) +- The v1beta2 API version has been introduced and, considering the awesome amount of improvements, it marks an important + step in the journey towards graduating our API to v1. + - Accordingly, there is now a new v1beta2 version of our contract for providers. + - Improve status: + - The transition to the new K8s-aligned conditions using `metav1.Conditions` types and the new condition semantics + has been completed. + - Replica counters are now consistent with new conditions and across all resources; new replica counters have been added at cluster level. + - The semantics of contract fields in status have been improved and are now consistent across all resources. + - The confusing `FailureReason` and `FailureMessage` fields have been dropped. + - Support ClusterClass across namespaces: API changes planned for this feature have been implemented. + - Improve object references: + - Unnecessary fields have been dropped from object references. + - Object references are now GitOps friendly (API version is not overwritten anymore by controllers). + - KubeadmConfig and KubeadmControlPlane APIs have been aligned with the kubeadm v1beta4 API. + - Additionally, fields inferred from top-level objects have been removed, thus getting rid of a common source of confusion/issues. +- Compliance with K8s API guidelines: + - Thanks to the adoption of the [KAL linter](https://github.com/kubernetes-sigs/kube-api-linter), compliance with K8s API guidelines has been greatly improved. + - All Duration fields are now represented as `*int32` fields with units being part of the field name. + - All `bool` fields have been changed to `*bool` to preserve user intent. + - Extensive work has been done to ensure `required` and `optional` are explicitly set in the API, and that + both serialization and validation work accordingly: + - Stop rendering empty structs (review of all occurrences of `omitempty` and introduction of `omitzero`). + - Do not allow `""` when it is not semantically different from value not set (either you have to provide a non-empty string value or not set the field at all). + - Do not allow `0` when it is not semantically different from value not set (either you have to provide a non-0 int value or not set the field at all). + - Do not allow `{}` when it is not semantically different from value not set (either you have to set at least one property in the object or not set the field at all). + - Do not allow `[]` when it is not semantically different from value not set (either you have to set at least one item in the list or not set the field at all). + - Ensure validation for all enum types. + - Missing list markers have been added for SSA. + - Drop unnecessary pointers: + - After fixing `required` and `optional` according to K8s API guidelines, extensive work has been done to + drop unnecessary pointers, thus improving the usability of the API's Go structs. 
+ - Avoid embedding structs: Coupling between API types has been reduced by limiting the usage of embedded structs. + - Extensive work has been done to improve consistency across all resources, e.g.: + - Fields for Machine deletion are under a new `deletion` struct in all resources. + - Settings about `rollout` have been logically grouped in all resources. + - Settings about health checks and `remediation` have been logically grouped in all resources. + - Missing validations have been added where required. + - Tech debt has been reduced by dropping deprecated fields. +* ClusterClass: Fix continuous reconciles because of apiVersion differences in Cluster topology controller (#12341) +* KCP/CABPK: Add CertificateValidityPeriod and CACertificateValidityPeriod to KubeadmConfig (#12335) +* KCP: Fix timeout handling in GetAPIServerCertificateExpiry and DialContext (#12554) +* Machine: fall back to InfraMachine providerID during deletion if Machine providerID is not set (#11985) +* Runtime SDK: + * Optimize size of Runtime Hook requests (#12462) + * Add mTLS support to Runtime Extension server and client (#12517) +* Improved e2e test coverage, e.g.: + * additional checks that resourceVersion stays stable after tests and that conditions are healthy (#12546 #12111) + * test coverage for scaling from/to 0 with CAPD & cluster-autoscaler (#12572) +* New providers in clusterctl: Scaleway (#12357), cdk8s (#12332) + +See [Cluster API v1.10 compared to v1.11](https://main.cluster-api.sigs.k8s.io/developer/providers/migrations/v1.10-to-v1.11) for more details. + +## Notes for workload cluster upgrade to Kubernetes v1.34 with KCP + +* Context: Kubernetes/kubeadm <=> etcd compatibility: + * kubeadm v1.33 only supports etcd v3.5 for Kubernetes v1.33 + * kubeadm v1.34 only supports etcd v3.6 for Kubernetes v1.34 +* The upgrade to etcd v3.6 requires etcd >= v3.5.20 (https://etcd.io/blog/2025/upgrade_from_3.5_to_3.6_issue/) +* Accordingly, when upgrading from Kubernetes v1.33 to v1.34: + * ensure etcd >= v3.5.20 is used with Kubernetes v1.33 before the upgrade + * upgrade to Kubernetes v1.34 and etcd v3.6 at the same time + +## Deprecation and Removals Warning + +- Cluster: Remove deprecated index ByClusterClassName, ClusterByClusterClassClassName and ClusterClassNameField (#12269) +- ClusterClass: Remove deprecated ClusterVariable.definitionFrom field (#12202) +- ClusterClass: Remove deprecated Cluster.spec.topology.rolloutAfter field (#12268) +- ClusterClass: Remove deprecated ClusterCacheTracker and corresponding types (#12270) +- clusterctl: Remove deprecated `clusterctl alpha topology plan` command (#12283) +- ClusterResourceSet: Remove deprecated ClusterResourceSetBinding.DeleteBinding method (#12267) +- MachineDeployment: Remove deprecated revisionHistory (#12274) +- MachineDeployment: Remove deprecated spec.progressDeadlineSeconds (#12232) +- KCP/CABPK: Remove deprecated KubeadmConfig useExperimentalRetryJoin (#12234) +- API: Deprecate v1alpha1 & v1beta1 API packages (#12254) + +## Changes since v1.10.0 +## :chart_with_upwards_trend: Overview +- 360 new commits merged +- 88 breaking changes :warning: +- 29 feature additions ✨ +- 50 bugs fixed 🐛 + +## :memo: Proposals +- Core: Update autoscaling from zero enhancement proposal with support for platform-aware autoscale from zero (#11962) + +## :warning: Breaking Changes +- API: Add additional MinProperties & MinItems validation across multiple APIs (#12538) +- API: Add CAPD v1beta2 types (#12226) +- API: Add Minimum=0 marker to all MinReadySeconds fields 
(#12474) +- API: Add omitempty to required string without zero value (#12548) +- API: Add omitzero on struct without zero value (#12550) +- API: Add v1beta2 types (#12037) +- API: Align Spec fields to optionalfields API conventions (#12431) +- API: Align Status fields to optionalfields API conventions (#12435) +- API: Change .status.replicas fields to pointer + omitempty (#12250) +- API: Change all *metav1.Time fields to metav1.Time (#12518) +- API: Change bool to *bool for all API types (#12436) +- API: Change type of *string fields with invalid zero value to string (#12429) +- API: Change type of int32 fields with valid zero value to *int32 (#12424) +- API/Conditions: add V1Beta1 suffix and remove V1Beta2 suffix from condition types and reasons in v1beta2 packages (#12091) +- API: Drop unnecessary fields from contract-versioned object references (#12356) +- API: Fix required fields linter findings (#12558) +- API: Improve Cluster CRD Go type (#12489) +- API: Migrate API to use *Seconds instead of metav1.Duration fields (#12327) +- API: Move APIs to ./api (#12262) +- API/Partially revert: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (#12290) +- API: Promote v1beta2 conditions (#12066) +- API: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (#12231) +- API: Remove IPFamily from public APIs (move to CAPD/kind util) (#12215) +- API: Remove pointers from ClusterClass and Cluster healthCheck fields (#12525) +- API: Remove pointers from remaining pointer struct fields (#12545) +- API: Rename namingStrategy => naming, machineNamingStrategy => machineNaming (#12524) +- API: Restructure strategy fields (#12506) +- CABPK: Align KubeadmConfig to kubeadm v1beta4 types (#12282) +- CAPD: Align CAPD conversion to conversion of other providers (#12481) +- CAPD/Conditions: add V1Beta1 suffix and remove V1Beta2 suffix from condition types and reasons in CAPD v1beta2 packages (#12393) +- CAPD: Implement v1beta2 contract in CAPD (#12409) +- CAPD: Keep using v1beta1 condition in CAPD Docker backend (#12450) +- CAPD: Promote v1beta2 condition in CAPD (#12362) +- CAPD: Stop using v1beta1 status in CAPD controllers (#12438) +- CAPD: Update example manifests (#12480) +- Cluster: Remove deprecated index ByClusterClassName, ClusterByClusterClassClassName and ClusterClassNameField (#12269) +- ClusterClass: Drop unnecessary fields from ClusterClass template references (#12358) +- ClusterClass: Move infrastructure namingStrategy field in ClusterClass (#12216) +- ClusterClass: Remove ClusterVariable.DefinitionFrom field (#12202) +- ClusterClass: Remove DefaulterRemoveUnknownOrOmitableFields mutating webhook option (again) (#12404) +- ClusterClass: Remove deprecated Cluster.spec.topology.rolloutAfter field (#12268) +- ClusterClass: Remove deprecated ClusterCacheTracker and corresponding types (#12270) +- ClusterClass: Rename deprecated ClusterClass Metadata fields to DeprecatedV1Beta1Metadata (#12273) +- ClusterClass: Rename runtime extension fields in ClusterClass ExternalPatchDefinition (#12281) +- ClusterClass: Restructure classRef field in Cluster.spec.topology (#12235) +- clusterctl: Clusterctl describe defaults to v1beta2 (#12369) +- clusterctl: Remove clusterctl alpha topology plan (#12283) +- ClusterResourceSet: Change ClusterResourceSetBinding Bindings field from []*ResourceSetBinding to []ResourceSetBinding (#12476) +- ClusterResourceSet: Make clusterName field in ClusterResourceSetBinding required (#12276) +- ClusterResourceSet: Remove deprecated 
ClusterResourceSetBinding.DeleteBinding method (#12267) +- Conditions: Swap condition packages (#12086) +- Dependency: Bump to controller-runtime v0.21 / controller-tools v0.18 / k8s.io/* v0.33 / move to randfill (#12191) +- e2e: Migrate E2E tests to v1beta2 (#12451) +- e2e/Test/e2e: default to strict field validation & fix unknown field in ClusterClass YAML (#12501) +- IPAM: Refactor reference types for IPAM (#12365) +- KCP: KCP tolerates diff not leading to changes on machines (#12402) +- KCP: Rename LastRemediationStatus.Timestamp to Time in KCP (#12452) +- Machine: Drop unnecessary fields from Machine status.nodeRef (#12352) +- MachineDeployment: Drop revisionHistory in MachineDeployment (#12274) +- MachineDeployment: Remove MD spec.progressDeadlineSeconds (#12232) +- MachineHealthCheck: Drop unnecessary fields from remediationTemplate references (#12368) +- MachineHealthCheck: Rename MHC unhealthyConditions to unhealthyNodeConditions (#12245) +- MachineSet: Make Template in MachineSet & Spec in MachineTemplateSpec required (#12420) +- API/CAPD: Update ControlPlaneEndpoint InfraCluster contract, align CAPD to infra contracts (#12465) +- API/Cluster: Add initialization to Cluster status (#12098) +- API/Control-plane/Bootstrap/KCP/CABPK/Cluster: Implement v1beta2 contract in cluster controller, KCP, CABPK (#12094) +- API/KCP/CABPK/CI: Enable nomaps linter, Remove unused kubeadm ClusterStatus struct, Migrate Cluster.status.failureDomains to array (#12083) +- API/Machine: Add initialization to Machine Status (#12101) +- API/Machine: Move Machine deletion timeout fields into deletion group, move KCP machineTemplate spec fields to machineTemplate.spec (#12499) +- API/MachinePool: Add initialization to MachinePool Status (#12102) +- ClusterClass/MachineHealthCheck/Cluster: Restructure MHC fields in MHC, Cluster and ClusterClass CRDs (#12504) +- clusterctl/Documentation: Remove reference and configurations for Packet (Equinix Metal) (#12143) +- KCP/CABPK: Change BootstrapToken.Token from *BootstrapTokenString to BootstrapTokenString (#12565) +- KCP/CABPK: Change envVars fields from []EnvVar to *[]EnvVar (#12539) +- KCP/CABPK: Change User.PasswdFrom from *PasswdSource to PasswdSource + add omitzero, extend SSA patch helper to handle arrays (#12560) +- KCP/CABPK: Inline ControlPlaneComponent struct in APIServer / ControllerManager / Scheduler in CABPK (#12446) +- KCP/CABPK: Remove KubeadmConfig UseExperimentalRetryJoin (#12234) +- KCP/CABPK: Remove more defaulting from KubeadmConfig/KubeadmConfigTemplate/KCP/KCPTemplate (#12495) +- KCP/CABPK: Remove redundant fields from CABPK / KCP ClusterConfiguration (#12319) +- KCP/CABPK: Remove TypeMeta from KubeadmConfigSpec (#12350) +- KCP/MachineSet/CABPK/CAPD/e2e/Cluster: Cleanup version handling of unsupported Kubernetes releases (#12303) +- Machine/Cluster: Stop using FailureReason and FailureMessage in controllers (#12148) +- Machine/MachinePool/MachineSet/MachineDeployment: Add MinReadySeconds to Machine and remove it from MachineDeployment, MachineSet, MachinePool. 
(#12153) +- Machine/MachineSet/MachineDeployment/Cluster: Stop using deprecated replica counters in controllers (#12149) +- MachineSet/MachineDeployment: Use MachineSetDeletePolicy enum in MD & MS API (#12419) +- Runtime SDK/MachineDeployment: Make DeletePolicy & FailurePolicy enum fields non-pointers (#12453) +- Runtime SDK: Add v1beta2 API for ExtensionConfig (#12197) +- Runtime SDK: Change ExtensionConfig handler timeoutSeconds from *int32 to int32 & add Minimum=1 (#12475) + +## :sparkles: New Features +- API: Block imports to internal packages in our API + restructure import restrictions (#12302) +- API: Deprecate v1alpha1 & v1beta1 API packages (#12254) +- API: Remove pointer, add omitzero & MinProperties for initialization fields/structs (#12482) +- CAPD: Add scale from/to 0 support for CAPD (#12591) +- CI: Add conflicting markers linter (#12569) +- CI: Bump KAL & add the notimestamps linter (#12520) +- clusterctl: Add Scaleway infrastructure provider to clusterctl (#12357) +- clusterctl: Adding Addon Provider for cdk8s (CAAPC) to cluster-api (#12332) +- clusterctl: Clearer diagnostics when provider metadata is missing or repo URL is stale (#12238) +- clusterctl: Validate provider metadata (#12242) +- Dependency: Bump controller-tools v0.17.3, conversion-gen v0.33.0 (#12129) +- Dependency: Complete bump to Kubernetes v1.33 (#12206) +- Dependency: Update KUBEBUILDER_ENVTEST_KUBERNETES_VERSION (#12130) +- e2e: Bump Kubernetes version used for testing to v1.34.0-beta.0 (#12516) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12625) +- e2e: From 1.10 use GetStableReleaseOfMinor instead of GetLatestReleaseOfMinor (#12118) +- KCP: Bump corefile-migration to v1.0.27 (#12639) +- Machine: Implement v1beta2 contract in Machine controller (#12038) +- MachinePool/Feat: set new replica fields for machine pools (#12528) +- API/CI: Enable ssatags KAL linter (#12470) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12604) +- KCP/CABPK: Add CertificateValidityPeriod and CACertificateValidityPeriod to KubeadmConfig (#12335) +- KCP/CABPK: Reintroduce KCP/CABPK ClusterConfiguration controlPlaneEndpoint (#12423) +- KCP/CABPK: Stop requiring init or cluster configuration for first CP machine (#12540) +- Runtime SDK/ClusterClass: Extend Cluster builtin to include metadata (#12014) +- Runtime SDK/ClusterClass: Optimize size of runtime hook requests (#12462) +- Runtime SDK: Add mTLS support to runtime extension server and client (#12517) +- Runtime SDK: Extend cluster builtin to include classNamespace (#12050) +- Testing: Bump Kubernetes in tests to v1.33.0 and claim support for v1.33 (#12104) + +## :bug: Bug Fixes +- API: Ensure all pointer status fields are dereferenced correctly (#12412) +- Bootstrap: Make joinConfiguration.discovery.bootstrapToken.token optional (#12107) +- Bootstrap: Relax minLength for bootstrap.dataSecretName to 0 (#12164) +- CABPK: Fix rendering of .Append = false in CABPK (#12437) +- CABPK: Fix rendering of ntp.enabled & users.inactive *bool values in cloud init (#12394) +- CABPK: Increase ignition additionalConfig maxSize from 10 to 32 KB (#12222) +- CABPK: Make KubeadmConfig FileSystem.Label optional (#12019) +- CAPD: Fix IPv6 CAPD e2e test (#12488) +- CAPD: Fix worker machine count in CAPD template (#12028) +- CAPD: Run CAPD conversion tests in CI (#12588) +- CAPIM: Fix CAPD in-memory templates (#12013) +- CAPIM/Mux: fix error check (#12230) +- CI: Fix conversion-verifier and fix findings (#12349) +- CI: 
Fixing failed to install kind for e2e tests (#12361) +- ClusterClass: Fix continuous reconciles because of apiVersion differences in Cluster topology controller (#12341) +- clusterctl: Accept upper case version (#12237) +- clusterctl: Add missing API version to NS object (#12200) +- clusterctl: Clusterctl upgrade hangs for a time on CRD migration when new version contains a number of new CRDs (#11984) +- ClusterResourceSet: Fix potential panic if ClusterResourceSetStrategy is not defined or incorrect (#12096) +- Conditions: Fix condition handling during controller start (#12536) +- e2e: Bump cluster-autoscaler to v1.33, adjust RBAC, pin apiVersion to v1beta1 (#12502) +- e2e: Fix autoscaler e2e test flake (#12627) +- e2e: Fix Available/Ready checks on E2E test (#12549) +- e2e: Fix e2e tests by fixing the etcd tag (#12523) +- e2e: Stop overwriting ExtraPortMappings if WithDockerSockMount option is used (#12012) +- IPAM: Enable conversion in CRDs (#12198) +- IPAM: Revert condition func changes for IPAddressClaim v1beta1 (#12223) +- KCP: Allow transition of KubeadmControlPlaneTemplate from defaulted rolloutStrategy to unset (#12467) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12624) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12622) +- KCP: Fix nil pointer in conversion (#12292) +- KCP: Fix rollout when init configuration in KCP is empty (#12344) +- KCP: Fix timeout handling in GetAPIServerCertificateExpiry and DialContext (#12554) +- Machine/Machine deletion: fallback to InfraMachine providerID if Machine providerID is not set (#11985) +- MachineDeployment: Bug fix to set machinedeployment AvailableReplicas (#12410) +- MachineDeployment: Fix second rolling update for MD rolloutAfter (#12261) +- MachineSet: Fix v1beta1 MachinesReady condition on MachineSet (#12535) +- API/ClusterClass: Fix MaxLength of worker topology Name fields (#12072) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation (#12621) +- Dependency/CI: Upgrade golangci-lint to v2.1.0 (#12170) +- Testing/CI: Fix the condition to check whether cluster has v1beta2 conditions (#12100) +- Runtime SDK: Export ExtensionConfig webhook (#12599) +- Testing: Fix flakes in TestAPIAndWebhookChanges unit test (#12526) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12630) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12632) +- Testing: Fix flaky TestReconcileState unit test (#12633) +- Testing: Fix race condition in InMemoryMachine controller tests (#12347) +- Testing: Fix Test_ValidateCluster unit tests for mink8s (#12564) +- util/CRD migration: Fix cases where update validation fails (#11991) +- util: Fix typo for WithOwnedV1beta1Conditions to WithOwnedV1Beta1Conditions (#12218) + +## :seedling: Others +- API: Drop hardcoded v1beta1 references (#12027) +- API: Enable optionalfields linter and fix remaining findings (#12299) +- API: Move internal/apis to internal/api (#12296) +- API: Remove old godoc comment, remove unnecessary cast in KCP (#12479) +- API: Remove unused List conversion funcs (#12054) +- API: Set minimum=1 on ObservedGeneration and KubeadmConfig APIEndpoint bindPort (#12417) +- API: Set print columns for v1beta2 types (#12534) +- CAPD: Ensure CAPD v1beta1 API package only imports core v1beta1 (#12405) +- CAPIM/Mux: Ignore net.ErrClosed error during listener close & server shutdown (#12212) +- CI: Add govulncheck to ensure vulnerability (#12108) +- CI: Bump E2E to 
Kubernetes v1.33.0-rc.1 (#12099) +- CI: Bump golangci-lint v2 (#12088) +- CI: Bump KAL and remove enum exclude (#12500) +- CI: Bump KAL to 20250605073038, cleanup excludes, fix IPAM prefix field, add MaxItems to Machine.status.addresses (#12326) +- CI: Bump KAL to 20250626 + enable uniquemarkers linter (#12427) +- CI/Chore: Update golangci-lint to v2.3.0 (#12573) +- CI: Enable duplicatemarkers linter (#12228) +- CI: Enable statusoptional linter (#12229) +- CI: Fix `make generate-go-openapi` if parent directory name does not equal `cluster-api` (#12461) +- CI: Remove govulncheck from the verify target (#12348) +- CI: Restructure excludes in KAL linter config (#12445) +- CI: Switch plugin to kube-api-linter (#12089) +- CI: Update version matrix for github workflows for release-1.10 (#11992) +- CI: Use release artifacts for CAPI v1.10 (#12147) +- Cluster: Add validation for Cluster spec.controlPlaneRef, spec.infrastructureRef and spec.topology (#12454) +- Cluster: Ensure Cluster.status.failureDomains are alphabetically sorted (#12416) +- Cluster: Improve error message if rebase fails because target ClusterClass is not reconciled (#12415) +- ClusterClass: Add DropEmptyStruct to ssa patch helper (#12442) +- ClusterClass: Extend topology upgrade test: add bool removal test case (#12484) +- ClusterClass: Improve CC RefVersionsUpToDate condition message (#12472) +- ClusterClass: Improve validation of worker topology names in Cluster resource (#12561) +- ClusterClass: Improve webhook output to include the names of the clusters blocking a deletion (#12060) +- ClusterClass: Make infrastructure and controlPlane required in ClusterClass (#12444) +- clusterctl: Add filename to clusterctl error about bad YAML (#12189) +- clusterctl: Add support for compatible contracts to clusterctl (#12018) +- clusterctl: Bump cert-manager to v1.17.1 (#12044) +- clusterctl: Bump cert-manager to v1.17.2 (#12210) +- clusterctl: Bump cert-manager to v1.18.0 (#12342) +- clusterctl: Bump cert-manager to v1.18.1 (#12378) +- clusterctl: Bump cert-manager to v1.18.2 (#12478) +- clusterctl: Change k0smotron repo location (#12225) +- clusterctl: Cleanup clusterctl tests assets (#12510) +- clusterctl: Enforce skip upgrade policy in clusterctl (#12017) +- Community meeting: Add JoelSpeed to approvers (#12204) +- Conditions: Cleanup v1beta1 updateStatus functions (#12190) +- Conditions: Drop usage of v1beta1 conditions (#12109) +- Control-plane: Avoid large number of connection error traces in kubeadm controlplane controller (#12106) +- Dependency: Bump Go 1.24 (#12128) +- Dependency: Bump go to v1.23.8 (#12052) +- Dependency: Bump Go to v1.24.5 (#12509) +- Dependency: Bump Go to v1.24.6 (#12615) +- Dependency: Bump kustomize to v5.7.0 (#12432) +- Dependency: Bump several tool versions in Makefile (#12433) +- Dependency: Bump sigs.k8s.io/kind to v0.28.0 (#12243) +- Dependency: Bump sigs.k8s.io/kind to v0.29.0 (#12257) +- Dependency: Bump to Go v1.24.4, github.com/cloudflare/circl v1.6.1 (#12351) +- Dependency: Fix CVE-2025-54388 (#12574) +- Dependency: Update github.com/go-viper/mapstructure/v2 to v2.3.0 (#12421) +- Devtools: Add KubeVirt support to Tilt dev workflow (#11697) +- Devtools: Fix Tiltfile (#12541) +- Devtools/Metrics: use v1beta2 for condition metrics and add metrics for dockercluster devcluster dockermachine devmachine extensionconfig ipaddressclaim and crs (#12006) +- e2e: Add an option to override custom node image name for kind cluster (#12186) +- e2e: Add quickstart e2e test with v1beta1 with ClusterClass and 
RuntimeSDK (#12590) +- e2e: Add resource version check to clusterctl upgrade tests (#12546) +- e2e: Add retry for SSA requests against Kubernetes < v1.29 in clusterctl upgrade tests (#12067) +- e2e: Bump clusterctl_upgrade_test.go main and 1.10 tests to k8s v1.33.0 (#12193) +- e2e: Bump Kubernetes version used for testing to v1.33.0-rc.0 (#12073) +- e2e: Fix ResourceVersion flake for MachinePools (#12552) +- e2e: Improve check for Cluster Available condition in e2e tests (#12596) +- e2e: Only run DescribeCluster if v1beta2 Cluster CRD is there (#12279) +- e2e: Print the entire object diff if resource versions are not stable in e2e tests (#12527) +- e2e: Remove redundant check in verifyV1Beta2ConditionsTrueV1Beta1 (#12477) +- e2e: Set extraArgs in test extension (#12557) +- e2e: Skipping test that is failing because of infra issues (#12496) +- KCP: Add --etcd-client-log-level flag to KCP (#12271) +- KCP: Allow unsetting etcd.local, etcd.external and dns (#12065) +- KCP: Bump corefile-migration library to v1.0.26 (#12058) +- KCP: Fix typo in forward etcd leadership error message (#12056) +- Logging: Reduce noisy logs (#12626) +- Misc: Remove jackfrancis from reviewers (#12134) +- KCP/CABPK/KCP: Set MinItems=1 on ExternalEtcd.Endpoints (#12411) +- KCP/CABPK: Remove unused updateClusterStatus (#12295) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12631) +- KCP/MachineSet/MachineHealthCheck: Remove explicit defaulting of MS deletePolicy, MHC maxUnhealthy, KCPTemplate rolloutStrategy (#12464) +- MachinePool/MachineSet/MachineDeployment: Add validation to ensure ClusterName fields are equal in MD/MS/MP (#12447) +- Testing/CI/e2e: Add checker to validate conditions for v1beta2 (#12111) +- Testing/CI: Fix e2e test capi-e2e-release-1.8 (#12379) +- Testing/CI: Fix flaky test in extensionconfig_controller_test.go (#12386) +- Release: Add validation for PREVIOUS_RELEASE_TAG in release-notes-tool (#12380) +- Release: Postpone v1.11 code freeze by one week (#12498) +- Release: Prepare main for v1.11 development (#12000) +- Release: Use github.base_ref in markdown-link-check (#12034) +- Runtime SDK: Block dependencies to internal packages for the RX implementation (#12297) +- Runtime SDK: Fix lifecycle hooks conversions (#12507) +- Runtime SDK: Stop registering API types in the runtime extension scheme (#12042) +- Testing: Add test/framework/* tests in CI (#12469) +- Testing/Framework: Watch logs from init containers (#12208) +- Testing: Release Notes Generator - Test cases for main.go and ref.go (#11882) +- Testing: Test changes planned to comply optionalrequired linter (#12414) +- util: Move contract version & GetCompatibleVersions to contract package (#12032) +- util: Recover v1.10 util packages for conditions, patch and paused to util/deprecated/v1beta1 for provider migrations (#12224) + +:book: Additionally, there have been 51 contributions to our documentation and book. 
(#11029, #11998, #12004, #12057, #12074, #12093, #12117, #12120, #12122, #12125, #12126, #12131, #12139, #12140, #12145, #12150, #12163, #12165, #12188, #12201, #12205, #12236, #12246, #12266, #12284, #12287, #12306, #12309, #12328, #12333, #12377, #12382, #12403, #12418, #12428, #12439, #12443, #12455, #12483, #12491, #12503, #12521, #12532, #12543, #12571, #12575, #12576, #12587, #12589, #12595, #12602) + +## Dependencies + +### Added +- github.com/envoyproxy/go-control-plane/envoy: [v1.32.4](https://github.com/envoyproxy/go-control-plane/tree/envoy/v1.32.4) +- github.com/envoyproxy/go-control-plane/ratelimit: [v0.1.0](https://github.com/envoyproxy/go-control-plane/tree/ratelimit/v0.1.0) +- github.com/klauspost/compress: [v1.18.0](https://github.com/klauspost/compress/tree/v1.18.0) +- github.com/kylelemons/godebug: [v1.1.0](https://github.com/kylelemons/godebug/tree/v1.1.0) +- github.com/prashantv/gostub: [v1.1.0](https://github.com/prashantv/gostub/tree/v1.1.0) +- go.opentelemetry.io/auto/sdk: v1.1.0 +- go.uber.org/automaxprocs: v1.6.0 +- go.yaml.in/yaml/v2: v2.4.2 +- go.yaml.in/yaml/v3: v3.0.3 +- gopkg.in/go-jose/go-jose.v2: v2.6.3 +- sigs.k8s.io/randfill: v1.0.0 + +### Changed +- cel.dev/expr: v0.18.0 → v0.19.1 +- github.com/cloudflare/circl: [v1.3.7 → v1.6.1](https://github.com/cloudflare/circl/compare/v1.3.7...v1.6.1) +- github.com/cncf/xds/go: [b4127c9 → cff3c89](https://github.com/cncf/xds/compare/b4127c9...cff3c89) +- github.com/coredns/corefile-migration: [v1.0.26 → v1.0.27](https://github.com/coredns/corefile-migration/compare/v1.0.26...v1.0.27) +- github.com/coreos/go-oidc: [v2.2.1+incompatible → v2.3.0+incompatible](https://github.com/coreos/go-oidc/compare/v2.2.1...v2.3.0) +- github.com/envoyproxy/go-control-plane: [v0.13.1 → v0.13.4](https://github.com/envoyproxy/go-control-plane/compare/v0.13.1...v0.13.4) +- github.com/envoyproxy/protoc-gen-validate: [v1.1.0 → v1.2.1](https://github.com/envoyproxy/protoc-gen-validate/compare/v1.1.0...v1.2.1) +- github.com/go-logr/logr: [v1.4.2 → v1.4.3](https://github.com/go-logr/logr/compare/v1.4.2...v1.4.3) +- github.com/go-viper/mapstructure/v2: [v2.2.1 → v2.3.0](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.3.0) +- github.com/golang-jwt/jwt/v4: [v4.5.0 → v4.5.2](https://github.com/golang-jwt/jwt/compare/v4.5.0...v4.5.2) +- github.com/golang/glog: [v1.2.2 → v1.2.4](https://github.com/golang/glog/compare/v1.2.2...v1.2.4) +- github.com/google/cel-go: [v0.22.0 → v0.23.2](https://github.com/google/cel-go/compare/v0.22.0...v0.23.2) +- github.com/google/gnostic-models: [v0.6.8 → v0.6.9](https://github.com/google/gnostic-models/compare/v0.6.8...v0.6.9) +- github.com/google/pprof: [40e02aa → 27863c8](https://github.com/google/pprof/compare/40e02aa...27863c8) +- github.com/gorilla/websocket: [v1.5.3 → e064f32](https://github.com/gorilla/websocket/compare/v1.5.3...e064f32) +- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.20.0 → v2.24.0](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.20.0...v2.24.0) +- github.com/onsi/ginkgo/v2: [v2.23.3 → v2.23.4](https://github.com/onsi/ginkgo/compare/v2.23.3...v2.23.4) +- github.com/onsi/gomega: [v1.36.3 → v1.38.0](https://github.com/onsi/gomega/compare/v1.36.3...v1.38.0) +- github.com/pmezard/go-difflib: [5d4384e → v1.0.0](https://github.com/pmezard/go-difflib/compare/5d4384e...v1.0.0) +- github.com/prometheus/client_golang: [v1.19.1 → v1.22.0](https://github.com/prometheus/client_golang/compare/v1.19.1...v1.22.0) +- github.com/prometheus/common: [v0.55.0 → 
v0.62.0](https://github.com/prometheus/common/compare/v0.55.0...v0.62.0) +- github.com/rogpeppe/go-internal: [v1.12.0 → v1.13.1](https://github.com/rogpeppe/go-internal/compare/v1.12.0...v1.13.1) +- github.com/spf13/pflag: [v1.0.6 → v1.0.7](https://github.com/spf13/pflag/compare/v1.0.6...v1.0.7) +- github.com/spf13/viper: [v1.20.0 → v1.20.1](https://github.com/spf13/viper/compare/v1.20.0...v1.20.1) +- github.com/stretchr/objx: [v0.5.0 → v0.5.2](https://github.com/stretchr/objx/compare/v0.5.0...v0.5.2) +- go.etcd.io/etcd/api/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/client/pkg/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/client/v2: v2.305.16 → v2.305.21 +- go.etcd.io/etcd/client/v3: v3.5.20 → v3.5.22 +- go.etcd.io/etcd/pkg/v3: v3.5.16 → v3.5.21 +- go.etcd.io/etcd/raft/v3: v3.5.16 → v3.5.21 +- go.etcd.io/etcd/server/v3: v3.5.16 → v3.5.21 +- go.opentelemetry.io/contrib/detectors/gcp: v1.29.0 → v1.34.0 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.54.0 → v0.58.0 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp: v0.54.0 → v0.58.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.27.0 → v1.33.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.28.0 → v1.33.0 +- go.opentelemetry.io/otel/metric: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/sdk/metric: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/sdk: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel/trace: v1.29.0 → v1.34.0 +- go.opentelemetry.io/otel: v1.29.0 → v1.34.0 +- go.opentelemetry.io/proto/otlp: v1.3.1 → v1.4.0 +- golang.org/x/crypto: v0.36.0 → v0.40.0 +- golang.org/x/mod: v0.23.0 → v0.25.0 +- golang.org/x/net: v0.37.0 → v0.42.0 +- golang.org/x/oauth2: v0.28.0 → v0.30.0 +- golang.org/x/sync: v0.12.0 → v0.16.0 +- golang.org/x/sys: v0.31.0 → v0.34.0 +- golang.org/x/term: v0.30.0 → v0.33.0 +- golang.org/x/text: v0.23.0 → v0.27.0 +- golang.org/x/time: v0.8.0 → v0.9.0 +- golang.org/x/tools: v0.30.0 → v0.34.0 +- google.golang.org/genproto/googleapis/api: e6fa225 → 5f5ef82 +- google.golang.org/genproto/googleapis/rpc: 3abc09e → 1a7da9e +- google.golang.org/grpc: v1.67.3 → v1.71.3 +- google.golang.org/protobuf: v1.36.5 → v1.36.6 +- k8s.io/api: v0.32.3 → v0.33.3 +- k8s.io/apiextensions-apiserver: v0.32.3 → v0.33.3 +- k8s.io/apimachinery: v0.32.3 → v0.33.3 +- k8s.io/apiserver: v0.32.3 → v0.33.3 +- k8s.io/client-go: v0.32.3 → v0.33.3 +- k8s.io/cluster-bootstrap: v0.32.3 → v0.33.3 +- k8s.io/code-generator: v0.32.3 → v0.33.3 +- k8s.io/component-base: v0.32.3 → v0.33.3 +- k8s.io/gengo/v2: 2b36238 → 1244d31 +- k8s.io/kms: v0.32.3 → v0.33.3 +- k8s.io/kube-openapi: 32ad38e → c8a335a +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.31.0 → v0.31.2 +- sigs.k8s.io/controller-runtime: v0.20.4 → v0.21.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.4.2 → v4.6.0 +- sigs.k8s.io/yaml: v1.4.0 → v1.6.0 + +### Removed +- github.com/asaskevich/govalidator: [f61b66f](https://github.com/asaskevich/govalidator/tree/f61b66f) +- github.com/go-kit/log: [v0.2.1](https://github.com/go-kit/log/tree/v0.2.1) +- github.com/go-logfmt/logfmt: [v0.5.1](https://github.com/go-logfmt/logfmt/tree/v0.5.1) +- gopkg.in/square/go-jose.v2: v2.6.0 + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.11.1.md b/CHANGELOG/v1.11.1.md new file mode 100644 index 000000000000..3daea6a4298c --- /dev/null +++ b/CHANGELOG/v1.11.1.md @@ -0,0 +1,51 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.30.x -> v1.34.x +- Workload Cluster: v1.28.x -> v1.34.x + +[More information about version support 
can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.11.0 +## :chart_with_upwards_trend: Overview +- 22 new commits merged +- 3 feature additions ✨ +- 8 bugs fixed 🐛 + +## :sparkles: New Features +- CI: Bump autoscaler to a9cb59fdd (#12707) +- CI: Bump Kubernetes in tests to v1.34.0 and claim support for v1.34 (#12705) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.2 (#12659) + +## :bug: Bug Fixes +- API: Only try to convert infraRefs if they are set (#12693) +- API: Register conversion funcs in schemes (#12697) +- CABPK: Always use latest apiVersion when getting owner of KubeadmConfig in CABPK (#12689) +- ClusterClass: Ensure holder field path in GeneratePatchRequest is set based on contract (#12691) +- ClusterClass: Fix field paths in ClusterClass compatibility validation errors (#12670) +- ClusterClass: Stop adding conversion-data annotation to Cluster object (#12721) +- e2e/CAPD: Remove finalizers during deletion if ownerRef was never set (#12678) +- Testing: Fix KubeadmConfig fuzz test flake (#12682) + +## :seedling: Others +- clusterctl: Allow metadata.yaml's Kind to be empty (#12715) +- Dependency: Bump github.com/go-viper/mapstructure/v2 to fix CVE (#12680) +- Dependency: Bump to envtest v1.34.0 (#12706) +- e2e: Bump to kind v0.30.0 (#12708) +- e2e: Get kind mgmt cluster logs in clusterctl upgrade test (#12688) +- Misc: Log version and git commit on controller start (#12696) +- Release/clusterctl: Add CAPRKE2 to release tool’s issue-opening providers list (#12717) + +:book: Additionally, there have been 4 contributions to our documentation and book. (#12667, #12668, #12671, #12674) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/go-viper/mapstructure/v2: [v2.3.0 → v2.4.0](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.11.2.md b/CHANGELOG/v1.11.2.md new file mode 100644 index 000000000000..e422c0a134b4 --- /dev/null +++ b/CHANGELOG/v1.11.2.md @@ -0,0 +1,38 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.30.x -> v1.34.x +- Workload Cluster: v1.28.x -> v1.34.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.11.1 +## :chart_with_upwards_trend: Overview +- 6 new commits merged +- 1 feature addition ✨ +- 1 bug fixed 🐛 + +## :sparkles: New Features +- KCP: Bump coredns/corefile-migration to v1.0.28 (#12750) + +## :bug: Bug Fixes +- clusterctl: Verify providers need upgrade before applying (#12768) + +## :seedling: Others +- Autoscaling: Bump autoscaler in e2e tests to v1.33.1 (#12792) +- clusterctl: Add Metal3 as an IPAMProvider (#12760) +- Dependency: Bump go to v1.24.7 (#12735) + +:book: Additionally, there has been 1 contribution to our documentation and book. 
(#12780) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.27 → v1.0.28](https://github.com/coredns/corefile-migration/compare/v1.0.27...v1.0.28) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.11.3.md b/CHANGELOG/v1.11.3.md new file mode 100644 index 000000000000..74df98d4ebe5 --- /dev/null +++ b/CHANGELOG/v1.11.3.md @@ -0,0 +1,38 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.30.x -> v1.34.x +- Workload Cluster: v1.28.x -> v1.34.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.11.2 +## :chart_with_upwards_trend: Overview +- 13 new commits merged +- 1 feature addition ✨ + +## :sparkles: New Features +- KCP: Bump coredns/corefile-migration to v1.0.29 (#12863) + +## :seedling: Others +- CAPD: Recreate container if we re-enter reconciliation and it exists but is not running (#12933) +- clusterctl: Bump cert-manager to v1.19.0 (#12832) +- clusterctl: Bump cert-manager to v1.19.1 (#12875) +- Dependency: Bump Go to v1.24.8 (#12829) +- Dependency: Bump Go to v1.24.9 (#12868) +- e2e: Fix self-hosted to actually read DOCKER_PRELOAD_IMAGES from the e2e config (#12932) +- Runtime SDK: Add hint to look into controller logs to runtime client error response (#12850) + +:book: Additionally, there have been 5 contributions to our documentation and book. (#12837, #12839, #12887, #12894, #12916) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.28 → v1.0.29](https://github.com/coredns/corefile-migration/compare/v1.0.28...v1.0.29) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.12.0-beta.0.md b/CHANGELOG/v1.12.0-beta.0.md new file mode 100644 index 000000000000..4568babdf52b --- /dev/null +++ b/CHANGELOG/v1.12.0-beta.0.md @@ -0,0 +1,298 @@ +🚨 This is a BETA RELEASE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). +
+More details about the release + +:warning: **BETA RELEASE NOTES** :warning: + +## Changes since v1.11.0 +## :chart_with_upwards_trend: Overview +- 197 new commits merged +- 3 breaking changes :warning: +- 34 feature additions ✨ +- 26 bugs fixed 🐛 + +## :warning: Breaking Changes +- CI: Improve KAL config docs for forbidding OpenAPI defaulting (#12869) +- clusterctl/Feat(clusterctl): block move when Cluster or ClusterClass is paused (#12786) +- Dependency: Bump to controller-runtime v0.22 & controller-tools v0.19 (#12634) + +## :sparkles: New Features +- CABPK: Add EncryptionAlgorithm to Kubeadmconfig (#12859) +- CAPD: Add scale from/to 0 support for CAPD (#12572) +- CI: Bump autoscaler to a9cb59fdd (#12643) +- CI: Bump Kubernetes in tests to v1.34.0 and claim support for v1.34 (#12699) +- ClusterCache: Add GetUncachedClient() (#12803) +- ClusterClass: Add .spec.upgrade.external.generateUpgradePlanExtension field to ClusterClass (#12809) +- ClusterClass: Add types and hook for GenerateUpgradePlan (#12823) +- ClusterClass: Additional validation in Cluster/ClusterClass webhook for chained upgrades (#12816) +- ClusterClass: Call GenerateUpgradePlanRequest Runtime Extension (#12903) +- ClusterClass: Implement core logic for chained upgrades (#12726) +- Control-plane: Add new control-plane provider HCP (#12800) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12623) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.2 (#12658) +- e2e: Implement e2e test for in-place updates (#12938) +- KCP: Bump coredns/corefile-migration to v1.0.28 (#12748) +- KCP: Bump coredns/corefile-migration to v1.0.29 (#12862) +- KCP: Bump corefile-migration to v1.0.27 (#12636) +- KCP: Compare ClusterConfiguration via KubeadmConfig instead of annotation on Machine (#12758) +- KCP: Extend rollout logic for in-place updates (#12840) +- KCP: Implement CanUpdateMachine (#12857) +- KCP: Implement trigger in-place update (#12897) +- Machine: Add in-place updates support for machine controller (#12831) +- MachineDeployment: Add in-place to rollout planner (#12865) +- MachineHealthCheck: Add support for checking Machine conditions in MachineHealthCheck (#12827) +- Misc: Add inplace updates featuregate (#12755) +- ClusterCache/KCP: Deprecate GetClientCertificatePrivateKey and stop using it in KCP (#12846) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12563) +- KCP/MachineSet/MS: Refactor BootstrapConfig/InfraMachine managedFields for in-place (#12890) +- Runtime SDK/IPAM/MachinePool: Cleanup exp packages (#12651) +- Runtime SDK: Add in-place update hooks to API (#12343) +- Runtime SDK: Add lifecycle hooks for chained-upgrade (#12878) +- Runtime SDK: Call new lifecycle hooks for chained-upgrades (#12891) +- Runtime SDK: Ensure ExtensionConfig controller can be used outside of the core provider (#12754) +- Runtime SDK: Implement GenerateUpgradePlan handler (#12927) + +## :bug: Bug Fixes +- API: Only try to convert infraRefs if they are set (#12686) +- API: Register conversion funcs in schemes (#12687) +- CABPK: Always use latest apiVersion when getting owner of KubeadmConfig in CABPK (#12685) +- CAPD: Fix the format error of healthCheck in test templates (#12787) +- CAPD: Remove finalizers during deletion if ownerRef was never set (#12675) +- CAPD: Run CAPD conversion tests in CI (#12583) +- CAPIM/Fix(proxy): eliminate data race in DialContext (#12778) +- ClusterClass: Ensure holder field path in GeneratePatchRequest is set based on contract 
(#12684) +- ClusterClass: Fix field paths in ClusterClass compatibility validation errors (#12660) +- ClusterClass: Stop adding conversion-data annotation to Cluster object (#12719) +- clusterctl: Removing Ready/Available prefix from STATUS Column (#12729) +- clusterctl: Verify providers need upgrade before applying (#12753) +- e2e: Fix autoscaler e2e test flake (#12613) +- e2e: Fix e2e test issues introduced by chained upgrades (#12766) +- e2e: Fix objects with Changed Resource Versions flake (#12848) +- e2e: Fix upgrade runtimesdk test (#12833) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12609) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12608) +- MachineDeployment: Fix race conditions ScaleDownOldMS (#12812) +- MachineDeployment: Fix race conditions ScaleDownOldMS OnDelete (#12830) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation (#12607) +- Runtime SDK: Export ExtensionConfig webhook (#12598) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12618) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12616) +- Testing: Fix flaky TestReconcileState unit test (#12617) +- Testing: Fix KubeadmConfig fuzz test flake (#12679) + +## :seedling: Others +- Autoscaling: Bump autoscaler in e2e tests to v1.33.1 (#12790) +- CAPD: Cleanup CAPD exp packages (#12672) +- CAPD: Recreate container if we re-enter reconciliation and it exists but is not running (#12923) +- CI: Add OpenAPI defaulting detection for KubeadmConfig by using forbiddenmarkers (#12851) +- CI: Block FIXME comments (#12772) +- CI: Bump KAL & add nodurations linter (#12743) +- CI: Bump KAL & add nonullable and forbiddenmarkers linter (#12724) +- CI: Bump KAL & drop schemaless excludes (#12646) +- CI: Bump KAL, adjust excludes (#12650) +- CI: Bump to golangci-linter version to v2.4.0 (#12703) +- CI: Update metrics configuration to v1beta2 (#12642) +- ClusterClass: Add input validations for desired state generator function (#12655) +- clusterctl: Add Metal3 as an IPAMProvider (#12756) +- clusterctl: Allow metadata.yaml's Kind to be empty (#12714) +- clusterctl: Bump cert-manager to v1.19.0 (#12828) +- clusterctl: Bump cert-manager to v1.19.1 (#12873) +- clusterctl: Bumping tablewriter to v1.0.9 - latest (#12781) +- clusterctl: Show Available condition for CP (#12759) +- Dependency: Bump github.com/go-viper/mapstructure/v2 to fix CVE (#12677) +- Dependency: Bump Go to v1.24.6 (#12611) +- Dependency: Bump go to v1.24.7 (#12733) +- Dependency: Bump Go to v1.24.8 (#12826) +- Dependency: Bump Go to v1.24.9 (#12867) +- Dependency: Bump to envtest v1.34.0 (#12702) +- Devtools: Promtail to alloy migration (#11945) +- e2e: Add quickstart e2e test with v1beta1 with ClusterClass and RuntimeSDK (#12577) +- e2e: Bump autoscaler in e2e tests to v1.34.0 (#12806) +- e2e: Bump pause image in e2e tests to 3.10.1 (default for Kubernetes v1.34) (#12731) +- e2e: Bump to kind v0.30.0 (#12701) +- e2e: Enable IPv6 test again (#12597) +- e2e: Fix self-hosted to actually read DOCKER_PRELOAD_IMAGES from the e2e config (#12907) +- e2e: Get kind mgmt cluster logs in clusterctl upgrade test (#12676) +- e2e: Improve check for Cluster Available condition in e2e tests (#12594) +- e2e/Scripts: fix kind build git commit setting (#12858) +- e2e: Set startup taint for autoscaler in e2e tests (#12736) +- e2e: Start testing against Kubernetes v1.35 (#12709) +- KCP: Add current/desired objects to NotUpToDateResult & 
refactor object creation (#12817) +- KCP: Check for error before checking reconcile result (#12935) +- KCP/Cleanup KCP code: variable/func renames, func order (#12793) +- KCP: Enable websocket dialer with fallback to spdy (#12902) +- KCP: Simplify cleanupConfigFields in KCP (#12776) +- KCP: Simplify KCP matchesKubeadmConfig (#12813) +- Logging: Reduce noisy logs (#12606) +- Machine: Add DisableCertPrivateKey function for clustercache for test flake (#12921) +- Machine: Consider updating condition when computing Machine's ready condition (#12939) +- Machine: Fix TestReconcileMachinePhases flake (#12818) +- Machine: Implement Updating Machine phase (#12940) +- Machine/TestReconcileMachinePhases: use apireader to directly talk to apiserver (#12819) +- MachineDeployment: Add in-place to machineset controller (#12906) +- MachineDeployment: Add rollout planner (#12804) +- MachineDeployment: Cleanup getMachinesSucceeded flag from MD controller (#12882) +- MachineDeployment: Fix misleading log statements and optimize logic (#12871) +- MachineDeployment: Move compute and create ms to rollout planner (#12841) +- MachineDeployment: Refactor MachineTemplateUpToDate (#12811) +- MachineDeployment: Simplify rollout planner (#12899) +- MachineHealthCheck: No longer requeue when remediation is not allowed (#12924) +- MachineSet: Fix flakes in syncMachines unit test (#12918) +- MachineSet: Fix race conditions with global scheme in TestMachineSetReconciler_reconcileUnhealthyMachines (#12919) +- MachineSet: Refactor BootstrapConfig/InfraMachine creation in MachineSet controller (#12881) +- Misc: Avoid using deprecated client.Patch method (#12737) +- Misc: Consistent webhook file/folder structure (#12791) +- Misc/Feat(Tiltfile): Add Proxy Support to Docker Build-Args (#12669) +- Misc: Log version and git commit on controller start (#12694) +- Misc: Stop setting and relying on TypeMeta in typed objects (#12533) +- Misc: Use errors package of Go (#10875) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12619) +- KCP/MachineSet/MachineDeployment: Remove unused CleanUpManagedFieldsForSSAAdoption code (#12788) +- Release/clusterctl: Add CAPRKE2 to release tool’s issue-opening providers list (#12713) +- Release/Testing: Added test cases for list.go and github.go (#11937) +- Release: Prepare main branch for v1.12 development (#12723) +- Runtime SDK: Add defensive response status checking in runtime client (#12898) +- Runtime SDK: Add hint to look into controller logs to runtime client error response (#12849) +- Runtime SDK: Deduplicate extension filtering and response validation logic (#12905) +- Testing: Add clusterapi crd groups to audit logs for envtest (#12883) +- Testing: Enable audit logs for envtest-based unit tests if ARTIFACTS env var is set (#12847) +- Testing: Implement unit-tests for desired state generator (#12656) +- Testing: Update version matrix for GitHub workflows for release 1.11 (#12586) +- util: Add items to cache immediately after apply (#12877) + +:book: Additionally, there have been 30 contributions to our documentation and book. 
(#12085, #12199, #12562, #12581, #12582, #12593, #12600, #12601, #12647, #12662, #12663, #12673, #12710, #12741, #12761, #12777, #12779, #12797, #12814, #12835, #12836, #12854, #12866, #12880, #12885, #12892, #12893, #12896, #12917, #12942) + +## Dependencies + +### Added +- github.com/gkampitakis/ciinfo: [v0.3.2](https://github.com/gkampitakis/ciinfo/tree/v0.3.2) +- github.com/gkampitakis/go-diff: [v1.3.2](https://github.com/gkampitakis/go-diff/tree/v1.3.2) +- github.com/gkampitakis/go-snaps: [v0.5.15](https://github.com/gkampitakis/go-snaps/tree/v0.5.15) +- github.com/go-jose/go-jose/v4: [v4.0.4](https://github.com/go-jose/go-jose/tree/v4.0.4) +- github.com/goccy/go-yaml: [v1.18.0](https://github.com/goccy/go-yaml/tree/v1.18.0) +- github.com/golang-jwt/jwt/v5: [v5.2.2](https://github.com/golang-jwt/jwt/tree/v5.2.2) +- github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus: [v1.0.1](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/providers/prometheus/v1.0.1) +- github.com/grpc-ecosystem/go-grpc-middleware/v2: [v2.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2.3.0) +- github.com/joshdk/go-junit: [v1.0.0](https://github.com/joshdk/go-junit/tree/v1.0.0) +- github.com/maruel/natural: [v1.1.1](https://github.com/maruel/natural/tree/v1.1.1) +- github.com/mfridman/tparse: [v0.18.0](https://github.com/mfridman/tparse/tree/v0.18.0) +- github.com/olekukonko/cat: [50322a0](https://github.com/olekukonko/cat/tree/50322a0) +- github.com/olekukonko/errors: [v1.1.0](https://github.com/olekukonko/errors/tree/v1.1.0) +- github.com/olekukonko/ll: [v0.1.1](https://github.com/olekukonko/ll/tree/v0.1.1) +- github.com/olekukonko/ts: [78ecb04](https://github.com/olekukonko/ts/tree/78ecb04) +- github.com/spiffe/go-spiffe/v2: [v2.5.0](https://github.com/spiffe/go-spiffe/tree/v2.5.0) +- github.com/tidwall/gjson: [v1.18.0](https://github.com/tidwall/gjson/tree/v1.18.0) +- github.com/tidwall/match: [v1.1.1](https://github.com/tidwall/match/tree/v1.1.1) +- github.com/tidwall/pretty: [v1.2.1](https://github.com/tidwall/pretty/tree/v1.2.1) +- github.com/tidwall/sjson: [v1.2.5](https://github.com/tidwall/sjson/tree/v1.2.5) +- github.com/zeebo/errs: [v1.4.0](https://github.com/zeebo/errs/tree/v1.4.0) +- go.etcd.io/raft/v3: v3.6.0 +- sigs.k8s.io/structured-merge-diff/v6: v6.3.0 + +### Changed +- cel.dev/expr: v0.19.1 → v0.24.0 +- cloud.google.com/go/storage: v1.49.0 → v1.5.0 +- cloud.google.com/go: v0.116.0 → v0.53.0 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp: [v1.25.0 → v1.26.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/compare/detectors/gcp/v1.25.0...detectors/gcp/v1.26.0) +- github.com/Masterminds/semver/v3: [v3.3.0 → v3.4.0](https://github.com/Masterminds/semver/compare/v3.3.0...v3.4.0) +- github.com/census-instrumentation/opencensus-proto: [v0.4.1 → v0.2.1](https://github.com/census-instrumentation/opencensus-proto/compare/v0.4.1...v0.2.1) +- github.com/cncf/xds/go: [cff3c89 → 2f00578](https://github.com/cncf/xds/compare/cff3c89...2f00578) +- github.com/coredns/corefile-migration: [v1.0.27 → v1.0.29](https://github.com/coredns/corefile-migration/compare/v1.0.27...v1.0.29) +- github.com/emicklei/go-restful/v3: [v3.12.2 → v3.13.0](https://github.com/emicklei/go-restful/compare/v3.12.2...v3.13.0) +- github.com/fsnotify/fsnotify: [v1.8.0 → v1.9.0](https://github.com/fsnotify/fsnotify/compare/v1.8.0...v1.9.0) +- github.com/fxamacker/cbor/v2: [v2.7.0 → v2.9.0](https://github.com/fxamacker/cbor/compare/v2.7.0...v2.9.0) +- 
github.com/go-viper/mapstructure/v2: [v2.3.0 → v2.4.0](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) +- github.com/golang/groupcache: [41bb18b → 8c9f03a](https://github.com/golang/groupcache/compare/41bb18b...8c9f03a) +- github.com/google/cel-go: [v0.23.2 → v0.26.0](https://github.com/google/cel-go/compare/v0.23.2...v0.26.0) +- github.com/google/gnostic-models: [v0.6.9 → v0.7.0](https://github.com/google/gnostic-models/compare/v0.6.9...v0.7.0) +- github.com/google/pprof: [27863c8 → f64d9cf](https://github.com/google/pprof/compare/27863c8...f64d9cf) +- github.com/googleapis/gax-go/v2: [v2.14.1 → v2.0.5](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.0.5) +- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.24.0 → v2.26.3](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.24.0...v2.26.3) +- github.com/ianlancetaylor/demangle: [bd984b5 → f615e6b](https://github.com/ianlancetaylor/demangle/compare/bd984b5...f615e6b) +- github.com/jonboulle/clockwork: [v0.4.0 → v0.5.0](https://github.com/jonboulle/clockwork/compare/v0.4.0...v0.5.0) +- github.com/mattn/go-colorable: [v0.1.13 → v0.1.14](https://github.com/mattn/go-colorable/compare/v0.1.13...v0.1.14) +- github.com/mattn/go-runewidth: [v0.0.14 → v0.0.16](https://github.com/mattn/go-runewidth/compare/v0.0.14...v0.0.16) +- github.com/modern-go/reflect2: [v1.0.2 → 35a7c28](https://github.com/modern-go/reflect2/compare/v1.0.2...35a7c28) +- github.com/olekukonko/tablewriter: [v0.0.5 → v1.0.9](https://github.com/olekukonko/tablewriter/compare/v0.0.5...v1.0.9) +- github.com/onsi/ginkgo/v2: [v2.23.4 → v2.27.2](https://github.com/onsi/ginkgo/compare/v2.23.4...v2.27.2) +- github.com/onsi/gomega: [v1.38.0 → v1.38.2](https://github.com/onsi/gomega/compare/v1.38.0...v1.38.2) +- github.com/pelletier/go-toml/v2: [v2.2.3 → v2.2.4](https://github.com/pelletier/go-toml/compare/v2.2.3...v2.2.4) +- github.com/rivo/uniseg: [v0.4.2 → v0.4.7](https://github.com/rivo/uniseg/compare/v0.4.2...v0.4.7) +- github.com/rogpeppe/go-internal: [v1.13.1 → v1.14.1](https://github.com/rogpeppe/go-internal/compare/v1.13.1...v1.14.1) +- github.com/sagikazarmark/locafero: [v0.7.0 → v0.11.0](https://github.com/sagikazarmark/locafero/compare/v0.7.0...v0.11.0) +- github.com/sourcegraph/conc: [v0.3.0 → 5f936ab](https://github.com/sourcegraph/conc/compare/v0.3.0...5f936ab) +- github.com/spf13/afero: [v1.12.0 → v1.15.0](https://github.com/spf13/afero/compare/v1.12.0...v1.15.0) +- github.com/spf13/cast: [v1.7.1 → v1.10.0](https://github.com/spf13/cast/compare/v1.7.1...v1.10.0) +- github.com/spf13/cobra: [v1.9.1 → v1.10.1](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1) +- github.com/spf13/pflag: [v1.0.7 → v1.0.10](https://github.com/spf13/pflag/compare/v1.0.7...v1.0.10) +- github.com/spf13/viper: [v1.20.1 → v1.21.0](https://github.com/spf13/viper/compare/v1.20.1...v1.21.0) +- github.com/stretchr/testify: [v1.10.0 → v1.11.1](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.1) +- go.etcd.io/bbolt: v1.3.11 → v1.4.2 +- go.etcd.io/etcd/api/v3: v3.5.22 → v3.6.5 +- go.etcd.io/etcd/client/pkg/v3: v3.5.22 → v3.6.5 +- go.etcd.io/etcd/client/v3: v3.5.22 → v3.6.5 +- go.etcd.io/etcd/pkg/v3: v3.5.21 → v3.6.4 +- go.etcd.io/etcd/server/v3: v3.5.21 → v3.6.4 +- go.opencensus.io: v0.24.0 → v0.22.3 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.58.0 → v0.60.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.33.0 → v1.34.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.33.0 → v1.34.0 +- 
go.opentelemetry.io/otel/metric: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel/trace: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel: v1.34.0 → v1.35.0 +- go.opentelemetry.io/proto/otlp: v1.4.0 → v1.5.0 +- go.yaml.in/yaml/v3: v3.0.3 → v3.0.4 +- golang.org/x/crypto: v0.40.0 → v0.43.0 +- golang.org/x/mod: v0.25.0 → v0.28.0 +- golang.org/x/net: v0.42.0 → v0.46.0 +- golang.org/x/oauth2: v0.30.0 → v0.32.0 +- golang.org/x/sync: v0.16.0 → v0.17.0 +- golang.org/x/sys: v0.34.0 → v0.37.0 +- golang.org/x/telemetry: bda5523 → aef8a43 +- golang.org/x/term: v0.33.0 → v0.36.0 +- golang.org/x/text: v0.27.0 → v0.30.0 +- golang.org/x/tools: v0.34.0 → v0.37.0 +- google.golang.org/api: v0.215.0 → v0.17.0 +- google.golang.org/genproto/googleapis/api: 5f5ef82 → a0af3ef +- google.golang.org/genproto/googleapis/rpc: 1a7da9e → a0af3ef +- google.golang.org/genproto: e639e21 → 66ed5ce +- google.golang.org/grpc: v1.71.3 → v1.72.3 +- google.golang.org/protobuf: v1.36.6 → v1.36.7 +- k8s.io/api: v0.33.3 → v0.34.1 +- k8s.io/apiextensions-apiserver: v0.33.3 → v0.34.1 +- k8s.io/apimachinery: v0.33.3 → v0.34.1 +- k8s.io/apiserver: v0.33.3 → v0.34.1 +- k8s.io/client-go: v0.33.3 → v0.34.1 +- k8s.io/cluster-bootstrap: v0.33.3 → v0.34.1 +- k8s.io/code-generator: v0.33.3 → v0.34.1 +- k8s.io/component-base: v0.33.3 → v0.34.1 +- k8s.io/gengo/v2: 1244d31 → 85fd79d +- k8s.io/kms: v0.33.3 → v0.34.1 +- k8s.io/kube-openapi: c8a335a → f3f2b99 +- k8s.io/utils: 3ea5e8c → 4c0f3b2 +- sigs.k8s.io/controller-runtime: v0.21.0 → v0.22.4 +- sigs.k8s.io/json: 9aa6b5e → cfa47c3 + +### Removed +- cloud.google.com/go/auth/oauth2adapt: v0.2.6 +- cloud.google.com/go/auth: v0.13.0 +- cloud.google.com/go/iam: v1.2.2 +- cloud.google.com/go/monitoring: v1.21.2 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/exporter/metric/v0.48.1) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/internal/resourcemapping/v0.48.1) +- github.com/golang-jwt/jwt/v4: [v4.5.2](https://github.com/golang-jwt/jwt/tree/v4.5.2) +- github.com/google/s2a-go: [v0.1.8](https://github.com/google/s2a-go/tree/v0.1.8) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.4](https://github.com/googleapis/enterprise-certificate-proxy/tree/v0.3.4) +- github.com/grpc-ecosystem/go-grpc-middleware: [v1.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v1.3.0) +- github.com/grpc-ecosystem/grpc-gateway: [v1.16.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.16.0) +- github.com/kr/fs: [v0.1.0](https://github.com/kr/fs/tree/v0.1.0) +- github.com/pkg/sftp: [v1.13.7](https://github.com/pkg/sftp/tree/v1.13.7) +- github.com/prashantv/gostub: [v1.1.0](https://github.com/prashantv/gostub/tree/v1.1.0) +- go.etcd.io/etcd/client/v2: v2.305.21 +- go.etcd.io/etcd/raft/v3: v3.5.21 +- go.uber.org/atomic: v1.9.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.6.0 + +
+
+_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.12.0-beta.1.md b/CHANGELOG/v1.12.0-beta.1.md new file mode 100644 index 000000000000..b86ad2267049 --- /dev/null +++ b/CHANGELOG/v1.12.0-beta.1.md @@ -0,0 +1,418 @@ +🚨 This is a BETA RELEASE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + +## Changes since v1.12.0-beta.0 +## :chart_with_upwards_trend: Overview +- 48 new commits merged +- 2 breaking changes :warning: +- 6 feature additions ✨ +- 5 bugs fixed 🐛 + +## :warning: Breaking Changes +- Runtime SDK: Improve chained upgrade observability (#12973) +- Runtime SDK: Make the AfterClusterUpgrade hook blocking (#12984) + +## :sparkles: New Features +- clusterctl: Add conditions filter for clusterctl describe (#12991) +- e2e: Change RuntimeSDK e2e test ClusterClass to use GenerateUpgradePlan extension (#12955) +- MachineDeployment: MD: Implement CanUpdateMachineSet (#12965) +- Misc: Improve logs, errors and conditions (#12992) +- Misc: Introduce & use wait for cache utils (#12957) +- API/Machine/MachineSet/MachineDeployment: Taint propagation: machine related API changes, conversion and feature gate (#12936) + +## :bug: Bug Fixes +- CAPD: Fix: CAPD on rootless podman (#12941) +- ClusterClass: Stop writing zero values for spec.controlPlaneEndpoint to ControlPlane objects (#12958) +- e2e: Do not require kubetest configration if not needed (#12948) +- e2e: Propagate clusterctl variables for cluster upgrades (#12949) +- Machine/MachinePool: Fix MachinePool nodeRef UID mismatch after K8s upgrade (#12392) + +## :seedling: Others +- CABPK: Migrate from Requeue to RequeueAfter in CABPK (#12988) +- Cluster: Allow >1 minor version upgrades if generateUpgradePlan extension is defined (#12979) +- Cluster: Simplify Cluster webhook (#12895) +- ClusterClass: Improve topology reconciled condition (#13002) +- clusterctl: CAPMS: Add metal-stack infrastructure provider (#12925) +- ClusterResourceSet: Remove deprecated ClusterResourceSet feature gate (#12950) +- Dependency: Bump Go to v1.24.10 (#12962) +- Devtools: Drop ALL groups in tilt (#13001) +- e2e: Fix autoscaler test (#12978) +- e2e: Taint propagation: e2e coverage via md rollout test (#12966) +- e2e: Wait for cluster deletion in runtime sdk test (#12956) +- KCP: Fix race condition on KCP initialized condition (#12980) +- KCP: Improve KCP etcd client crt/key caching (#12977) +- Machine: Requeue for Machine Available condition (#12953) +- MachineDeployment: Add more info to logs for rollout changes (#12997) +- MachineDeployment: Rollout-planner improve checks for scalingOrInPlaceUpdateInProgress (#12954) +- MachineHealthCheck: Improve MHC reporting: add reason to condition, add reason+message to log (#12987) +- Misc: Cleanup TestReconcileMachinePhases (#12976) +- Misc: Improve mark hook utils (#12994) +- Misc: Improve wait for cache (#12993) +- Machine/MachineSet/MachineDeployment: Adjust UpToDate condition to consider Updating, move UpToDate condition to Machine ctrl for workers (#12959) +- Release: Release notes: clarify semantic of --previous-release-version (#12995) +- util: Feat: add check version against metadata utility (#12529) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- go.etcd.io/etcd/api/v3: v3.6.5 → v3.6.6 +- go.etcd.io/etcd/client/pkg/v3: v3.6.5 → v3.6.6 +- go.etcd.io/etcd/client/v3: v3.6.5 → v3.6.6 +- golang.org/x/crypto: v0.43.0 → v0.44.0 +- golang.org/x/mod: v0.28.0 → v0.29.0 +- golang.org/x/net: v0.46.0 → 
v0.47.0 +- golang.org/x/oauth2: v0.32.0 → v0.33.0 +- golang.org/x/sync: v0.17.0 → v0.18.0 +- golang.org/x/sys: v0.37.0 → v0.38.0 +- golang.org/x/telemetry: aef8a43 → 078029d +- golang.org/x/term: v0.36.0 → v0.37.0 +- golang.org/x/text: v0.30.0 → v0.31.0 +- golang.org/x/tools: v0.37.0 → v0.38.0 +- k8s.io/api: v0.34.1 → v0.34.2 +- k8s.io/apiextensions-apiserver: v0.34.1 → v0.34.2 +- k8s.io/apimachinery: v0.34.1 → v0.34.2 +- k8s.io/apiserver: v0.34.1 → v0.34.2 +- k8s.io/client-go: v0.34.1 → v0.34.2 +- k8s.io/cluster-bootstrap: v0.34.1 → v0.34.2 +- k8s.io/code-generator: v0.34.1 → v0.34.2 +- k8s.io/component-base: v0.34.1 → v0.34.2 +- k8s.io/kms: v0.34.1 → v0.34.2 + +### Removed +_Nothing has changed._ + +
+More details about the release + +:warning: **BETA RELEASE NOTES** :warning: + +## Changes since v1.11.0 +## :chart_with_upwards_trend: Overview +- 248 new commits merged +- 5 breaking changes :warning: +- 40 feature additions ✨ +- 31 bugs fixed 🐛 + +## :warning: Breaking Changes +- CI: Improve KAL config docs for forbidding OpenAPI defaulting (#12869) +- clusterctl: Feat(clusterctl): block move when Cluster or ClusterClass is paused (#12786) +- Dependency: Bump to controller-runtime v0.22 & controller-tools v0.19 (#12634) +- Runtime SDK: Improve chained upgrade observability (#12973) +- Runtime SDK: Make the AfterClusterUpgrade hook blocking (#12984) + +## :sparkles: New Features +- CABPK: Add EncryptionAlgorithm to Kubeadmconfig (#12859) +- CAPD: Add scale from/to 0 support for CAPD (#12572) +- CI: Bump autoscaler to a9cb59fdd (#12643) +- CI: Bump Kubernetes in tests to v1.34.0 and claim support for v1.34 (#12699) +- ClusterCache: Add GetUncachedClient() (#12803) +- ClusterClass: Add .spec.upgrade.external.generateUpgradePlanExtension field to ClusterClass (#12809) +- ClusterClass: Add types and hook for GenerateUpgradePlan (#12823) +- ClusterClass: Additional validation in Cluster/ClusterClass webhook for chained upgrades (#12816) +- ClusterClass: Call GenerateUpgradePlanRequest Runtime Extension (#12903) +- ClusterClass: Implement core logic for chained upgrades (#12726) +- clusterctl: Add conditions filter for clusterctl describe (#12991) +- Control-plane: Add new control-plane provider HCP (#12800) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12623) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.2 (#12658) +- e2e: Change RuntimeSDK e2e test ClusterClass to use GenerateUpgradePlan extension (#12955) +- e2e: Implement e2e test for in-place updates (#12938) +- KCP: Bump coredns/corefile-migration to v1.0.28 (#12748) +- KCP: Bump coredns/corefile-migration to v1.0.29 (#12862) +- KCP: Bump corefile-migration to v1.0.27 (#12636) +- KCP: Compare ClusterConfiguration via KubeadmConfig instead of annotation on Machine (#12758) +- KCP: Extend rollout logic for in-place updates (#12840) +- KCP: Implement CanUpdateMachine (#12857) +- KCP: Implement trigger in-place update (#12897) +- Machine: Add in-place updates support for machine controller (#12831) +- MachineDeployment: Add in-place to rollout planner (#12865) +- MachineDeployment: MD: Implement CanUpdateMachineSet (#12965) +- MachineHealthCheck: Add support for checking Machine conditions in MachineHealthCheck (#12827) +- Misc: Add inplace updates featuregate (#12755) +- Misc: Improve logs, errors and conditions (#12992) +- Misc: Introduce & use wait for cache utils (#12957) +- API/Machine/MachineSet/MachineDeployment: Taint propagation: machine related API changes, conversion and feature gate (#12936) +- ClusterCache/KCP: ClusterCache: Deprecate GetClientCertificatePrivateKey and stop using it in KCP (#12846) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12563) +- KCP/MachineSet: KCP/MS: Refactor BootstrapConfig/InfraMachine managedFields for in-place (#12890) +- Runtime SDK/IPAM/MachinePool: Cleanup exp packages (#12651) +- Runtime SDK: Add in-place update hooks to API (#12343) +- Runtime SDK: Add lifecycle hooks for chained-upgrade (#12878) +- Runtime SDK: Call new lifecycle hooks for chained-upgrades (#12891) +- Runtime SDK: Ensure ExtensionConfig controller can be used outside of the core provider (#12754) +- Runtime SDK: Implement 
GenerateUpgradePlan handler (#12927) + +## :bug: Bug Fixes +- API: Only try to convert infraRefs if they are set (#12686) +- API: Register conversion funcs in schemes (#12687) +- CABPK: Always use latest apiVersion when getting owner of KubeadmConfig in CABPK (#12685) +- CAPD: Fix the format error of healthCheck in test templates (#12787) +- CAPD: Fix: CAPD on rootless podman (#12941) +- CAPD: Remove finalizers during deletion if ownerRef was never set (#12675) +- CAPD: Run CAPD conversion tests in CI (#12583) +- CAPIM: Fix(proxy): eliminate data race in DialContext (#12778) +- ClusterClass: Ensure holder field path in GeneratePatchRequest is set based on contract (#12684) +- ClusterClass: Fix field paths in ClusterClass compatibility validation errors (#12660) +- ClusterClass: Stop adding conversion-data annotation to Cluster object (#12719) +- ClusterClass: Stop writing zero values for spec.controlPlaneEndpoint to ControlPlane objects (#12958) +- clusterctl: Removing Ready/Available prefix from STATUS Column (#12729) +- clusterctl: Verify providers need upgrade before applying (#12753) +- e2e: Do not require kubetest configration if not needed (#12948) +- e2e: Fix autoscaler e2e test flake (#12613) +- e2e: Fix e2e test issues introduced by chained upgrades (#12766) +- e2e: Fix objects with Changed Resource Versions flake (#12848) +- e2e: Fix upgrade runtimesdk test (#12833) +- e2e: Propagate clusterctl variables for cluster upgrades (#12949) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12609) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12608) +- MachineDeployment: Fix race conditions ScaleDownOldMS (#12812) +- MachineDeployment: Fix race conditions ScaleDownOldMS OnDelete (#12830) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation (#12607) +- Machine/MachinePool: Fix MachinePool nodeRef UID mismatch after K8s upgrade (#12392) +- Runtime SDK: Export ExtensionConfig webhook (#12598) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12618) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12616) +- Testing: Fix flaky TestReconcileState unit test (#12617) +- Testing: Fix KubeadmConfig fuzz test flake (#12679) + +## :seedling: Others +- Autoscaling: Bump autoscaler in e2e tests to v1.33.1 (#12790) +- CABPK: Migrate from Requeue to RequeueAfter in CABPK (#12988) +- CAPD: Cleanup CAPD exp packages (#12672) +- CAPD: Recreate container if we re-enter reconciliation and it exists but is not running (#12923) +- CI: Add OpenAPI defaulting detection for KubeadmConfig by using forbiddenmarkers (#12851) +- CI: Block FIXME comments (#12772) +- CI: Bump KAL & add nodurations linter (#12743) +- CI: Bump KAL & add nonullable and forbiddenmarkers linter (#12724) +- CI: Bump KAL & drop schemaless excludes (#12646) +- CI: Bump KAL, adjust excludes (#12650) +- CI: Bump to golangci-linter version to v2.4.0 (#12703) +- CI: Update metrics configuration to v1beta2 (#12642) +- Cluster: Allow >1 minor version upgrades if generateUpgradePlan extension is defined (#12979) +- Cluster: Simplify Cluster webhook (#12895) +- ClusterClass: Add input validations for desired state generator function (#12655) +- ClusterClass: Improve topology reconciled condition (#13002) +- clusterctl: Add Metal3 as an IPAMProvider (#12756) +- clusterctl: Allow metadata.yaml's Kind to be empty (#12714) +- clusterctl: Bump cert-manager to v1.19.0 (#12828) +- clusterctl: Bump cert-manager 
to v1.19.1 (#12873) +- clusterctl: Bumping tablewriter to v1.0.9 - latest (#12781) +- clusterctl: CAPMS: Add metal-stack infrastructure provider (#12925) +- clusterctl: Show Available condition for CP (#12759) +- ClusterResourceSet: Remove deprecated ClusterResourceSet feature gate (#12950) +- Dependency: Bump github.com/go-viper/mapstructure/v2 to fix CVE (#12677) +- Dependency: Bump Go to v1.24.10 (#12962) +- Dependency: Bump Go to v1.24.6 (#12611) +- Dependency: Bump go to v1.24.7 (#12733) +- Dependency: Bump Go to v1.24.8 (#12826) +- Dependency: Bump Go to v1.24.9 (#12867) +- Dependency: Bump to envtest v1.34.0 (#12702) +- Devtools: Drop ALL groups in tilt (#13001) +- Devtools: Promtail to alloy migration (#11945) +- e2e: Add quickstart e2e test with v1beta1 with ClusterClass and RuntimeSDK (#12577) +- e2e: Bump autoscaler in e2e tests to v1.34.0 (#12806) +- e2e: Bump pause image in e2e tests to 3.10.1 (default for Kubernetes v1.34) (#12731) +- e2e: Bump to kind v0.30.0 (#12701) +- e2e: Enable IPv6 test again (#12597) +- e2e: Fix autoscaler test (#12978) +- e2e: Fix self-hosted to actually read DOCKER_PRELOAD_IMAGES from the e2e config (#12907) +- e2e: Get kind mgmt cluster logs in clusterctl upgrade test (#12676) +- e2e: Improve check for Cluster Available condition in e2e tests (#12594) +- e2e: Scripts: fix kind build git commit setting (#12858) +- e2e: Set startup taint for autoscaler in e2e tests (#12736) +- e2e: Start testing against Kubernetes v1.35 (#12709) +- e2e: Taint propagation: e2e coverage via md rollout test (#12966) +- e2e: Wait for cluster deletion in runtime sdk test (#12956) +- KCP: Add current/desired objects to NotUpToDateResult & refactor object creation (#12817) +- KCP: Check for error before checking reconcile result (#12935) +- KCP: Cleanup KCP code: variable/func renames, func order (#12793) +- KCP: Enable websocket dialer with fallback to spdy (#12902) +- KCP: Fix race condition on KCP initialized condition (#12980) +- KCP: Improve KCP etcd client crt/key caching (#12977) +- KCP: Simplify cleanupConfigFields in KCP (#12776) +- KCP: Simplify KCP matchesKubeadmConfig (#12813) +- Logging: Reduce noisy logs (#12606) +- Machine: Add DisableCertPrivateKey function for clustercache for test flake (#12921) +- Machine: Consider updating condition when computing Machine's ready condition (#12939) +- Machine: Fix TestReconcileMachinePhases flake (#12818) +- Machine: Implement Updating Machine phase (#12940) +- Machine: Requeue for Machine Available condition (#12953) +- Machine: TestReconcileMachinePhases: use apireader to directly talk to apiserver (#12819) +- MachineDeployment: Add in-place to machineset controller (#12906) +- MachineDeployment: Add more info to logs for rollout changes (#12997) +- MachineDeployment: Add rollout planner (#12804) +- MachineDeployment: Cleanup getMachinesSucceeded flag from MD controller (#12882) +- MachineDeployment: Fix misleading log statements and optimize logic (#12871) +- MachineDeployment: Move compute and create ms to rollout planner (#12841) +- MachineDeployment: Refactor MachineTemplateUpToDate (#12811) +- MachineDeployment: Rollout-planner improve checks for scalingOrInPlaceUpdateInProgress (#12954) +- MachineDeployment: Simplify rollout planner (#12899) +- MachineHealthCheck: Improve MHC reporting: add reason to condition, add reason+message to log (#12987) +- MachineHealthCheck: No longer requeue when remediation is not allowed (#12924) +- MachineSet: Fix flakes in syncMachines unit test (#12918) +- MachineSet: Fix race 
conditions with global scheme in TestMachineSetReconciler_reconcileUnhealthyMachines (#12919) +- MachineSet: Refactor BootstrapConfig/InfraMachine creation in MachineSet controller (#12881) +- Misc: Avoid using deprecated client.Patch method (#12737) +- Misc: Cleanup TestReconcileMachinePhases (#12976) +- Misc: Consistent webhook file/folder structure (#12791) +- Misc: Feat(Tiltfile): Add Proxy Support to Docker Build-Args (#12669) +- Misc: Improve mark hook utils (#12994) +- Misc: Improve wait for cache (#12993) +- Misc: Log version and git commit on controller start (#12694) +- Misc: Stop setting and relying on TypeMeta in typed objects (#12533) +- Misc: Use errors package of Go (#10875) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12619) +- KCP/MachineSet/MachineDeployment: Remove unused CleanUpManagedFieldsForSSAAdoption code (#12788) +- Machine/MachineSet/MachineDeployment: Adjust UpToDate condition to consider Updating, move UpToDate condition to Machine ctrl for workers (#12959) +- Release/clusterctl: Add CAPRKE2 to release tool’s issue-opening providers list (#12713) +- Release/Testing: Added test cases for list.go and github.go (#11937) +- Release: Prepare main branch for v1.12 development (#12723) +- Release: Release notes: clarify semantic of --previous-release-version (#12995) +- Runtime SDK: Add defensive response status checking in runtime client (#12898) +- Runtime SDK: Add hint to look into controller logs to runtime client error response (#12849) +- Runtime SDK: Deduplicate extension filtering and response validation logic (#12905) +- Testing: Add clusterapi crd groups to audit logs for envtest (#12883) +- Testing: Enable audit logs for envtest-based unit tests if ARTIFACTS env var is set (#12847) +- Testing: Implement unit-tests for desired state generator (#12656) +- Testing: Update version matrix for GitHub workflows for release 1.11 (#12586) +- util: Add items to cache immediately after apply (#12877) +- util: Feat: add check version against metadata utility (#12529) + +:book: Additionally, there have been 38 contributions to our documentation and book. 
(#12085, #12199, #12329, #12562, #12581, #12582, #12593, #12600, #12601, #12647, #12662, #12663, #12673, #12710, #12741, #12761, #12777, #12779, #12797, #12810, #12814, #12835, #12836, #12854, #12866, #12880, #12885, #12892, #12893, #12896, #12917, #12934, #12942, #12944, #12951, #12961, #12970, #12998) + +## Dependencies + +### Added +- github.com/gkampitakis/ciinfo: [v0.3.2](https://github.com/gkampitakis/ciinfo/tree/v0.3.2) +- github.com/gkampitakis/go-diff: [v1.3.2](https://github.com/gkampitakis/go-diff/tree/v1.3.2) +- github.com/gkampitakis/go-snaps: [v0.5.15](https://github.com/gkampitakis/go-snaps/tree/v0.5.15) +- github.com/go-jose/go-jose/v4: [v4.0.4](https://github.com/go-jose/go-jose/tree/v4.0.4) +- github.com/goccy/go-yaml: [v1.18.0](https://github.com/goccy/go-yaml/tree/v1.18.0) +- github.com/golang-jwt/jwt/v5: [v5.2.2](https://github.com/golang-jwt/jwt/tree/v5.2.2) +- github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus: [v1.0.1](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/providers/prometheus/v1.0.1) +- github.com/grpc-ecosystem/go-grpc-middleware/v2: [v2.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2.3.0) +- github.com/joshdk/go-junit: [v1.0.0](https://github.com/joshdk/go-junit/tree/v1.0.0) +- github.com/maruel/natural: [v1.1.1](https://github.com/maruel/natural/tree/v1.1.1) +- github.com/mfridman/tparse: [v0.18.0](https://github.com/mfridman/tparse/tree/v0.18.0) +- github.com/olekukonko/cat: [50322a0](https://github.com/olekukonko/cat/tree/50322a0) +- github.com/olekukonko/errors: [v1.1.0](https://github.com/olekukonko/errors/tree/v1.1.0) +- github.com/olekukonko/ll: [v0.1.1](https://github.com/olekukonko/ll/tree/v0.1.1) +- github.com/olekukonko/ts: [78ecb04](https://github.com/olekukonko/ts/tree/78ecb04) +- github.com/spiffe/go-spiffe/v2: [v2.5.0](https://github.com/spiffe/go-spiffe/tree/v2.5.0) +- github.com/tidwall/gjson: [v1.18.0](https://github.com/tidwall/gjson/tree/v1.18.0) +- github.com/tidwall/match: [v1.1.1](https://github.com/tidwall/match/tree/v1.1.1) +- github.com/tidwall/pretty: [v1.2.1](https://github.com/tidwall/pretty/tree/v1.2.1) +- github.com/tidwall/sjson: [v1.2.5](https://github.com/tidwall/sjson/tree/v1.2.5) +- github.com/zeebo/errs: [v1.4.0](https://github.com/zeebo/errs/tree/v1.4.0) +- go.etcd.io/raft/v3: v3.6.0 +- sigs.k8s.io/structured-merge-diff/v6: v6.3.0 + +### Changed +- cel.dev/expr: v0.19.1 → v0.24.0 +- cloud.google.com/go/storage: v1.49.0 → v1.5.0 +- cloud.google.com/go: v0.116.0 → v0.53.0 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp: [v1.25.0 → v1.26.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/compare/detectors/gcp/v1.25.0...detectors/gcp/v1.26.0) +- github.com/Masterminds/semver/v3: [v3.3.0 → v3.4.0](https://github.com/Masterminds/semver/compare/v3.3.0...v3.4.0) +- github.com/census-instrumentation/opencensus-proto: [v0.4.1 → v0.2.1](https://github.com/census-instrumentation/opencensus-proto/compare/v0.4.1...v0.2.1) +- github.com/cncf/xds/go: [cff3c89 → 2f00578](https://github.com/cncf/xds/compare/cff3c89...2f00578) +- github.com/coredns/corefile-migration: [v1.0.27 → v1.0.29](https://github.com/coredns/corefile-migration/compare/v1.0.27...v1.0.29) +- github.com/emicklei/go-restful/v3: [v3.12.2 → v3.13.0](https://github.com/emicklei/go-restful/compare/v3.12.2...v3.13.0) +- github.com/fsnotify/fsnotify: [v1.8.0 → v1.9.0](https://github.com/fsnotify/fsnotify/compare/v1.8.0...v1.9.0) +- github.com/fxamacker/cbor/v2: [v2.7.0 → 
v2.9.0](https://github.com/fxamacker/cbor/compare/v2.7.0...v2.9.0) +- github.com/go-viper/mapstructure/v2: [v2.3.0 → v2.4.0](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) +- github.com/golang/groupcache: [41bb18b → 8c9f03a](https://github.com/golang/groupcache/compare/41bb18b...8c9f03a) +- github.com/google/cel-go: [v0.23.2 → v0.26.0](https://github.com/google/cel-go/compare/v0.23.2...v0.26.0) +- github.com/google/gnostic-models: [v0.6.9 → v0.7.0](https://github.com/google/gnostic-models/compare/v0.6.9...v0.7.0) +- github.com/google/pprof: [27863c8 → f64d9cf](https://github.com/google/pprof/compare/27863c8...f64d9cf) +- github.com/googleapis/gax-go/v2: [v2.14.1 → v2.0.5](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.0.5) +- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.24.0 → v2.26.3](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.24.0...v2.26.3) +- github.com/ianlancetaylor/demangle: [bd984b5 → f615e6b](https://github.com/ianlancetaylor/demangle/compare/bd984b5...f615e6b) +- github.com/jonboulle/clockwork: [v0.4.0 → v0.5.0](https://github.com/jonboulle/clockwork/compare/v0.4.0...v0.5.0) +- github.com/mattn/go-colorable: [v0.1.13 → v0.1.14](https://github.com/mattn/go-colorable/compare/v0.1.13...v0.1.14) +- github.com/mattn/go-runewidth: [v0.0.14 → v0.0.16](https://github.com/mattn/go-runewidth/compare/v0.0.14...v0.0.16) +- github.com/modern-go/reflect2: [v1.0.2 → 35a7c28](https://github.com/modern-go/reflect2/compare/v1.0.2...35a7c28) +- github.com/olekukonko/tablewriter: [v0.0.5 → v1.0.9](https://github.com/olekukonko/tablewriter/compare/v0.0.5...v1.0.9) +- github.com/onsi/ginkgo/v2: [v2.23.4 → v2.27.2](https://github.com/onsi/ginkgo/compare/v2.23.4...v2.27.2) +- github.com/onsi/gomega: [v1.38.0 → v1.38.2](https://github.com/onsi/gomega/compare/v1.38.0...v1.38.2) +- github.com/pelletier/go-toml/v2: [v2.2.3 → v2.2.4](https://github.com/pelletier/go-toml/compare/v2.2.3...v2.2.4) +- github.com/rivo/uniseg: [v0.4.2 → v0.4.7](https://github.com/rivo/uniseg/compare/v0.4.2...v0.4.7) +- github.com/rogpeppe/go-internal: [v1.13.1 → v1.14.1](https://github.com/rogpeppe/go-internal/compare/v1.13.1...v1.14.1) +- github.com/sagikazarmark/locafero: [v0.7.0 → v0.11.0](https://github.com/sagikazarmark/locafero/compare/v0.7.0...v0.11.0) +- github.com/sourcegraph/conc: [v0.3.0 → 5f936ab](https://github.com/sourcegraph/conc/compare/v0.3.0...5f936ab) +- github.com/spf13/afero: [v1.12.0 → v1.15.0](https://github.com/spf13/afero/compare/v1.12.0...v1.15.0) +- github.com/spf13/cast: [v1.7.1 → v1.10.0](https://github.com/spf13/cast/compare/v1.7.1...v1.10.0) +- github.com/spf13/cobra: [v1.9.1 → v1.10.1](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1) +- github.com/spf13/pflag: [v1.0.7 → v1.0.10](https://github.com/spf13/pflag/compare/v1.0.7...v1.0.10) +- github.com/spf13/viper: [v1.20.1 → v1.21.0](https://github.com/spf13/viper/compare/v1.20.1...v1.21.0) +- github.com/stretchr/testify: [v1.10.0 → v1.11.1](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.1) +- go.etcd.io/bbolt: v1.3.11 → v1.4.2 +- go.etcd.io/etcd/api/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/client/pkg/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/client/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/pkg/v3: v3.5.21 → v3.6.4 +- go.etcd.io/etcd/server/v3: v3.5.21 → v3.6.4 +- go.opencensus.io: v0.24.0 → v0.22.3 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.58.0 → v0.60.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.33.0 → v1.34.0 +- 
go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.33.0 → v1.34.0 +- go.opentelemetry.io/otel/metric: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel/trace: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel: v1.34.0 → v1.35.0 +- go.opentelemetry.io/proto/otlp: v1.4.0 → v1.5.0 +- go.yaml.in/yaml/v3: v3.0.3 → v3.0.4 +- golang.org/x/crypto: v0.40.0 → v0.44.0 +- golang.org/x/mod: v0.25.0 → v0.29.0 +- golang.org/x/net: v0.42.0 → v0.47.0 +- golang.org/x/oauth2: v0.30.0 → v0.33.0 +- golang.org/x/sync: v0.16.0 → v0.18.0 +- golang.org/x/sys: v0.34.0 → v0.38.0 +- golang.org/x/telemetry: bda5523 → 078029d +- golang.org/x/term: v0.33.0 → v0.37.0 +- golang.org/x/text: v0.27.0 → v0.31.0 +- golang.org/x/tools: v0.34.0 → v0.38.0 +- google.golang.org/api: v0.215.0 → v0.17.0 +- google.golang.org/genproto/googleapis/api: 5f5ef82 → a0af3ef +- google.golang.org/genproto/googleapis/rpc: 1a7da9e → a0af3ef +- google.golang.org/genproto: e639e21 → 66ed5ce +- google.golang.org/grpc: v1.71.3 → v1.72.3 +- google.golang.org/protobuf: v1.36.6 → v1.36.7 +- k8s.io/api: v0.33.3 → v0.34.2 +- k8s.io/apiextensions-apiserver: v0.33.3 → v0.34.2 +- k8s.io/apimachinery: v0.33.3 → v0.34.2 +- k8s.io/apiserver: v0.33.3 → v0.34.2 +- k8s.io/client-go: v0.33.3 → v0.34.2 +- k8s.io/cluster-bootstrap: v0.33.3 → v0.34.2 +- k8s.io/code-generator: v0.33.3 → v0.34.2 +- k8s.io/component-base: v0.33.3 → v0.34.2 +- k8s.io/gengo/v2: 1244d31 → 85fd79d +- k8s.io/kms: v0.33.3 → v0.34.2 +- k8s.io/kube-openapi: c8a335a → f3f2b99 +- k8s.io/utils: 3ea5e8c → 4c0f3b2 +- sigs.k8s.io/controller-runtime: v0.21.0 → v0.22.4 +- sigs.k8s.io/json: 9aa6b5e → cfa47c3 + +### Removed +- cloud.google.com/go/auth/oauth2adapt: v0.2.6 +- cloud.google.com/go/auth: v0.13.0 +- cloud.google.com/go/iam: v1.2.2 +- cloud.google.com/go/monitoring: v1.21.2 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/exporter/metric/v0.48.1) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/internal/resourcemapping/v0.48.1) +- github.com/golang-jwt/jwt/v4: [v4.5.2](https://github.com/golang-jwt/jwt/tree/v4.5.2) +- github.com/google/s2a-go: [v0.1.8](https://github.com/google/s2a-go/tree/v0.1.8) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.4](https://github.com/googleapis/enterprise-certificate-proxy/tree/v0.3.4) +- github.com/grpc-ecosystem/go-grpc-middleware: [v1.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v1.3.0) +- github.com/grpc-ecosystem/grpc-gateway: [v1.16.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.16.0) +- github.com/kr/fs: [v0.1.0](https://github.com/kr/fs/tree/v0.1.0) +- github.com/pkg/sftp: [v1.13.7](https://github.com/pkg/sftp/tree/v1.13.7) +- github.com/prashantv/gostub: [v1.1.0](https://github.com/prashantv/gostub/tree/v1.1.0) +- go.etcd.io/etcd/client/v2: v2.305.21 +- go.etcd.io/etcd/raft/v3: v3.5.21 +- go.uber.org/atomic: v1.9.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.6.0 + +
+
+_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.12.0-rc.0.md b/CHANGELOG/v1.12.0-rc.0.md new file mode 100644 index 000000000000..d1d43e3ab1a6 --- /dev/null +++ b/CHANGELOG/v1.12.0-rc.0.md @@ -0,0 +1,400 @@ +🚨 This is a RELEASE CANDIDATE. Use it only for testing purposes. If you find any bugs, file an [issue](https://github.com/kubernetes-sigs/cluster-api/issues/new). + + +:warning: **RELEASE CANDIDATE NOTES** :warning: +## 👌 Kubernetes version support + +- Management Cluster: v1.31.x -> v1.34.x +- Workload Cluster: v1.29.x -> v1.34.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Highlights + +- Bumped to Go 1.24, controller-runtime v0.22, k8s.io/* v0.34, controller-gen v0.19 +- In-place updates + - Can be enabled with the new `InPlaceUpdates` feature gate + - More details can be found in the [proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240807-in-place-updates.md) +- Chained upgrades + - More details can be found in the [proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20250513-chained-and-efficient-upgrades-for-clusters-with-managed-topologies.md) + - Runtime SDK: Make the AfterClusterUpgrade hook blocking (#12984) +- MachineHealthCheck: Add support for checking Machine conditions (#12827) +- Machine: First part of the [Taint propagation proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20250513-propogate-taints.md) (Machine/MachineSet/MachineDeployment) (#12936, #12966) + - [Feature tracking issue](https://github.com/kubernetes-sigs/cluster-api/issues/12972) + - Can be enabled with the new `MachineTaintPropagation` feature gate +- KCP/CABPK: Add EncryptionAlgorithm field to Kubeadmconfig to support keys generated with `RSA-3072`, `RSA-4096`, `ECDSA-P256`, `ECDSA-P384` (#12859) +- Introduce ReconcilerRateLimiting feature (#13006) + - Can be enabled with the new `ReconcilerRateLimiting` feature gate + - Rate-limits all reconcilers to at most 1 request per second + +Note: Cluster API is only supported on conformant Kubernetes Clusters and contract-relevant provider resources (e.g. InfraCluster) have to be implemented as CRDs (i.e. not via an aggregated apiserver). + +## Other changes + +- clusterctl: Add conditions filter for clusterctl describe (#12991) +- clusterctl: `clusterctl move` blocks when Cluster or ClusterClass is paused (#12786) +- KCP: Enable websocket dialer with fallback to spdy (for communication with etcd) (#12902) +- Runtime SDK: Add defensive response status checking in runtime client (#12898) +- Improved logging across several controllers +- Improved e2e test coverage, e.g.: CAPD: Add scale from/to 0 support for CAPD (#12572) +- New providers in clusterctl: HCP (#12800) control plane provider, Metal3 IPAM provider (#12756), metal-stack infrastructure provider (#12925) + +## Deprecation and Removals Warning + +- ClusterResourceSet: Remove deprecated ClusterResourceSet feature gate (#12950) +- Reminder: [v1alpha3 & v1alpha4 will be removed in CAPI v1.13](https://github.com/kubernetes-sigs/cluster-api/issues/11919) (they are already not served since a while) +- Reminder: [v1beta1 is on track to be unserved in CAPI v1.14](https://github.com/kubernetes-sigs/cluster-api/issues/11920) + - Reminder: Provider should start implementing the v1beta2 contract ASAP. 
+ +For additional details relevant to providers, please take a look at [Cluster API v1.11 compared to v1.12](https://main.cluster-api.sigs.k8s.io/developer/providers/migrations/v1.11-to-v1.12). +
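The highlights above mention several new opt-in feature gates (`InPlaceUpdates`, `MachineTaintPropagation`, `ReconcilerRateLimiting`). As a hedged illustration only, the sketch below shows the standard `k8s.io/component-base/featuregate` pattern that such gates typically follow; the gate names come from the notes above, but the defaults, pre-release stages, and variable names are assumptions and not the actual Cluster API definitions.

```go
package example

import (
	"k8s.io/component-base/featuregate"
)

// Gate names taken from the release highlights; everything else here is a
// hypothetical sketch, not code from the Cluster API repository.
const (
	InPlaceUpdates          featuregate.Feature = "InPlaceUpdates"
	MachineTaintPropagation featuregate.Feature = "MachineTaintPropagation"
	ReconcilerRateLimiting  featuregate.Feature = "ReconcilerRateLimiting"
)

// MutableGates follows the usual component-base pattern: gates default to off
// and are flipped at controller startup (e.g. via a --feature-gates flag).
var MutableGates featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

func init() {
	if err := MutableGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		InPlaceUpdates:          {Default: false, PreRelease: featuregate.Alpha},
		MachineTaintPropagation: {Default: false, PreRelease: featuregate.Alpha},
		ReconcilerRateLimiting:  {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
}

// A controller would then guard the new code paths with a simple check:
//
//	if MutableGates.Enabled(InPlaceUpdates) { /* in-place update logic */ }
```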
+More details about the release + +## Changes since v1.11.0 +## :chart_with_upwards_trend: Overview +- 272 new commits merged +- 5 breaking changes :warning: +- 42 feature additions ✨ +- 35 bugs fixed 🐛 + +## :warning: Breaking Changes +- CI: Improve KAL config docs for forbidding OpenAPI defaulting (#12869) +- clusterctl: Block move when Cluster or ClusterClass is paused (#12786) +- Dependency: Bump to controller-runtime v0.22 & controller-tools v0.19 (#12634) +- Runtime SDK: Improve chained upgrade observability (#12973) +- Runtime SDK: Make the AfterClusterUpgrade hook blocking (#12984) + +## :sparkles: New Features +- API/Machine/MachineSet/MachineDeployment: Machine related API changes, conversion and feature gate (#12936) +- CABPK: Add EncryptionAlgorithm to Kubeadmconfig (#12859) +- CAPD: Add scale from/to 0 support for CAPD (#12572) +- CI: Bump autoscaler to a9cb59fdd (#12643) +- CI: Bump Kubernetes in tests to v1.34.0 and claim support for v1.34 (#12699) +- ClusterCache/KCP: Deprecate GetClientCertificatePrivateKey and stop using it in KCP (#12846) +- ClusterCache: Add GetUncachedClient() (#12803) +- ClusterClass: Add .spec.upgrade.external.generateUpgradePlanExtension field to ClusterClass (#12809) +- ClusterClass: Add types and hook for GenerateUpgradePlan (#12823) +- ClusterClass: Additional validation in Cluster/ClusterClass webhook for chained upgrades (#12816) +- ClusterClass: Call GenerateUpgradePlanRequest Runtime Extension (#12903) +- ClusterClass: Implement core logic for chained upgrades (#12726) +- clusterctl: Add conditions filter for clusterctl describe (#12991) +- Control-plane: Add new control-plane provider HCP (#12800) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.1 (#12623) +- e2e: Bump Kubernetes version used for testing to v1.34.0-rc.2 (#12658) +- e2e: Bump Kubernetes version used for testing to v1.35.0-beta.0 (#13029) +- e2e: Change RuntimeSDK e2e test ClusterClass to use GenerateUpgradePlan extension (#12955) +- e2e: Implement e2e test for in-place updates (#12938) +- KCP/CABPK/CI: Bump KAL to pick up latest requiredfields linter, add Min/MaxLength to BootstrapToken (#12563) +- KCP/MachineSet: Refactor BootstrapConfig/InfraMachine managedFields for in-place (#12890) +- KCP: Bump coredns/corefile-migration to v1.0.28 (#12748) +- KCP: Bump coredns/corefile-migration to v1.0.29 (#12862) +- KCP: Bump corefile-migration to v1.0.27 (#12636) +- KCP: Compare ClusterConfiguration via KubeadmConfig instead of annotation on Machine (#12758) +- KCP: Extend rollout logic for in-place updates (#12840) +- KCP: Implement CanUpdateMachine (#12857) +- KCP: Implement trigger in-place update (#12897) +- Machine: Add in-place updates support for machine controller (#12831) +- MachineDeployment: Add in-place to rollout planner (#12865) +- MachineDeployment: Implement CanUpdateMachineSet (#12965) +- MachineHealthCheck: Add support for checking Machine conditions in MachineHealthCheck (#12827) +- Misc: Add inplace updates featuregate (#12755) +- Misc: Improve logs, errors and conditions (#12992) +- Misc: Introduce & use wait for cache utils (#12957) +- Misc: Introduce reconciler rate-limiting and hook caching (#13006) +- Runtime SDK/IPAM/MachinePool: Cleanup exp packages (#12651) +- Runtime SDK: Add in-place update hooks to API (#12343) +- Runtime SDK: Add lifecycle hooks for chained-upgrade (#12878) +- Runtime SDK: Call new lifecycle hooks for chained-upgrades (#12891) +- Runtime SDK: Ensure ExtensionConfig controller can be used outside of the core provider 
(#12754) +- Runtime SDK: Implement GenerateUpgradePlan handler (#12927) + +## :bug: Bug Fixes +- API: Only try to convert infraRefs if they are set (#12686) +- API: Register conversion funcs in schemes (#12687) +- CABPK: Always use latest apiVersion when getting owner of KubeadmConfig in CABPK (#12685) +- CAPD: CAPD on rootless podman (#12941) +- CAPD: Fix the format error of healthCheck in test templates (#12787) +- CAPD: Remove finalizers during deletion if ownerRef was never set (#12675) +- CAPD: Run CAPD conversion tests in CI (#12583) +- CAPIM: Eliminate data race in DialContext (#12778) +- ClusterClass/MachinePool: Fix MP error in desired state calculation during Cluster creation (#12607) +- ClusterClass: Ensure holder field path in GeneratePatchRequest is set based on contract (#12684) +- ClusterClass: Fix field paths in ClusterClass compatibility validation errors (#12660) +- ClusterClass: Fix wait for cache in reconcile_state.go (#13032) +- ClusterClass: Stop adding conversion-data annotation to Cluster object (#12719) +- ClusterClass: Stop writing zero values for spec.controlPlaneEndpoint to ControlPlane objects (#12958) +- clusterctl: Removing Ready/Available prefix from STATUS Column (#12729) +- clusterctl: Verify providers need upgrade before applying (#12753) +- Devtools: Fix kube-state-metrics deployment (#13024) +- e2e: Do not require kubetest configration if not needed (#12948) +- e2e: Fix autoscaler e2e test flake (#12613) +- e2e: Fix e2e test issues introduced by chained upgrades (#12766) +- e2e: Fix objects with Changed Resource Versions flake (#12848) +- e2e: Fix upgrade runtimesdk test (#12833) +- e2e: Propagate clusterctl variables for cluster upgrades (#12949) +- KCP: Fix ControlPlaneComponentHealthCheckSeconds validation in KubeadmConfigSpec.Validate (#12609) +- KCP: Fix conversion issue in KubeadmControlPlaneTemplate with rolloutStrategy.type (#12608) +- KCP: Fix KCP KubeadmConfig isJoin detection (#13035) +- Machine/MachinePool: Fix MachinePool nodeRef UID mismatch after K8s upgrade (#12392) +- MachineDeployment: Fix race conditions ScaleDownOldMS (#12812) +- MachineDeployment: Fix race conditions ScaleDownOldMS OnDelete (#12830) +- MachineDeployment: Fix rollout with unavailable machines (#13020) +- Runtime SDK: Export ExtensionConfig webhook (#12598) +- Testing: Fix flaky TestFuzzyConversion (Cluster) test (#12618) +- Testing: Fix flaky TestReconcileMachinePhases unit test (#12616) +- Testing: Fix flaky TestReconcileState unit test (#12617) +- Testing: Fix KubeadmConfig fuzz test flake (#12679) + +## :seedling: Others +- Autoscaling: Bump autoscaler in e2e tests to v1.33.1 (#12790) +- CABPK: Migrate from Requeue to RequeueAfter in CABPK (#12988) +- CAPD: Cleanup CAPD exp packages (#12672) +- CAPD: Recreate container if we re-enter reconciliation and it exists but is not running (#12923) +- CI: Add OpenAPI defaulting detection for KubeadmConfig by using forbiddenmarkers (#12851) +- CI: Block FIXME comments (#12772) +- CI: Bump KAL & add nodurations linter (#12743) +- CI: Bump KAL & add nonullable and forbiddenmarkers linter (#12724) +- CI: Bump KAL & drop schemaless excludes (#12646) +- CI: Bump KAL, adjust excludes (#12650) +- CI: Bump Kube API Linter version (#12974) +- CI: Bump to golangci-linter version to v2.4.0 (#12703) +- CI: Update metrics configuration to v1beta2 (#12642) +- Cluster: Allow >1 minor version upgrades if generateUpgradePlan extension is defined (#12979) +- Cluster: Simplify Cluster webhook (#12895) +- ClusterClass: Add input validations 
for desired state generator function (#12655) +- ClusterClass: Improve topology reconciled condition (#13002) +- clusterctl: Add metal-stack infrastructure provider (#12925) +- clusterctl: Add Metal3 as an IPAMProvider (#12756) +- clusterctl: Allow metadata.yaml's Kind to be empty (#12714) +- clusterctl: Bump cert-manager to v1.19.0 (#12828) +- clusterctl: Bump cert-manager to v1.19.1 (#12873) +- clusterctl: Bumping tablewriter to v1.0.9 - latest (#12781) +- clusterctl: Show Available condition for CP (#12759) +- ClusterResourceSet: Remove deprecated ClusterResourceSet feature gate (#12950) +- Dependency: Bump github.com/go-viper/mapstructure/v2 to fix CVE (#12677) +- Dependency: Bump Go to v1.24.10 (#12962) +- Dependency: Bump Go to v1.24.6 (#12611) +- Dependency: Bump go to v1.24.7 (#12733) +- Dependency: Bump Go to v1.24.8 (#12826) +- Dependency: Bump Go to v1.24.9 (#12867) +- Dependency: Bump golang.org/x/crypto to v0.45 to fix CVE (#13036) +- Dependency: Bump to envtest v1.34.0 (#12702) +- Devtools: Drop ALL groups in tilt (#13001) +- Devtools: Extend CR dashboard to handle capi_reconcile metrics (#13040) +- Devtools: Promtail to alloy migration (#11945) +- Documentation/MachinePool: Add structure for area/machinepool ownership in OWNERS files (#13018) +- e2e: Add quickstart e2e test with v1beta1 with ClusterClass and RuntimeSDK (#12577) +- e2e: Bump autoscaler in e2e tests to v1.34.0 (#12806) +- e2e: Bump pause image in e2e tests to 3.10.1 (default for Kubernetes v1.34) (#12731) +- e2e: Bump to kind v0.30.0 (#12701) +- e2e: E2e coverage via md rollout test (#12966) +- e2e: Enable IPv6 test again (#12597) +- e2e: Fix autoscaler test (#12978) +- e2e: Fix kind build git commit setting (#12858) +- e2e: Fix md-rollout test to test in-place taint changes before doing the rollout change (#13031) +- e2e: Fix self-hosted to actually read DOCKER_PRELOAD_IMAGES from the e2e config (#12907) +- e2e: Get kind mgmt cluster logs in clusterctl upgrade test (#12676) +- e2e: Improve check for Cluster Available condition in e2e tests (#12594) +- e2e: Set startup taint for autoscaler in e2e tests (#12736) +- e2e: Start testing against Kubernetes v1.35 (#12709) +- e2e: Use wait-machine-upgrade timeout in ClusterClassChanges tests to wait for machines to be ready (#13013) +- e2e: Wait for cluster deletion in runtime sdk test (#12956) +- KCP/CABPK: Stop using unsafe for EnvVar conversion (#12619) +- KCP/MachineSet/MachineDeployment: Remove unused CleanUpManagedFieldsForSSAAdoption code (#12788) +- KCP: Add current/desired objects to NotUpToDateResult & refactor object creation (#12817) +- KCP: Avoid KCP rollouts if only ControlPlaneComponentHealthCheckSeconds is changed (#13026) +- KCP: Check for error before checking reconcile result (#12935) +- KCP: Enable websocket dialer with fallback to spdy (#12902) +- KCP: Fix race condition on KCP initialized condition (#12980) +- KCP: Improve KCP etcd client crt/key caching (#12977) +- KCP: Simplify cleanupConfigFields in KCP (#12776) +- KCP: Simplify KCP matchesKubeadmConfig (#12813) +- KCP: Variable/func renames, func order (#12793) +- Logging: Reduce noisy logs (#12606) +- Machine/MachineSet/MachineDeployment: Adjust UpToDate condition to consider Updating, move UpToDate condition to Machine ctrl for workers (#12959) +- Machine: Add DisableCertPrivateKey function for clustercache for test flake (#12921) +- Machine: Consider updating condition when computing Machine's ready condition (#12939) +- Machine: Fix TestReconcileMachinePhases flake (#12818) +- Machine: 
Implement Updating Machine phase (#12940) +- Machine: Requeue for Machine Available condition (#12953) +- Machine: Use apireader to directly talk to apiserver (#12819) +- MachineDeployment: Add in-place to machineset controller (#12906) +- MachineDeployment: Add more info to logs for rollout changes (#12997) +- MachineDeployment: Add rollout planner (#12804) +- MachineDeployment: Cleanup getMachinesSucceeded flag from MD controller (#12882) +- MachineDeployment: Defer in-place updates for machines not yet provisioned (#13007) +- MachineDeployment: Fix misleading log statements and optimize logic (#12871) +- MachineDeployment: Move compute and create ms to rollout planner (#12841) +- MachineDeployment: Refactor MachineTemplateUpToDate (#12811) +- MachineDeployment: Rollout-planner improve checks for scalingOrInPlaceUpdateInProgress (#12954) +- MachineDeployment: Simplify rollout planner (#12899) +- MachineHealthCheck: Add reason to condition, add reason+message to log (#12987) +- MachineHealthCheck: No longer requeue when remediation is not allowed (#12924) +- MachinePool: Migrate from Requeue to RequeueAfter in MachinePool tests (#13027) +- MachineSet: Fix flakes in syncMachines unit test (#12918) +- MachineSet: Fix race conditions with global scheme in TestMachineSetReconciler_reconcileUnhealthyMachines (#12919) +- MachineSet: Refactor BootstrapConfig/InfraMachine creation in MachineSet controller (#12881) +- Misc: Add Proxy Support to Docker Build-Args (#12669) +- Misc: Avoid using deprecated client.Patch method (#12737) +- Misc: Cleanup TestReconcileMachinePhases (#12976) +- Misc: Consistent webhook file/folder structure (#12791) +- Misc: Improve mark hook utils (#12994) +- Misc: Improve wait for cache (#12993) +- Misc: Log version and git commit on controller start (#12694) +- Misc: Stop setting and relying on TypeMeta in typed objects (#12533) +- Misc: Use errors package of Go (#10875) +- Release/clusterctl: Add CAPRKE2 to release tool’s issue-opening providers list (#12713) +- Release/Testing: Added test cases for list.go and github.go (#11937) +- Release: Clarify semantic of --previous-release-version (#12995) +- Release: Prepare main branch for v1.12 development (#12723) +- Release: Update release notes tool to handle multiples_areas and colons (#12738) +- Runtime SDK: Add defensive response status checking in runtime client (#12898) +- Runtime SDK: Add hint to look into controller logs to runtime client error response (#12849) +- Runtime SDK: Deduplicate extension filtering and response validation logic (#12905) +- Runtime SDK: Migrate from Requeue to RequeueAfter in extensionconfig (#13000) +- Testing: Add clusterapi crd groups to audit logs for envtest (#12883) +- Testing: Enable audit logs for envtest-based unit tests if ARTIFACTS env var is set (#12847) +- Testing: Implement unit-tests for desired state generator (#12656) +- Testing: Update version matrix for GitHub workflows for release 1.11 (#12586) +- util: Add check version against metadata utility (#12529) +- util: Add items to cache immediately after apply (#12877) + +:book: Additionally, there have been 42 contributions to our documentation and book. 
(#12085, #12199, #12329, #12562, #12581, #12582, #12593, #12600, #12601, #12647, #12662, #12663, #12673, #12710, #12741, #12761, #12777, #12779, #12797, #12810, #12814, #12835, #12836, #12854, #12866, #12880, #12885, #12892, #12893, #12896, #12917, #12934, #12942, #12944, #12951, #12961, #12970, #12998, #13010, #13011, #13012, #13021) + +## Dependencies + +### Added +- github.com/gkampitakis/ciinfo: [v0.3.2](https://github.com/gkampitakis/ciinfo/tree/v0.3.2) +- github.com/gkampitakis/go-diff: [v1.3.2](https://github.com/gkampitakis/go-diff/tree/v1.3.2) +- github.com/gkampitakis/go-snaps: [v0.5.15](https://github.com/gkampitakis/go-snaps/tree/v0.5.15) +- github.com/go-jose/go-jose/v4: [v4.0.4](https://github.com/go-jose/go-jose/tree/v4.0.4) +- github.com/goccy/go-yaml: [v1.18.0](https://github.com/goccy/go-yaml/tree/v1.18.0) +- github.com/golang-jwt/jwt/v5: [v5.2.2](https://github.com/golang-jwt/jwt/tree/v5.2.2) +- github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus: [v1.0.1](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/providers/prometheus/v1.0.1) +- github.com/grpc-ecosystem/go-grpc-middleware/v2: [v2.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2.3.0) +- github.com/joshdk/go-junit: [v1.0.0](https://github.com/joshdk/go-junit/tree/v1.0.0) +- github.com/maruel/natural: [v1.1.1](https://github.com/maruel/natural/tree/v1.1.1) +- github.com/mfridman/tparse: [v0.18.0](https://github.com/mfridman/tparse/tree/v0.18.0) +- github.com/olekukonko/cat: [50322a0](https://github.com/olekukonko/cat/tree/50322a0) +- github.com/olekukonko/errors: [v1.1.0](https://github.com/olekukonko/errors/tree/v1.1.0) +- github.com/olekukonko/ll: [v0.1.1](https://github.com/olekukonko/ll/tree/v0.1.1) +- github.com/olekukonko/ts: [78ecb04](https://github.com/olekukonko/ts/tree/78ecb04) +- github.com/spiffe/go-spiffe/v2: [v2.5.0](https://github.com/spiffe/go-spiffe/tree/v2.5.0) +- github.com/tidwall/gjson: [v1.18.0](https://github.com/tidwall/gjson/tree/v1.18.0) +- github.com/tidwall/match: [v1.1.1](https://github.com/tidwall/match/tree/v1.1.1) +- github.com/tidwall/pretty: [v1.2.1](https://github.com/tidwall/pretty/tree/v1.2.1) +- github.com/tidwall/sjson: [v1.2.5](https://github.com/tidwall/sjson/tree/v1.2.5) +- github.com/zeebo/errs: [v1.4.0](https://github.com/zeebo/errs/tree/v1.4.0) +- go.etcd.io/raft/v3: v3.6.0 +- sigs.k8s.io/structured-merge-diff/v6: v6.3.0 + +### Changed +- cel.dev/expr: v0.19.1 → v0.24.0 +- cloud.google.com/go/storage: v1.49.0 → v1.5.0 +- cloud.google.com/go: v0.116.0 → v0.53.0 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp: [v1.25.0 → v1.26.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/compare/detectors/gcp/v1.25.0...detectors/gcp/v1.26.0) +- github.com/Masterminds/semver/v3: [v3.3.0 → v3.4.0](https://github.com/Masterminds/semver/compare/v3.3.0...v3.4.0) +- github.com/census-instrumentation/opencensus-proto: [v0.4.1 → v0.2.1](https://github.com/census-instrumentation/opencensus-proto/compare/v0.4.1...v0.2.1) +- github.com/cncf/xds/go: [cff3c89 → 2f00578](https://github.com/cncf/xds/compare/cff3c89...2f00578) +- github.com/coredns/corefile-migration: [v1.0.27 → v1.0.29](https://github.com/coredns/corefile-migration/compare/v1.0.27...v1.0.29) +- github.com/emicklei/go-restful/v3: [v3.12.2 → v3.13.0](https://github.com/emicklei/go-restful/compare/v3.12.2...v3.13.0) +- github.com/fsnotify/fsnotify: [v1.8.0 → v1.9.0](https://github.com/fsnotify/fsnotify/compare/v1.8.0...v1.9.0) +- 
github.com/fxamacker/cbor/v2: [v2.7.0 → v2.9.0](https://github.com/fxamacker/cbor/compare/v2.7.0...v2.9.0) +- github.com/go-viper/mapstructure/v2: [v2.3.0 → v2.4.0](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) +- github.com/golang/groupcache: [41bb18b → 8c9f03a](https://github.com/golang/groupcache/compare/41bb18b...8c9f03a) +- github.com/google/cel-go: [v0.23.2 → v0.26.0](https://github.com/google/cel-go/compare/v0.23.2...v0.26.0) +- github.com/google/gnostic-models: [v0.6.9 → v0.7.0](https://github.com/google/gnostic-models/compare/v0.6.9...v0.7.0) +- github.com/google/pprof: [27863c8 → f64d9cf](https://github.com/google/pprof/compare/27863c8...f64d9cf) +- github.com/googleapis/gax-go/v2: [v2.14.1 → v2.0.5](https://github.com/googleapis/gax-go/compare/v2.14.1...v2.0.5) +- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.24.0 → v2.26.3](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.24.0...v2.26.3) +- github.com/ianlancetaylor/demangle: [bd984b5 → f615e6b](https://github.com/ianlancetaylor/demangle/compare/bd984b5...f615e6b) +- github.com/jonboulle/clockwork: [v0.4.0 → v0.5.0](https://github.com/jonboulle/clockwork/compare/v0.4.0...v0.5.0) +- github.com/mattn/go-colorable: [v0.1.13 → v0.1.14](https://github.com/mattn/go-colorable/compare/v0.1.13...v0.1.14) +- github.com/mattn/go-runewidth: [v0.0.14 → v0.0.16](https://github.com/mattn/go-runewidth/compare/v0.0.14...v0.0.16) +- github.com/modern-go/reflect2: [v1.0.2 → 35a7c28](https://github.com/modern-go/reflect2/compare/v1.0.2...35a7c28) +- github.com/olekukonko/tablewriter: [v0.0.5 → v1.0.9](https://github.com/olekukonko/tablewriter/compare/v0.0.5...v1.0.9) +- github.com/onsi/ginkgo/v2: [v2.23.4 → v2.27.2](https://github.com/onsi/ginkgo/compare/v2.23.4...v2.27.2) +- github.com/onsi/gomega: [v1.38.0 → v1.38.2](https://github.com/onsi/gomega/compare/v1.38.0...v1.38.2) +- github.com/pelletier/go-toml/v2: [v2.2.3 → v2.2.4](https://github.com/pelletier/go-toml/compare/v2.2.3...v2.2.4) +- github.com/prometheus/client_model: [v0.6.1 → v0.6.2](https://github.com/prometheus/client_model/compare/v0.6.1...v0.6.2) +- github.com/rivo/uniseg: [v0.4.2 → v0.4.7](https://github.com/rivo/uniseg/compare/v0.4.2...v0.4.7) +- github.com/rogpeppe/go-internal: [v1.13.1 → v1.14.1](https://github.com/rogpeppe/go-internal/compare/v1.13.1...v1.14.1) +- github.com/sagikazarmark/locafero: [v0.7.0 → v0.11.0](https://github.com/sagikazarmark/locafero/compare/v0.7.0...v0.11.0) +- github.com/sourcegraph/conc: [v0.3.0 → 5f936ab](https://github.com/sourcegraph/conc/compare/v0.3.0...5f936ab) +- github.com/spf13/afero: [v1.12.0 → v1.15.0](https://github.com/spf13/afero/compare/v1.12.0...v1.15.0) +- github.com/spf13/cast: [v1.7.1 → v1.10.0](https://github.com/spf13/cast/compare/v1.7.1...v1.10.0) +- github.com/spf13/cobra: [v1.9.1 → v1.10.1](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1) +- github.com/spf13/pflag: [v1.0.7 → v1.0.10](https://github.com/spf13/pflag/compare/v1.0.7...v1.0.10) +- github.com/spf13/viper: [v1.20.1 → v1.21.0](https://github.com/spf13/viper/compare/v1.20.1...v1.21.0) +- github.com/stretchr/testify: [v1.10.0 → v1.11.1](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.1) +- go.etcd.io/bbolt: v1.3.11 → v1.4.2 +- go.etcd.io/etcd/api/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/client/pkg/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/client/v3: v3.5.22 → v3.6.6 +- go.etcd.io/etcd/pkg/v3: v3.5.21 → v3.6.4 +- go.etcd.io/etcd/server/v3: v3.5.21 → v3.6.4 +- go.opencensus.io: v0.24.0 → v0.22.3 +- 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.58.0 → v0.60.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.33.0 → v1.34.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.33.0 → v1.34.0 +- go.opentelemetry.io/otel/metric: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel/trace: v1.34.0 → v1.35.0 +- go.opentelemetry.io/otel: v1.34.0 → v1.35.0 +- go.opentelemetry.io/proto/otlp: v1.4.0 → v1.5.0 +- go.uber.org/zap: v1.27.0 → v1.27.1 +- go.yaml.in/yaml/v3: v3.0.3 → v3.0.4 +- golang.org/x/crypto: v0.40.0 → v0.45.0 +- golang.org/x/mod: v0.25.0 → v0.29.0 +- golang.org/x/net: v0.42.0 → v0.47.0 +- golang.org/x/oauth2: v0.30.0 → v0.33.0 +- golang.org/x/sync: v0.16.0 → v0.18.0 +- golang.org/x/sys: v0.34.0 → v0.38.0 +- golang.org/x/telemetry: bda5523 → 078029d +- golang.org/x/term: v0.33.0 → v0.37.0 +- golang.org/x/text: v0.27.0 → v0.31.0 +- golang.org/x/tools: v0.34.0 → v0.38.0 +- google.golang.org/api: v0.215.0 → v0.17.0 +- google.golang.org/genproto/googleapis/api: 5f5ef82 → a0af3ef +- google.golang.org/genproto/googleapis/rpc: 1a7da9e → a0af3ef +- google.golang.org/genproto: e639e21 → 66ed5ce +- google.golang.org/grpc: v1.71.3 → v1.72.3 +- google.golang.org/protobuf: v1.36.6 → v1.36.7 +- k8s.io/api: v0.33.3 → v0.34.2 +- k8s.io/apiextensions-apiserver: v0.33.3 → v0.34.2 +- k8s.io/apimachinery: v0.33.3 → v0.34.2 +- k8s.io/apiserver: v0.33.3 → v0.34.2 +- k8s.io/client-go: v0.33.3 → v0.34.2 +- k8s.io/cluster-bootstrap: v0.33.3 → v0.34.2 +- k8s.io/code-generator: v0.33.3 → v0.34.2 +- k8s.io/component-base: v0.33.3 → v0.34.2 +- k8s.io/gengo/v2: 1244d31 → 85fd79d +- k8s.io/kms: v0.33.3 → v0.34.2 +- k8s.io/kube-openapi: c8a335a → f3f2b99 +- k8s.io/utils: 3ea5e8c → 4c0f3b2 +- sigs.k8s.io/controller-runtime: v0.21.0 → v0.22.4 +- sigs.k8s.io/json: 9aa6b5e → cfa47c3 + +### Removed +- cloud.google.com/go/auth/oauth2adapt: v0.2.6 +- cloud.google.com/go/auth: v0.13.0 +- cloud.google.com/go/iam: v1.2.2 +- cloud.google.com/go/monitoring: v1.21.2 +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/exporter/metric/v0.48.1) +- github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping: [v0.48.1](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/tree/internal/resourcemapping/v0.48.1) +- github.com/golang-jwt/jwt/v4: [v4.5.2](https://github.com/golang-jwt/jwt/tree/v4.5.2) +- github.com/google/s2a-go: [v0.1.8](https://github.com/google/s2a-go/tree/v0.1.8) +- github.com/googleapis/enterprise-certificate-proxy: [v0.3.4](https://github.com/googleapis/enterprise-certificate-proxy/tree/v0.3.4) +- github.com/grpc-ecosystem/go-grpc-middleware: [v1.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v1.3.0) +- github.com/grpc-ecosystem/grpc-gateway: [v1.16.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.16.0) +- github.com/kr/fs: [v0.1.0](https://github.com/kr/fs/tree/v0.1.0) +- github.com/pkg/sftp: [v1.13.7](https://github.com/pkg/sftp/tree/v1.13.7) +- github.com/prashantv/gostub: [v1.1.0](https://github.com/prashantv/gostub/tree/v1.1.0) +- go.etcd.io/etcd/client/v2: v2.305.21 +- go.etcd.io/etcd/raft/v3: v3.5.21 +- go.uber.org/atomic: v1.9.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.6.0 + +
+
+_Thanks to all our contributors!_ 😊 diff --git a/CHANGELOG/v1.9.11.md b/CHANGELOG/v1.9.11.md new file mode 100644 index 000000000000..fe92ca013399 --- /dev/null +++ b/CHANGELOG/v1.9.11.md @@ -0,0 +1,38 @@ +## 👌 Kubernetes version support + +- Management Cluster: v1.28.x -> v1.32.x +- Workload Cluster: v1.26.x -> v1.32.x + +[More information about version support can be found here](https://cluster-api.sigs.k8s.io/reference/versions.html) + +## Changes since v1.9.10 +## :chart_with_upwards_trend: Overview +- 7 new commits merged +- 1 feature addition ✨ +- 1 bug fixed 🐛 + +## :sparkles: New Features +- KCP: Bump corefile-migration to v1.0.27 (#12638) + +## :bug: Bug Fixes +- MachineDeployment: Fix second rolling update for MD rolloutAfter (#12556) + +## :seedling: Others +- Dependency: Bump github.com/docker/docker to v28.0.2+incompatible to fix CVE (#12645) +- Dependency: Bump Go to v1.23.11 (#12531) +- Dependency: Bump Go to v1.23.12 (#12620) +- Dependency/Security: Add CVE-2025-22868 to Trivy ignore file (#12508) +- Testing: Skipping test that is failing because of infra issues (#12567) + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/coredns/corefile-migration: [v1.0.26 → v1.0.27](https://github.com/coredns/corefile-migration/compare/v1.0.26...v1.0.27) + +### Removed +_Nothing has changed._ + +_Thanks to all our contributors!_ 😊 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 636f40a3845f..bce312ea26a4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -75,10 +75,8 @@ Cluster API follows upstream Kubernetes semantic versioning. With the v1 release - A (*patch*) release SHOULD only include backwards compatible set of bugfixes. -These guarantees extend to all code exposed in our Go Module, including -*types from dependencies in public APIs*. -Types and functions not in public APIs are not considered part of the guarantee. -The test module, clusterctl, and experiments do not provide any backward compatible guarantees. +see [Cluster API release support](https://cluster-api.sigs.k8s.io/reference/versions.html#cluster-api-release-support)) for +more details about supported releases and for considerations that might apply if you are importing Cluster API go modules as a dependency. #### Backporting a patch @@ -88,7 +86,7 @@ Any backport MUST NOT be breaking for API or behavioral changes. We usually backport critical bugs or security fixes, changes to support new Kubernetes minor versions (see [supported Kubernetes versions](https://cluster-api.sigs.k8s.io/reference/versions.html#supported-kubernetes-versions)), documentation and test signal improvements. Everything else is considered case by case. Release branches outside of the [standard support period](https://github.com/kubernetes-sigs/cluster-api/blob/main/CONTRIBUTING.md#cluster-api-release-support) are usually frozen, -although maintainers may allow backports to releases in [maintenance mode](https://github.com/kubernetes-sigs/cluster-api/blob/main/CONTRIBUTING.md#cluster-api-release-support) in specific situations +although maintainers may allow backports to releases in [maintenance mode](https://github.com/kubernetes-sigs/cluster-api/blob/main/CONTRIBUTING.md#cluster-api-release-support) in specific situations like CVEs, security, and other critical bug fixes. ### APIs @@ -379,7 +377,7 @@ licenses dependencies and other artifacts use. 
For go dependencies only dependen This project follows the [Kubernetes API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md). We enforce the API conventions via [kube-api-linter](https://github.com/kubernetes-sigs/kube-api-linter). -The corresponding configuration field can be found [here](https://github.com/kubernetes-sigs/cluster-api/blob/main/.golangci-kal.yml). +The corresponding configuration field can be found [here](https://github.com/kubernetes-sigs/cluster-api/blob/main/.golangci-kal.yml). Minor additions to the conventions are listed below. @@ -460,3 +458,5 @@ As of today there are following OWNERS files/Owner groups defining sub areas: - [Test](https://github.com/kubernetes-sigs/cluster-api/tree/main/test) - [Test Framework](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/framework) - [Docs](https://github.com/kubernetes-sigs/cluster-api/tree/main/docs) +- [Machine pools](https://github.com/kubernetes-sigs/cluster-api/tree/main/internal/controllers/machinepool) +- Ignition support [in kubeadm Bootstrap Provider](https://github.com/kubernetes-sigs/cluster-api/tree/main/bootstrap/kubeadm/internal/ignition) and [in CAPD](https://github.com/kubernetes-sigs/cluster-api/tree/main/test/infrastructure/docker/internal/provisioning/ignition) diff --git a/Makefile b/Makefile index 83270cd4a22d..117aa9356d92 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ SHELL:=/usr/bin/env bash # # Go. # -GO_VERSION ?= 1.24.9 +GO_VERSION ?= 1.24.11 GO_DIRECTIVE_VERSION ?= 1.24.0 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) @@ -31,6 +31,10 @@ GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) GOTOOLCHAIN = go$(GO_VERSION) export GOTOOLCHAIN +# Required with Go 1.24 to enable usage of synctest in unit tests +# Can be removed after we bumped to Go 1.25. +export GOEXPERIMENT=synctest + # Use GOPROXY environment variable if set GOPROXY := $(shell go env GOPROXY) ifeq ($(GOPROXY),) @@ -106,12 +110,12 @@ KUSTOMIZE_BIN := kustomize KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER)) KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v5 -SETUP_ENVTEST_VER := release-0.21 +SETUP_ENVTEST_VER := release-0.22 SETUP_ENVTEST_BIN := setup-envtest SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER)) SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest -CONTROLLER_GEN_VER := v0.18.0 +CONTROLLER_GEN_VER := v0.19.0 CONTROLLER_GEN_BIN := controller-gen CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen @@ -121,7 +125,7 @@ GOTESTSUM_BIN := gotestsum GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER)) GOTESTSUM_PKG := gotest.tools/gotestsum -CONVERSION_GEN_VER := v0.33.0 +CONVERSION_GEN_VER := v0.34.0 CONVERSION_GEN_BIN := conversion-gen # We are intentionally using the binary without version suffix, to avoid the version # in generated files. 
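The `export GOEXPERIMENT=synctest` added above opts the build into Go 1.24's experimental `testing/synctest` package for unit tests. As a rough, hypothetical sketch of the kind of test this enables (not part of this diff; the test name and assertions are invented), a file gated on the experiment can exercise timing-dependent code against a virtual clock:

```go
//go:build goexperiment.synctest

package example

import (
	"testing"
	"testing/synctest"
	"time"
)

// TestVirtualClock runs its body inside a synctest "bubble": time is faked,
// so the one-hour sleep completes immediately and deterministically instead
// of slowing down or flaking the unit test run.
func TestVirtualClock(t *testing.T) {
	synctest.Run(func() {
		start := time.Now()
		time.Sleep(time.Hour)
		if time.Since(start) < time.Hour {
			t.Errorf("expected the fake clock to advance by one hour, got %s", time.Since(start))
		}
	})
}
```

As the Makefile comment above notes, the export can be dropped once the project moves to Go 1.25, where synctest no longer requires the experiment flag.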
@@ -172,6 +176,11 @@ GOVULNCHECK_VER := v1.1.4 GOVULNCHECK := $(abspath $(TOOLS_BIN_DIR)/$(GOVULNCHECK_BIN)-$(GOVULNCHECK_VER)) GOVULNCHECK_PKG := golang.org/x/vuln/cmd/govulncheck +CRANE_BIN := crane +CRANE_VER := v0.20.7 +CRANE := $(abspath $(TOOLS_BIN_DIR)/$(CRANE_BIN)-$(CRANE_VER)) +CRANE_PKG := github.com/google/go-containerregistry/cmd/crane + IMPORT_BOSS_BIN := import-boss IMPORT_BOSS_VER := v0.28.1 IMPORT_BOSS := $(abspath $(TOOLS_BIN_DIR)/$(IMPORT_BOSS_BIN)) @@ -289,10 +298,6 @@ generate-manifests-core: $(CONTROLLER_GEN) $(KUSTOMIZE) ## Generate manifests e. paths=./internal/controllers/... \ paths=./internal/webhooks/... \ paths=./internal/api/addons/... \ - paths=./exp/internal/controllers/... \ - paths=./exp/internal/webhooks/... \ - paths=./exp/ipam/internal/webhooks/... \ - paths=./exp/runtime/internal/controllers/... \ crd:crdVersions=v1 \ rbac:roleName=manager-role \ output:crd:dir=./config/crd/bases \ @@ -374,9 +379,6 @@ generate-manifests-docker-infrastructure: $(CONTROLLER_GEN) ## Generate manifest cd $(CAPD_DIR); $(CONTROLLER_GEN) \ paths=./ \ paths=./api/... \ - paths=./exp/api/... \ - paths=./exp/internal/controllers/... \ - paths=./exp/internal/webhooks/... \ paths=./internal/controllers/... \ paths=./internal/webhooks/... \ crd:crdVersions=v1 \ @@ -434,11 +436,10 @@ generate-go-deepcopy-kubeadm-control-plane: $(CONTROLLER_GEN) ## Generate deepco .PHONY: generate-go-deepcopy-docker-infrastructure generate-go-deepcopy-docker-infrastructure: $(CONTROLLER_GEN) generate-go-deepcopy-in-memory-infrastructure ## Generate deepcopy go code for docker infrastructure provider - $(MAKE) clean-generated-deepcopy SRC_DIRS="$(CAPD_DIR)/api,$(CAPD_DIR)/exp/api" + $(MAKE) clean-generated-deepcopy SRC_DIRS="$(CAPD_DIR)/api" cd $(CAPD_DIR); $(CONTROLLER_GEN) \ object:headerFile=../../../hack/boilerplate/boilerplate.generatego.txt \ - paths=./api/... \ - paths=./exp/api/... + paths=./api/... 
.PHONY: generate-go-deepcopy-in-memory-infrastructure generate-go-deepcopy-in-memory-infrastructure: $(CONTROLLER_GEN) ## Generate deepcopy go code for in-memory cloud resources @@ -483,7 +484,7 @@ generate-go-conversions-addons-api: $(CONVERSION_GEN) ## Generate conversions go ./api/addons/v1beta1 .PHONY: generate-go-conversions-core-ipam -generate-go-conversions-core-ipam: $(CONVERSION_GEN) ## Generate conversions go code for core exp IPAM +generate-go-conversions-core-ipam: $(CONVERSION_GEN) ## Generate conversions go code for IPAM $(MAKE) clean-generated-conversions SRC_DIRS="./api/ipam/v1beta1,./api/ipam/v1alpha1" $(CONVERSION_GEN) \ --output-file=zz_generated.conversion.go \ @@ -538,10 +539,7 @@ generate-go-conversions-docker-infrastructure: $(CONVERSION_GEN) ## Generate con --go-header-file=../../../hack/boilerplate/boilerplate.generatego.txt \ ./api/v1alpha3 \ ./api/v1alpha4 \ - ./api/v1beta1 \ - ./exp/api/v1alpha3 \ - ./exp/api/v1alpha4 \ - ./exp/api/v1beta1 + ./api/v1beta1 .PHONY: generate-go-conversions-test-extension generate-go-conversions-test-extension: $(CONVERSION_GEN) ## Generate conversions go code for test runtime extension provider @@ -573,7 +571,7 @@ generate-doctoc: TRACE=$(TRACE) ./hack/generate-doctoc.sh .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.5 v1.6 v1.8 v1.9 v1.10 main) ## Generate cluster templates for all versions +generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.5 v1.6 v1.9 v1.10 v1.11 main) ## Generate cluster templates for all versions DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker @@ -595,11 +593,6 @@ generate-e2e-templates-v1.6: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.6/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.6/cluster-template.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.6/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.6/cluster-template-topology.yaml -.PHONY: generate-e2e-templates-v1.8 -generate-e2e-templates-v1.8: $(KUSTOMIZE) - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.8/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.8/cluster-template.yaml - $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.8/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.8/cluster-template-topology.yaml - .PHONY: generate-e2e-templates-v1.9 generate-e2e-templates-v1.9: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.9/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.9/cluster-template.yaml @@ -610,10 +603,16 @@ generate-e2e-templates-v1.10: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.10/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.10/cluster-template.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.10/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.10/cluster-template-topology.yaml +.PHONY: generate-e2e-templates-v1.11 +generate-e2e-templates-v1.11: $(KUSTOMIZE) + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.11/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.11/cluster-template.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1.11/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1.11/cluster-template-topology.yaml + .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) 
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-md-remediation.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-md-taints --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-md-taints.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-kcp-remediation.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-kcp-adoption/step1 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-kcp-adoption.yaml echo "---" >> $(DOCKER_TEMPLATES)/main/cluster-template-kcp-adoption.yaml @@ -626,6 +625,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ipv6.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv6-primary --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv6-primary.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv4-primary --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-dualstack-ipv4-primary.yaml + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-in-place --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-in-place.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-no-workers --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-no-workers.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-runtimesdk-v1beta1 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-runtimesdk-v1beta1.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology-kcp-only --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology-kcp-only.yaml @@ -733,7 +733,6 @@ verify-conversions: $(CONVERSION_VERIFIER) ## Verifies expected API conversion ./api/... \ ./internal/api/... \ ./test/infrastructure/docker/api/... \ - ./test/infrastructure/docker/exp/api/... .PHONY: verify-doctoc verify-doctoc: generate-doctoc @@ -950,30 +949,38 @@ test-cover: ## Run unit and integration tests and generate a coverage report go tool cover -func=out/coverage.out -o out/coverage.txt go tool cover -html=out/coverage.out -o out/coverage.html -.PHONY: test-docker-infrastructure -test-docker-infrastructure: $(SETUP_ENVTEST) ## Run unit and integration tests with race detector for docker infrastructure provider +.PHONY: test-infrastructure +test-infrastructure: $(SETUP_ENVTEST) ## Run unit and integration tests with race detector for docker infrastructure provider + # Note: Fuzz tests are not executed with race detector because they would just time out. + # To achieve that, all files with fuzz tests have the "!race" build tag, to still run fuzz tests + # we have an additional `go test` run that focuses on "TestFuzzyConversion". + cd test/infrastructure; KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -race ./... 
$(TEST_ARGS) + $(MAKE) test-infrastructure-conversions TEST_ARGS="$(TEST_ARGS)" + +.PHONY: test-infrastructure-no-race +test-infrastructure-no-race: $(SETUP_ENVTEST) ## Run unit and integration tests with no race detector for docker infrastructure provider # Note: Fuzz tests are not executed with race detector because they would just time out. # To achieve that, all files with fuzz tests have the "!race" build tag, to still run fuzz tests # we have an additional `go test` run that focuses on "TestFuzzyConversion". - cd $(CAPD_DIR); KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -race ./... $(TEST_ARGS) - $(MAKE) test-docker-infrastructure-conversions TEST_ARGS="$(TEST_ARGS)" + cd test/infrastructure; KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test ./... $(TEST_ARGS) -.PHONY: test-docker-infrastructure-conversions -test-docker-infrastructure-conversions: $(SETUP_ENVTEST) ## Run conversions test for docker infrastructure provider - cd $(CAPD_DIR); KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -run "^TestFuzzyConversion$$" ./... $(TEST_ARGS) +.PHONY: test-infrastructure-conversions +test-infrastructure-conversions: $(SETUP_ENVTEST) ## Run conversions test for docker infrastructure provider + cd test/infrastructure; KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -run "^TestFuzzyConversion$$" ./... $(TEST_ARGS) -.PHONY: test-docker-infrastructure-verbose -test-docker-infrastructure-verbose: ## Run unit and integration tests with race detector and with verbose flag for docker infrastructure provider - $(MAKE) test-docker-infrastructure TEST_ARGS="$(TEST_ARGS) -v" +.PHONY: test-infrastructure-verbose +test-infrastructure-verbose: ## Run unit and integration tests with race detector and with verbose flag for docker infrastructure provider + $(MAKE) test-infrastructure TEST_ARGS="$(TEST_ARGS) -v" -.PHONY: test-docker-infrastructure-junit -test-docker-infrastructure-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests with race detector and generate a junit report for docker infrastructure provider - cd $(CAPD_DIR); set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -race -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.infra_docker.exitcode) | tee $(ARTIFACTS)/junit.infra_docker.stdout +.PHONY: test-infrastructure-junit +test-infrastructure-junit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit and integration tests with race detector and generate a junit report for docker infrastructure provider + cd test/infrastructure; set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -race -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit.infra_docker.exitcode) | tee $(ARTIFACTS)/junit.infra_docker.stdout $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit.infra_docker.xml --raw-command cat $(ARTIFACTS)/junit.infra_docker.stdout exit $$(cat $(ARTIFACTS)/junit.infra_docker.exitcode) - cd $(CAPD_DIR); set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -run "^TestFuzzyConversion$$" -json ./... $(TEST_ARGS); echo $$? > $(ARTIFACTS)/junit-fuzz.infra_docker.exitcode) | tee $(ARTIFACTS)/junit-fuzz.infra_docker.stdout + cd test/infrastructure; set +o errexit; (KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -run "^TestFuzzyConversion$$" -json ./... $(TEST_ARGS); echo $$? 
> $(ARTIFACTS)/junit-fuzz.infra_docker.exitcode) | tee $(ARTIFACTS)/junit-fuzz.infra_docker.stdout $(GOTESTSUM) --junitfile $(ARTIFACTS)/junit-fuzz.infra_docker.xml --raw-command cat $(ARTIFACTS)/junit-fuzz.infra_docker.stdout exit $$(cat $(ARTIFACTS)/junit-fuzz.infra_docker.exitcode) + .PHONY: test-test-extension test-test-extension: $(SETUP_ENVTEST) ## Run unit and integration tests for the test extension cd $(TEST_EXTENSION_DIR); KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" go test -race ./... $(TEST_ARGS) @@ -1443,6 +1450,9 @@ $(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint. .PHONY: $(GOVULNCHECK_BIN) $(GOVULNCHECK_BIN): $(GOVULNCHECK) ## Build a local copy of govulncheck. +.PHONY: $(CRANE_BIN) +$(CRANE_BIN): $(CRANE) ## Build a local copy of crane. + .PHONY: $(IMPORT_BOSS_BIN) $(IMPORT_BOSS_BIN): $(IMPORT_BOSS) @@ -1504,6 +1514,9 @@ $(GOLANGCI_LINT_KAL): $(GOLANGCI_LINT) # Build golangci-lint-kal from custom con $(GOVULNCHECK): # Build govulncheck. GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOVULNCHECK_PKG) $(GOVULNCHECK_BIN) $(GOVULNCHECK_VER) +$(CRANE): # Build crane. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CRANE_PKG) $(CRANE_BIN) $(CRANE_VER) + $(IMPORT_BOSS): # Build import-boss GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(IMPORT_BOSS_PKG) $(IMPORT_BOSS_BIN) $(IMPORT_BOSS_VER) diff --git a/Tiltfile b/Tiltfile index f9d62e2be08f..2c16eaa73392 100644 --- a/Tiltfile +++ b/Tiltfile @@ -172,7 +172,7 @@ def load_provider_tilt_files(): tilt_helper_dockerfile_header = """ # Tilt image -FROM golang:1.24.9 as tilt-helper +FROM golang:1.24.11 as tilt-helper # Install delve. Note this should be kept in step with the Go release minor version. RUN go install github.com/go-delve/delve/cmd/dlv@v1.24 # Support live reloading with Tilt @@ -183,7 +183,7 @@ RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com """ tilt_dockerfile_header = """ -FROM golang:1.24.9 as tilt +FROM golang:1.24.11 as tilt WORKDIR / COPY --from=tilt-helper /process.txt . COPY --from=tilt-helper /start.sh . 
@@ -249,7 +249,7 @@ def build_go_binary(context, reload_deps, debug, go_main, binary_name, label): build_cmd = build_cmd, ), deps = live_reload_deps, - labels = [label, "ALL.binaries"], + labels = [label], ) def build_docker_image(image, context, binary_name, additional_docker_build_commands, additional_docker_helper_commands, port_forwards): @@ -290,7 +290,12 @@ def build_docker_image(image, context, binary_name, additional_docker_build_comm ref = image, context = context + "/.tiltbuild/bin/", dockerfile_contents = dockerfile_contents, - build_args = {"binary_name": binary_name}, + build_args = { + "binary_name": binary_name, + "http_proxy": os.getenv("http_proxy", ""), + "https_proxy": os.getenv("https_proxy", ""), + "no_proxy": os.getenv("no_proxy", ""), + }, target = "tilt", only = binary_name, live_update = [ @@ -363,7 +368,7 @@ def enable_provider(name, debug): workload = find_object_name(objs, "Deployment"), objects = [find_object_qualified_name(objs, "Provider")] + find_all_objects_names(additional_objs), new_name = label.lower() + "_controller", - labels = [label, "ALL.controllers"], + labels = [label], port_forwards = port_forwards, links = links, resource_deps = ["provider_crd"] + p.get("resource_deps", []), @@ -420,9 +425,9 @@ def deploy_provider_crds(): ) def deploy_observability(): - if "promtail" in settings.get("deploy_observability", []): - k8s_yaml(read_file("./.tiltbuild/yaml/promtail.observability.yaml"), allow_duplicates = True) - k8s_resource(workload = "promtail", extra_pod_selectors = [{"app": "promtail"}], labels = ["observability"], resource_deps = ["loki"], objects = ["promtail:serviceaccount"]) + if "alloy" in settings.get("deploy_observability", []): + k8s_yaml(read_file("./.tiltbuild/yaml/alloy.observability.yaml"), allow_duplicates = True) + k8s_resource(workload = "alloy", extra_pod_selectors = [{"app": "alloy"}], labels = ["observability"], resource_deps = ["loki"], objects = ["alloy:serviceaccount"]) if "loki" in settings.get("deploy_observability", []): k8s_yaml(read_file("./.tiltbuild/yaml/loki.observability.yaml"), allow_duplicates = True) diff --git a/api/bootstrap/kubeadm/v1beta1/conversion.go b/api/bootstrap/kubeadm/v1beta1/conversion.go index 3f6bf3eeb155..8c5d7c57f410 100644 --- a/api/bootstrap/kubeadm/v1beta1/conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/conversion.go @@ -81,6 +81,9 @@ func RestoreKubeadmConfigSpec(restored *bootstrapv1.KubeadmConfigSpec, dst *boot dst.ClusterConfiguration.CACertificateValidityPeriodDays = restored.ClusterConfiguration.CACertificateValidityPeriodDays } } + if restored.ClusterConfiguration.EncryptionAlgorithm != "" { + dst.ClusterConfiguration.EncryptionAlgorithm = restored.ClusterConfiguration.EncryptionAlgorithm + } } func RestoreBoolIntentKubeadmConfigSpec(src *KubeadmConfigSpec, dst *bootstrapv1.KubeadmConfigSpec, hasRestored bool, restored *bootstrapv1.KubeadmConfigSpec) error { diff --git a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go index bae21feb41fd..16b20da9ed1e 100644 --- a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go @@ -691,6 +691,7 @@ func autoConvert_v1beta2_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) // WARNING: in.CertificateValidityPeriodDays requires manual conversion: does not exist in peer-type // WARNING: in.CACertificateValidityPeriodDays requires manual 
conversion: does not exist in peer-type + // WARNING: in.EncryptionAlgorithm requires manual conversion: does not exist in peer-type return nil } diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go index e9f17e19a28b..17e3bfc7840f 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go @@ -72,6 +72,23 @@ const ( KubeadmConfigDataSecretNotAvailableReason = clusterv1.NotAvailableReason ) +// EncryptionAlgorithmType can define an asymmetric encryption algorithm type. +// +kubebuilder:validation:Enum=ECDSA-P256;ECDSA-P384;RSA-2048;RSA-3072;RSA-4096 +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmECDSAP256 defines the ECDSA encryption algorithm type with curve P256. + EncryptionAlgorithmECDSAP256 EncryptionAlgorithmType = "ECDSA-P256" + // EncryptionAlgorithmECDSAP384 defines the ECDSA encryption algorithm type with curve P384. + EncryptionAlgorithmECDSAP384 EncryptionAlgorithmType = "ECDSA-P384" + // EncryptionAlgorithmRSA2048 defines the RSA encryption algorithm type with key size 2048 bits. + EncryptionAlgorithmRSA2048 EncryptionAlgorithmType = "RSA-2048" + // EncryptionAlgorithmRSA3072 defines the RSA encryption algorithm type with key size 3072 bits. + EncryptionAlgorithmRSA3072 EncryptionAlgorithmType = "RSA-3072" + // EncryptionAlgorithmRSA4096 defines the RSA encryption algorithm type with key size 4096 bits. + EncryptionAlgorithmRSA4096 EncryptionAlgorithmType = "RSA-4096" +) + // InitConfiguration contains a list of elements that is specific "kubeadm init"-only runtime // information. // +kubebuilder:validation:MinProperties=1 @@ -174,16 +191,7 @@ type ClusterConfiguration struct { CertificatesDir string `json:"certificatesDir,omitempty"` // imageRepository sets the container registry to pull images from. - // * If not set, the default registry of kubeadm will be used, i.e. - // * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - // * k8s.gcr.io (old registry): all older versions - // Please note that when imageRepository is not set we don't allow upgrades to - // versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - // a newer patch version with the new registry instead (i.e. >= v1.22.17, - // >= v1.23.15, >= v1.24.9, >= v1.25.0). - // * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - // `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - // and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + // If not set, the default registry of kubeadm will be used (registry.k8s.io). // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 @@ -208,6 +216,16 @@ type ClusterConfiguration struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=36500 CACertificateValidityPeriodDays int32 `json:"caCertificateValidityPeriodDays,omitempty"` + + // encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + // Can be one of "RSA-2048", "RSA-3072", "RSA-4096", "ECDSA-P256" or "ECDSA-P384". + // For Kubernetes 1.34 or above, "ECDSA-P384" is supported. + // If not specified, Cluster API will use RSA-2048 as default. + // When this field is modified every certificate generated afterward will use the new + // encryptionAlgorithm. Existing CA certificates and service account keys are not rotated. 
+ // This field is only supported with Kubernetes v1.31 or above. + // +optional + EncryptionAlgorithm EncryptionAlgorithmType `json:"encryptionAlgorithm,omitempty"` } // IsDefined returns true if the ClusterConfiguration is defined. @@ -356,6 +374,11 @@ type APIEndpoint struct { BindPort int32 `json:"bindPort,omitempty"` } +// IsDefined returns true if the APIEndpoint is defined. +func (r *APIEndpoint) IsDefined() bool { + return r.AdvertiseAddress != "" || r.BindPort != 0 +} + // NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join". // Note: The NodeRegistrationOptions struct has to be kept in sync with the structs in MarshalJSON. // +kubebuilder:validation:MinProperties=1 diff --git a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go index 95aeb6a5fb88..e3622270b490 100644 --- a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go +++ b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go @@ -46,10 +46,6 @@ const ( // SkipKubeProxyAnnotation annotation explicitly skips reconciling kube-proxy if set. SkipKubeProxyAnnotation = "controlplane.cluster.x-k8s.io/skip-kube-proxy" - // KubeadmClusterConfigurationAnnotation is a machine annotation that stores the json-marshalled string of KCP ClusterConfiguration. - // This annotation is used to detect any changes in ClusterConfiguration and trigger machine rollout in KCP. - KubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration" - // RemediationInProgressAnnotation is used to keep track that a KCP remediation is in progress, and more // specifically it tracks that the system is in between having deleted an unhealthy machine and recreating its replacement. // NOTE: if something external to CAPI removes this annotation the system cannot detect the above situation; this can lead to @@ -424,11 +420,6 @@ type KubeadmControlPlaneSpec struct { Replicas *int32 `json:"replicas,omitempty"` // version defines the desired Kubernetes version. - // Please note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository is not set - // we don't allow upgrades to versions >= v1.22.0 for which kubeadm uses the old registry (k8s.gcr.io). - // Please use a newer patch version with the new registry instead. The default registries of kubeadm are: - // * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - // * k8s.gcr.io (old registry): all older versions // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 diff --git a/api/core/v1beta1/cluster_types.go b/api/core/v1beta1/cluster_types.go index c6ff0853b180..5d6d69a9b36d 100644 --- a/api/core/v1beta1/cluster_types.go +++ b/api/core/v1beta1/cluster_types.go @@ -1092,12 +1092,12 @@ func (c *ClusterStatus) GetTypedPhase() ClusterPhase { type APIEndpoint struct { // host is the hostname on which the API server is serving. // TODO: Can't set MinLength=1 for now, because this struct is not always used in pointer fields so today we have cases where host is set to an empty string. - // +required + // +optional // +kubebuilder:validation:MaxLength=512 Host string `json:"host"` // port is the port on which the API server is serving. 
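For illustration, a hypothetical snippet (not part of this diff) showing the new `encryptionAlgorithm` field in use; only the field and constant names come from the types added above, and the import path is assumed from this repository's layout:

```go
package example

import (
	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
)

// clusterConfiguration opts certificate and key generation into ECDSA P-256.
// Per the field documentation, certificates generated after this is set use
// the new algorithm; existing CA certificates and service account keys are
// not rotated.
func clusterConfiguration() bootstrapv1.ClusterConfiguration {
	return bootstrapv1.ClusterConfiguration{
		EncryptionAlgorithm: bootstrapv1.EncryptionAlgorithmECDSAP256,
	}
}
```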
- // +required + // +optional Port int32 `json:"port"` } diff --git a/api/core/v1beta1/conversion.go b/api/core/v1beta1/conversion.go index 03d7a7a9ad8e..a240707da77a 100644 --- a/api/core/v1beta1/conversion.go +++ b/api/core/v1beta1/conversion.go @@ -73,6 +73,11 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { return err } + dst.Spec.Topology.ControlPlane.HealthCheck.Checks.UnhealthyMachineConditions = restored.Spec.Topology.ControlPlane.HealthCheck.Checks.UnhealthyMachineConditions + for i, md := range restored.Spec.Topology.Workers.MachineDeployments { + dst.Spec.Topology.Workers.MachineDeployments[i].HealthCheck.Checks.UnhealthyMachineConditions = md.HealthCheck.Checks.UnhealthyMachineConditions + } + // Recover intent for bool values converted to *bool. clusterv1.Convert_bool_To_Pointer_bool(src.Spec.Paused, ok, restored.Spec.Paused, &dst.Spec.Paused) @@ -145,6 +150,11 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { return err } + dst.Spec.ControlPlane.HealthCheck.Checks.UnhealthyMachineConditions = restored.Spec.ControlPlane.HealthCheck.Checks.UnhealthyMachineConditions + for i, md := range restored.Spec.Workers.MachineDeployments { + dst.Spec.Workers.MachineDeployments[i].HealthCheck.Checks.UnhealthyMachineConditions = md.HealthCheck.Checks.UnhealthyMachineConditions + } + // Recover intent for bool values converted to *bool. for i, patch := range dst.Spec.Patches { for j, definition := range patch.Definitions { @@ -248,6 +258,10 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { dst.Status.Variables[i] = variable } + dst.Spec.KubernetesVersions = restored.Spec.KubernetesVersions + + dst.Spec.Upgrade.External.GenerateUpgradePlanExtension = restored.Spec.Upgrade.External.GenerateUpgradePlanExtension + return nil } @@ -394,6 +408,11 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { // Recover other values. if ok { dst.Spec.MinReadySeconds = restored.Spec.MinReadySeconds + dst.Spec.Taints = restored.Spec.Taints + // Restore the phase, this also means that any client using v1beta1 during a round-trip + // won't be able to write the Phase field. But that's okay as the only client writing the Phase + // field should be the Machine controller. + dst.Status.Phase = restored.Status.Phase } return nil @@ -432,6 +451,17 @@ func (src *MachineSet) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.MinReadySeconds = &src.Spec.MinReadySeconds } + restored := &clusterv1.MachineSet{} + ok, err := utilconversion.UnmarshalData(src, restored) + if err != nil { + return err + } + + // Recover other values + if ok { + dst.Spec.Template.Spec.Taints = restored.Spec.Template.Spec.Taints + } + return nil } @@ -449,7 +479,8 @@ func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = ptr.Deref(src.Spec.Template.Spec.MinReadySeconds, 0) dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) - return nil + + return utilconversion.MarshalData(src, dst) } func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error { @@ -474,6 +505,11 @@ func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error { // Recover intent for bool values converted to *bool. 
clusterv1.Convert_bool_To_Pointer_bool(src.Spec.Paused, ok, restored.Spec.Paused, &dst.Spec.Paused) + // Recover other values + if ok { + dst.Spec.Template.Spec.Taints = restored.Spec.Template.Spec.Taints + } + return nil } @@ -509,6 +545,8 @@ func (src *MachineHealthCheck) ConvertTo(dstRaw conversion.Hub) error { return err } + dst.Spec.Checks.UnhealthyMachineConditions = restored.Spec.Checks.UnhealthyMachineConditions + clusterv1.Convert_int32_To_Pointer_int32(src.Status.ExpectedMachines, ok, restored.Status.ExpectedMachines, &dst.Status.ExpectedMachines) clusterv1.Convert_int32_To_Pointer_int32(src.Status.CurrentHealthy, ok, restored.Status.CurrentHealthy, &dst.Status.CurrentHealthy) clusterv1.Convert_int32_To_Pointer_int32(src.Status.RemediationsAllowed, ok, restored.Status.RemediationsAllowed, &dst.Status.RemediationsAllowed) @@ -558,6 +596,11 @@ func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Status.Initialization = initialization } + // Recover other values + if ok { + dst.Spec.Template.Spec.Taints = restored.Spec.Template.Spec.Taints + } + return nil } @@ -1637,6 +1680,13 @@ func Convert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in *clusterv1.Machin if err := autoConvert_v1beta2_MachineStatus_To_v1beta1_MachineStatus(in, out, s); err != nil { return err } + + // Convert v1beta2 Updating phase to v1beta1 Running as Updating did not exist in v1beta1. + // We don't have to support a round-trip as only the core CAPI controller should write the Phase field. + if out.Phase == "Updating" { + out.Phase = "Running" + } + if !reflect.DeepEqual(in.LastUpdated, metav1.Time{}) { out.LastUpdated = ptr.To(in.LastUpdated) } @@ -2285,8 +2335,6 @@ func convertToObjectReference(ref clusterv1.ContractVersionedObjectReference, na } func Convert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps(in *JSONSchemaProps, out *clusterv1.JSONSchemaProps, s apimachineryconversion.Scope) error { - // This conversion func is also required due to a bug in conversion gen that does not recognize the changes for converting bool to *bool. - // By implementing this func, autoConvert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps is generated properly. if err := autoConvert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps(in, out, s); err != nil { return err } diff --git a/api/core/v1beta1/conversion_test.go b/api/core/v1beta1/conversion_test.go index 10d557048460..172f76e186fb 100644 --- a/api/core/v1beta1/conversion_test.go +++ b/api/core/v1beta1/conversion_test.go @@ -514,6 +514,9 @@ func hubMachineSpec(in *clusterv1.MachineSpec, c randfill.Continue) { func hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) { c.FillNoCustom(in) + + in.Phase = []string{"Updating", "Running"}[c.Intn(2)] + // Drop empty structs with only omit empty fields. 
if in.Deprecated != nil { if in.Deprecated.V1Beta1 == nil || reflect.DeepEqual(in.Deprecated.V1Beta1, &clusterv1.MachineV1Beta1DeprecatedStatus{}) { diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go index eb54f254dd5a..c0514e67d223 100644 --- a/api/core/v1beta1/zz_generated.conversion.go +++ b/api/core/v1beta1/zz_generated.conversion.go @@ -1207,6 +1207,8 @@ func autoConvert_v1beta2_ClusterClassSpec_To_v1beta1_ClusterClassSpec(in *v1beta } else { out.Patches = nil } + // WARNING: in.Upgrade requires manual conversion: does not exist in peer-type + // WARNING: in.KubernetesVersions requires manual conversion: does not exist in peer-type return nil } @@ -3165,6 +3167,7 @@ func autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in *v1beta2.MachineS // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) // WARNING: in.Deletion requires manual conversion: does not exist in peer-type + // WARNING: in.Taints requires manual conversion: does not exist in peer-type return nil } diff --git a/api/core/v1beta1/zz_generated.openapi.go b/api/core/v1beta1/zz_generated.openapi.go index 13a78b23719c..723cf5a90c93 100644 --- a/api/core/v1beta1/zz_generated.openapi.go +++ b/api/core/v1beta1/zz_generated.openapi.go @@ -154,7 +154,6 @@ func schema_cluster_api_api_core_v1beta1_APIEndpoint(ref common.ReferenceCallbac }, }, }, - Required: []string{"host", "port"}, }, }, } diff --git a/api/core/v1beta2/cluster_types.go b/api/core/v1beta2/cluster_types.go index af0969853155..9666455e9035 100644 --- a/api/core/v1beta2/cluster_types.go +++ b/api/core/v1beta2/cluster_types.go @@ -80,17 +80,27 @@ const ( // failing due to an error. ClusterTopologyReconciledFailedReason = "ReconcileFailed" + // ClusterTopologyReconciledClusterCreatingReason documents reconciliation of a Cluster topology + // not yet created because the BeforeClusterCreate hook is blocking. + ClusterTopologyReconciledClusterCreatingReason = "ClusterCreating" + // ClusterTopologyReconciledControlPlaneUpgradePendingReason documents reconciliation of a Cluster topology // not yet completed because Control Plane is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. ClusterTopologyReconciledControlPlaneUpgradePendingReason = "ControlPlaneUpgradePending" // ClusterTopologyReconciledMachineDeploymentsCreatePendingReason documents reconciliation of a Cluster topology // not yet completed because at least one of the MachineDeployments is yet to be created. // This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable. + // + // Deprecated: please use ClusterUpgrading instead. ClusterTopologyReconciledMachineDeploymentsCreatePendingReason = "MachineDeploymentsCreatePending" // ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason documents reconciliation of a Cluster topology // not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. 
ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason = "MachineDeploymentsUpgradePending" // ClusterTopologyReconciledMachineDeploymentsUpgradeDeferredReason documents reconciliation of a Cluster topology @@ -99,11 +109,15 @@ const ( // ClusterTopologyReconciledMachinePoolsUpgradePendingReason documents reconciliation of a Cluster topology // not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. ClusterTopologyReconciledMachinePoolsUpgradePendingReason = "MachinePoolsUpgradePending" // ClusterTopologyReconciledMachinePoolsCreatePendingReason documents reconciliation of a Cluster topology // not yet completed because at least one of the MachinePools is yet to be created. // This generally happens because new MachinePool creations are held off while the ControlPlane is not stable. + // + // Deprecated: please use ClusterUpgrading instead. ClusterTopologyReconciledMachinePoolsCreatePendingReason = "MachinePoolsCreatePending" // ClusterTopologyReconciledMachinePoolsUpgradeDeferredReason documents reconciliation of a Cluster topology @@ -112,8 +126,13 @@ const ( // ClusterTopologyReconciledHookBlockingReason documents reconciliation of a Cluster topology // not yet completed because at least one of the lifecycle hooks is blocking. + // + // Deprecated: please use ClusterUpgrading instead. ClusterTopologyReconciledHookBlockingReason = "LifecycleHookBlocking" + // ClusterTopologyReconciledClusterUpgradingReason documents reconciliation of a Cluster topology + // not yet completed because a cluster upgrade is still in progress. + ClusterTopologyReconciledClusterUpgradingReason = "ClusterUpgrading" // ClusterTopologyReconciledClusterClassNotReconciledReason documents reconciliation of a Cluster topology not // yet completed because the ClusterClass has not reconciled yet. If this condition persists there may be an issue // with the ClusterClass surfaced in the ClusterClass status or controller logs. @@ -725,6 +744,16 @@ type ControlPlaneTopologyHealthCheckChecks struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 UnhealthyNodeConditions []UnhealthyNodeCondition `json:"unhealthyNodeConditions,omitempty"` + + // unhealthyMachineConditions contains a list of the machine conditions that determine + // whether a machine is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + UnhealthyMachineConditions []UnhealthyMachineCondition `json:"unhealthyMachineConditions,omitempty"` } // ControlPlaneTopologyHealthCheckRemediation configures if and how remediations are triggered if a control plane Machine is unhealthy. @@ -975,6 +1004,16 @@ type MachineDeploymentTopologyHealthCheckChecks struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 UnhealthyNodeConditions []UnhealthyNodeCondition `json:"unhealthyNodeConditions,omitempty"` + + // unhealthyMachineConditions contains a list of the machine conditions that determine + // whether a machine is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the machine is unhealthy. 
+ // + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + UnhealthyMachineConditions []UnhealthyMachineCondition `json:"unhealthyMachineConditions,omitempty"` } // MachineDeploymentTopologyHealthCheckRemediation configures if and how remediations are triggered if a MachineDeployment Machine is unhealthy. diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index 80d78f358553..12e8cc19c5d4 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -135,6 +135,22 @@ type ClusterClassSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=1000 Patches []ClusterClassPatch `json:"patches,omitempty"` + + // upgrade defines the upgrade configuration for clusters using this ClusterClass. + // +optional + Upgrade ClusterClassUpgrade `json:"upgrade,omitempty,omitzero"` + + // kubernetesVersions is the list of Kubernetes versions that can be + // used for clusters using this ClusterClass. + // The list of version must be ordered from the older to the newer version, and there should be + // at least one version for every minor in between the first and the last version. + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + // +kubebuilder:validation:items:MinLength=1 + // +kubebuilder:validation:items:MaxLength=256 + KubernetesVersions []string `json:"kubernetesVersions,omitempty"` } // InfrastructureClass defines the class for the infrastructure cluster. @@ -265,6 +281,16 @@ type ControlPlaneClassHealthCheckChecks struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 UnhealthyNodeConditions []UnhealthyNodeCondition `json:"unhealthyNodeConditions,omitempty"` + + // unhealthyMachineConditions contains a list of the machine conditions that determine + // whether a machine is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + UnhealthyMachineConditions []UnhealthyMachineCondition `json:"unhealthyMachineConditions,omitempty"` } // ControlPlaneClassHealthCheckRemediation configures if and how remediations are triggered if a control plane Machine is unhealthy. @@ -526,6 +552,16 @@ type MachineDeploymentClassHealthCheckChecks struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 UnhealthyNodeConditions []UnhealthyNodeCondition `json:"unhealthyNodeConditions,omitempty"` + + // unhealthyMachineConditions contains a list of the machine conditions that determine + // whether a machine is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + UnhealthyMachineConditions []UnhealthyMachineCondition `json:"unhealthyMachineConditions,omitempty"` } // MachineDeploymentClassHealthCheckRemediation configures if and how remediations are triggered if a MachineDeployment Machine is unhealthy. @@ -1240,6 +1276,24 @@ type ClusterClassPatch struct { External *ExternalPatchDefinition `json:"external,omitempty"` } +// ClusterClassUpgrade defines the upgrade configuration for clusters using the ClusterClass. 
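As a hypothetical sketch (not part of this diff) of the two ClusterClass additions above, using the `ClusterClassUpgrade` and `ClusterClassUpgradeExternal` types declared just below; the version list and extension handler name are invented, and the import path is assumed from this repository's layout:

```go
package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// clusterClassSpec shows an ordered list of supported Kubernetes versions
// (oldest to newest, with at least one version per minor in between) and an
// external runtime extension used to generate the upgrade plan.
func clusterClassSpec() clusterv1.ClusterClassSpec {
	return clusterv1.ClusterClassSpec{
		KubernetesVersions: []string{"v1.32.4", "v1.33.2", "v1.34.0"},
		Upgrade: clusterv1.ClusterClassUpgrade{
			External: clusterv1.ClusterClassUpgradeExternal{
				// Hypothetical handler name registered by a runtime extension.
				GenerateUpgradePlanExtension: "generate-upgrade-plan.my-extension",
			},
		},
	}
}
```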
+// +kubebuilder:validation:MinProperties=1 +type ClusterClassUpgrade struct { + // external defines external runtime extensions for upgrade operations. + // +optional + External ClusterClassUpgradeExternal `json:"external,omitempty,omitzero"` +} + +// ClusterClassUpgradeExternal defines external runtime extensions for upgrade operations. +// +kubebuilder:validation:MinProperties=1 +type ClusterClassUpgradeExternal struct { + // generateUpgradePlanExtension references an extension which is called to generate upgrade plan. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=512 + GenerateUpgradePlanExtension string `json:"generateUpgradePlanExtension,omitempty"` +} + // PatchDefinition defines a patch which is applied to customize the referenced templates. type PatchDefinition struct { // selector defines on which templates the patch should be applied. diff --git a/api/core/v1beta2/common_types.go b/api/core/v1beta2/common_types.go index 55c37a288bde..78b064afb12c 100644 --- a/api/core/v1beta2/common_types.go +++ b/api/core/v1beta2/common_types.go @@ -36,6 +36,10 @@ const ( // to track the name of the MachineDeployment topology it represents. ClusterTopologyMachineDeploymentNameLabel = "topology.cluster.x-k8s.io/deployment-name" + // ClusterTopologyUpgradeStepAnnotation tracks the version of the current upgrade step. + // It is only set when an upgrade is in progress, and it contains the control plane version computed by topology controller. + ClusterTopologyUpgradeStepAnnotation = "topology.internal.cluster.x-k8s.io/upgrade-step" + // ClusterTopologyHoldUpgradeSequenceAnnotation can be used to hold the entire MachineDeployment upgrade sequence. // If the annotation is set on a MachineDeployment topology in Cluster.spec.topology.workers, the Kubernetes upgrade // for this MachineDeployment topology and all subsequent ones is deferred. @@ -95,6 +99,9 @@ const ( // AnnotationsFromMachineAnnotation is the annotation set on nodes to track the annotations that originated from machines. AnnotationsFromMachineAnnotation = "cluster.x-k8s.io/annotations-from-machine" + // TaintsFromMachineAnnotation is the annotation set on nodes to track the taints that originated from machines. + TaintsFromMachineAnnotation = "cluster.x-k8s.io/taints-from-machine" + // OwnerNameAnnotation is the annotation set on nodes identifying the owner name. OwnerNameAnnotation = "cluster.x-k8s.io/owner-name" @@ -302,7 +309,7 @@ type MachineAddress struct { } // MachineAddresses is a slice of MachineAddress items to be used by infrastructure providers. -// +kubebuilder:validation:MaxItems=32 +// +kubebuilder:validation:MaxItems=128 // +listType=atomic type MachineAddresses []MachineAddress @@ -401,3 +408,58 @@ func (r *ContractVersionedObjectReference) GroupKind() schema.GroupKind { Kind: r.Kind, } } + +// MachineTaint defines a taint equivalent to corev1.Taint, but additionally having a propagation field. +type MachineTaint struct { + // key is the taint key to be applied to a node. + // Must be a valid qualified name of maximum size 63 characters + // with an optional subdomain prefix of maximum size 253 characters, + // separated by a `/`. + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=317 + // +kubebuilder:validation:Pattern=^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + // +kubebuilder:validation:XValidation:rule="self.contains('/') ? 
( self.split('/') [0].size() <= 253 && self.split('/') [1].size() <= 63 && self.split('/').size() == 2 ) : self.size() <= 63",message="key must be a valid qualified name of max size 63 characters with an optional subdomain prefix of max size 253 characters" + Key string `json:"key,omitempty"` + + // value is the taint value corresponding to the taint key. + // It must be a valid label value of maximum size 63 characters. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + Value string `json:"value,omitempty"` + + // effect is the effect for the taint. Valid values are NoSchedule, PreferNoSchedule and NoExecute. + // +required + // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute + Effect corev1.TaintEffect `json:"effect,omitempty"` + + // propagation defines how this taint should be propagated to nodes. + // Valid values are 'Always' and 'OnInitialization'. + // Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + // OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + // +required + Propagation MachineTaintPropagation `json:"propagation,omitempty"` +} + +// MachineTaintPropagation defines when a taint should be propagated to nodes. +// +kubebuilder:validation:Enum=Always;OnInitialization +type MachineTaintPropagation string + +const ( + // MachineTaintPropagationAlways means the taint should be continuously reconciled and kept on the node. + // - If an Always taint is added to the Machine, the taint will be added to the node. + // - If an Always taint is removed from the Machine, the taint will be removed from the node. + // - If an OnInitialization taint is changed to Always, the Machine controller will ensure the taint is set on the node. + // - If an Always taint is removed from the node, it will be re-added during reconciliation. + MachineTaintPropagationAlways MachineTaintPropagation = "Always" + + // MachineTaintPropagationOnInitialization means the taint should be set once during initialization and then + // left alone. + // - If an OnInitialization taint is added to the Machine, the taint will only be added to the node on initialization. + // - If an OnInitialization taint is removed from the Machine nothing will be changed on the node. + // - If an Always taint is changed to OnInitialization, the taint will only be added to the node on initialization. + // - If an OnInitialization taint is removed from the node, it will not be re-added during reconciliation. + MachineTaintPropagationOnInitialization MachineTaintPropagation = "OnInitialization" +) diff --git a/api/core/v1beta2/machine_phase_types.go b/api/core/v1beta2/machine_phase_types.go index 1ca955156444..b849ea61d654 100644 --- a/api/core/v1beta2/machine_phase_types.go +++ b/api/core/v1beta2/machine_phase_types.go @@ -45,6 +45,10 @@ const ( // become a Kubernetes Node in a Ready state. MachinePhaseRunning = MachinePhase("Running") + // MachinePhaseUpdating is the Machine state when the Machine + // is updating. + MachinePhaseUpdating = MachinePhase("Updating") + // MachinePhaseDeleting is the Machine state when a delete // request has been sent to the API Server, // but its infrastructure has not yet been fully deleted. 
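A hypothetical example (not part of this diff) of the new `MachineTaint` type and its two propagation modes; the taint keys and values are invented, and the import paths are assumed from this repository's layout:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// exampleTaints contrasts the Always and OnInitialization propagation modes.
func exampleTaints() []clusterv1.MachineTaint {
	return []clusterv1.MachineTaint{
		{
			// Continuously reconciled: if the taint is removed from the node
			// out of band, it is re-added on the next reconcile.
			Key:         "example.com/dedicated",
			Value:       "gpu",
			Effect:      corev1.TaintEffectNoSchedule,
			Propagation: clusterv1.MachineTaintPropagationAlways,
		},
		{
			// Applied only when the node is initialized; removing it from the
			// node later will not cause it to be re-added.
			Key:         "example.com/bootstrapping",
			Effect:      corev1.TaintEffectNoExecute,
			Propagation: clusterv1.MachineTaintPropagationOnInitialization,
		},
	}
}
```

Per the new Machine `spec.taints` documentation further down in this diff, only the taints listed there are managed by core Cluster API controllers; taints added by other components are left alone.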
diff --git a/api/core/v1beta2/machine_types.go b/api/core/v1beta2/machine_types.go index a60f736ba21c..399d3971ce2d 100644 --- a/api/core/v1beta2/machine_types.go +++ b/api/core/v1beta2/machine_types.go @@ -87,6 +87,17 @@ const ( // ManagedNodeAnnotationDomain is one of the CAPI managed Node annotation domains. ManagedNodeAnnotationDomain = "node.cluster.x-k8s.io" + + // PendingAcknowledgeMoveAnnotation is an internal annotation added by the MS controller to a machine when being + // moved from the oldMS to the newMS. The annotation is removed as soon as the MS controller get the acknowledgment about the + // replica being accounted from the corresponding MD. + // Note: The annotation is added when reconciling the oldMS, and it is removed when reconciling the newMS. + // Note: This annotation is used in pair with AcknowledgedMoveAnnotation on MachineSets. + PendingAcknowledgeMoveAnnotation = "in-place-updates.internal.cluster.x-k8s.io/pending-acknowledge-move" + + // UpdateInProgressAnnotation is an internal annotation added to machines by the controller owning the Machine when in-place update + // is started, e.g. by the MachineSet controller; the annotation will be removed by the Machine controller when in-place update is completed. + UpdateInProgressAnnotation = "in-place-updates.internal.cluster.x-k8s.io/update-in-progress" ) // Machine's Available condition and corresponding reasons. @@ -109,7 +120,7 @@ const ( // Machine's Ready condition and corresponding reasons. const ( // MachineReadyCondition is true if the Machine's deletionTimestamp is not set, Machine's BootstrapConfigReady, InfrastructureReady, - // NodeHealthy and HealthCheckSucceeded (if present) conditions are true; if other conditions are defined in spec.readinessGates, + // NodeHealthy and HealthCheckSucceeded (if present) conditions are true, Updating condition is false; if other conditions are defined in spec.readinessGates, // these conditions must be true as well. // Note: // - When summarizing the Deleting condition: @@ -151,6 +162,28 @@ const ( // MachineNotUpToDateReason surface when a Machine spec does not match the spec of the Machine's owner resource, e.g. KubeadmControlPlane or MachineDeployment. MachineNotUpToDateReason = "NotUpToDate" + + // MachineUpToDateUpdatingReason surface when a Machine spec matches the spec of the Machine's owner resource, + // but the Machine is still updating in-place. + MachineUpToDateUpdatingReason = "Updating" +) + +// Machine's Updating condition and corresponding reasons. +// Note: Updating condition is set by the Machine controller during in-place updates. +const ( + // MachineUpdatingCondition is true while an in-place update is in progress on the Machine. + // The condition is owned by the Machine controller and is used to track the progress of in-place updates. + // This condition is considered when computing the UpToDate condition. + MachineUpdatingCondition = "Updating" + + // MachineNotUpdatingReason surfaces when the Machine is not performing an in-place update. + MachineNotUpdatingReason = "NotUpdating" + + // MachineInPlaceUpdatingReason surfaces when the Machine is waiting for in-place update to complete. + MachineInPlaceUpdatingReason = "InPlaceUpdating" + + // MachineInPlaceUpdateFailedReason surfaces when the in-place update has failed. + MachineInPlaceUpdateFailedReason = "InPlaceUpdateFailed" ) // Machine's BootstrapConfigReady condition and corresponding reasons. @@ -276,6 +309,10 @@ const ( // defined by a MachineHealthCheck object. 
MachineHealthCheckUnhealthyNodeReason = "UnhealthyNode" + // MachineHealthCheckUnhealthyMachineReason surfaces when the machine does not pass the health checks + // defined by a MachineHealthCheck object. + MachineHealthCheckUnhealthyMachineReason = "UnhealthyMachine" + // MachineHealthCheckNodeStartupTimeoutReason surfaces when the node hosted on the machine does not appear within // the timeout defined by a MachineHealthCheck object. MachineHealthCheckNodeStartupTimeoutReason = "NodeStartupTimeout" @@ -451,6 +488,23 @@ type MachineSpec struct { // deletion contains configuration options for Machine deletion. // +optional Deletion MachineDeletionSpec `json:"deletion,omitempty,omitzero"` + + // taints are the node taints that Cluster API will manage. + // This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + // e.g. the node controller might add the node.kubernetes.io/not-ready taint. + // Only those taints defined in this list will be added or removed by core Cluster API controllers. + // + // There can be at most 64 taints. + // A pod would have to tolerate all existing taints to run on the corresponding node. + // + // NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + // +optional + // +listType=map + // +listMapKey=key + // +listMapKey=effect + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=64 + Taints []MachineTaint `json:"taints,omitempty"` } // MachineDeletionSpec contains configuration options for Machine deletion. @@ -502,7 +556,7 @@ type MachineReadinessGate struct { type MachineStatus struct { // conditions represents the observations of a Machine's current state. // Known condition types are Available, Ready, UpToDate, BootstrapConfigReady, InfrastructureReady, NodeReady, - // NodeHealthy, Deleting, Paused. + // NodeHealthy, Updating, Deleting, Paused. // If a MachineHealthCheck is targeting this machine, also HealthCheckSucceeded, OwnerRemediated conditions are added. // Additionally control plane Machines controlled by KubeadmControlPlane will have following additional conditions: // APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy, EtcdMemberHealthy. @@ -537,7 +591,7 @@ type MachineStatus struct { // phase represents the current phase of machine actuation. // +optional - // +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Deleting;Deleted;Failed;Unknown + // +kubebuilder:validation:Enum=Pending;Provisioning;Provisioned;Running;Updating;Deleting;Deleted;Failed;Unknown Phase string `json:"phase,omitempty"` // certificatesExpiryDate is the expiry date of the machine certificates. 
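A minimal sketch (not part of this diff) of how a client could observe the new in-place update state through the `Updating` condition and `Updating` phase added above; the helper name is invented, and the import paths are assumed from this repository's layout:

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// isUpdatingInPlace reports whether an in-place update is in progress on the
// Machine, based on the Updating condition owned by the Machine controller
// and on the Updating phase.
func isUpdatingInPlace(m *clusterv1.Machine) bool {
	return m.Status.GetTypedPhase() == clusterv1.MachinePhaseUpdating ||
		meta.IsStatusConditionTrue(m.Status.Conditions, clusterv1.MachineUpdatingCondition)
}
```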
@@ -695,6 +749,7 @@ func (m *MachineStatus) GetTypedPhase() MachinePhase { MachinePhaseProvisioning, MachinePhaseProvisioned, MachinePhaseRunning, + MachinePhaseUpdating, MachinePhaseDeleting, MachinePhaseDeleted, MachinePhaseFailed: diff --git a/api/core/v1beta2/machinehealthcheck_types.go b/api/core/v1beta2/machinehealthcheck_types.go index 9a7e31cae44d..9a1f66bc8b07 100644 --- a/api/core/v1beta2/machinehealthcheck_types.go +++ b/api/core/v1beta2/machinehealthcheck_types.go @@ -111,6 +111,16 @@ type MachineHealthCheckChecks struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=100 UnhealthyNodeConditions []UnhealthyNodeCondition `json:"unhealthyNodeConditions,omitempty"` + + // unhealthyMachineConditions contains a list of the machine conditions that determine + // whether a machine is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + UnhealthyMachineConditions []UnhealthyMachineCondition `json:"unhealthyMachineConditions,omitempty"` } // MachineHealthCheckRemediation configures if and how remediations are triggered if a Machine is unhealthy. @@ -227,7 +237,33 @@ type UnhealthyNodeCondition struct { // timeoutSeconds is the duration that a node must be in a given status for, // after which the node is considered unhealthy. - // For example, with a value of "1h", the node must match the status + // For example, with a value of "3600", the node must match the status + // for at least 1 hour before being considered unhealthy. + // +required + // +kubebuilder:validation:Minimum=0 + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` +} + +// UnhealthyMachineCondition represents a Machine condition type and value with a timeout +// specified as a duration. When the named condition has been in the given +// status for at least the timeout value, a machine is considered unhealthy. +type UnhealthyMachineCondition struct { + // type of Machine condition + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=316 + // +kubebuilder:validation:XValidation:rule="!(self in ['Ready','Available','HealthCheckSucceeded','OwnerRemediated','ExternallyRemediated'])",message="type must not be one of: Ready, Available, HealthCheckSucceeded, OwnerRemediated, ExternallyRemediated" + // +required + Type string `json:"type,omitempty"` + + // status of the condition, one of True, False, Unknown. + // +required + // +kubebuilder:validation:Enum=True;False;Unknown + Status metav1.ConditionStatus `json:"status,omitempty"` + + // timeoutSeconds is the duration that a machine must be in a given status for, + // after which the machine is considered unhealthy. + // For example, with a value of "3600", the machine must match the status // for at least 1 hour before being considered unhealthy. 
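A minimal sketch of how the new machine-condition based checks could be configured, assuming the checks live under the MachineHealthCheck spec; the exampleChecks helper and the choice of the InfrastructureReady condition are purely illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// exampleChecks builds an illustrative checks block: a machine is considered unhealthy
// if its InfrastructureReady condition has reported False for at least 5 minutes.
func exampleChecks() clusterv1.MachineHealthCheckChecks {
	return clusterv1.MachineHealthCheckChecks{
		UnhealthyMachineConditions: []clusterv1.UnhealthyMachineCondition{{
			Type:           "InfrastructureReady",
			Status:         metav1.ConditionFalse,
			TimeoutSeconds: ptr.To[int32](300),
		}},
	}
}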
// +required // +kubebuilder:validation:Minimum=0 diff --git a/api/core/v1beta2/machineset_types.go b/api/core/v1beta2/machineset_types.go index 8a5a92db7d1b..80cb19d36256 100644 --- a/api/core/v1beta2/machineset_types.go +++ b/api/core/v1beta2/machineset_types.go @@ -33,6 +33,29 @@ const ( // MachineSetFinalizer is the finalizer used by the MachineSet controller to // ensure ordered cleanup of corresponding Machines when a Machineset is being deleted. MachineSetFinalizer = "cluster.x-k8s.io/machineset" + + // MachineSetMoveMachinesToMachineSetAnnotation is an internal annotation added by the MD controller to the oldMS + // when it should scale down by moving machines that can be updated in-place to the newMS instead of deleting them. + // The annotation value is the newMS name. + // Note: This annotation is used in combination with MachineSetReceiveMachinesFromMachineSetsAnnotation to perform a two-way check before moving a machine from oldMS to newMS: + // + // "oldMS must have: move to newMS" and "newMS must have: receive replicas from oldMS" + MachineSetMoveMachinesToMachineSetAnnotation = "in-place-updates.internal.cluster.x-k8s.io/move-machines-to-machineset" + + // MachineSetReceiveMachinesFromMachineSetsAnnotation is an internal annotation added by the MD controller to the newMS + // when it should receive replicas from oldMSs as the first step of an in-place update operation. + // The annotation value is a comma-separated list of oldMSs. + // Note: This annotation is used in combination with MachineSetMoveMachinesToMachineSetAnnotation to perform a two-way check before moving a machine from oldMS to newMS: + // + // "oldMS must have: move to newMS" and "newMS must have: receive replicas from oldMS" + MachineSetReceiveMachinesFromMachineSetsAnnotation = "in-place-updates.internal.cluster.x-k8s.io/receive-machines-from-machinesets" + + // AcknowledgedMoveAnnotation is an internal annotation with a list of machines added by the MD controller + // to a MachineSet when it acknowledges a machine that is pending acknowledgment after being moved from an oldMS. + // The annotation value is a comma-separated list of Machines already acknowledged; a machine is dropped + // from this annotation as soon as pending-acknowledge-move is removed from the machine; the annotation is dropped when empty. + // Note: This annotation is used in combination with PendingAcknowledgeMoveAnnotation on Machines. + AcknowledgedMoveAnnotation = "in-place-updates.internal.cluster.x-k8s.io/acknowledged-move" ) // MachineSetSpec defines the desired state of MachineSet. diff --git a/api/core/v1beta2/v1beta1_condition_consts.go b/api/core/v1beta2/v1beta1_condition_consts.go index aef565c0aa2f..b619c6e0d7f2 100644 --- a/api/core/v1beta2/v1beta1_condition_consts.go +++ b/api/core/v1beta2/v1beta1_condition_consts.go @@ -157,6 +157,11 @@ const ( // UnhealthyNodeConditionV1Beta1Reason is the reason used when a machine's node has one of the MachineHealthCheck's unhealthy conditions. UnhealthyNodeConditionV1Beta1Reason = "UnhealthyNode" + + // UnhealthyMachineConditionV1Beta1Reason is the reason used when a machine has one of the MachineHealthCheck's unhealthy conditions. + // When both machine and node issues are detected, this reason takes precedence over node-related reasons + // (NodeNotFoundV1Beta1Reason, NodeStartupTimeoutV1Beta1Reason, UnhealthyNodeConditionV1Beta1Reason). + UnhealthyMachineConditionV1Beta1Reason = "UnhealthyMachine" ) const ( @@ -295,17 +300,27 @@ const ( // failing due to an error.
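The two-way check described in the MachineSet annotation comments above can be illustrated with a small hypothetical helper; this is not the controller's actual logic, only a sketch of how the paired annotations are meant to be interpreted.

package example

import (
	"strings"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// canMoveMachines sketches the two-way check: the oldMS must point at the newMS,
// and the newMS must list the oldMS as one of its sources.
func canMoveMachines(oldMS, newMS *clusterv1.MachineSet) bool {
	moveTo, ok := oldMS.Annotations[clusterv1.MachineSetMoveMachinesToMachineSetAnnotation]
	if !ok || moveTo != newMS.Name {
		return false
	}
	receiveFrom, ok := newMS.Annotations[clusterv1.MachineSetReceiveMachinesFromMachineSetsAnnotation]
	if !ok {
		return false
	}
	for _, name := range strings.Split(receiveFrom, ",") {
		if strings.TrimSpace(name) == oldMS.Name {
			return true
		}
	}
	return false
}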
TopologyReconcileFailedV1Beta1Reason = "TopologyReconcileFailed" + // TopologyReconciledClusterCreatingV1Beta1Reason documents reconciliation of a Cluster topology + // not yet created because the BeforeClusterCreate hook is blocking. + TopologyReconciledClusterCreatingV1Beta1Reason = "ClusterCreating" + // TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because Control Plane is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason = "ControlPlaneUpgradePending" // TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the MachineDeployments is yet to be created. // This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason = "MachineDeploymentsCreatePending" // TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason = "MachineDeploymentsUpgradePending" // TopologyReconciledMachineDeploymentsUpgradeDeferredV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology @@ -314,11 +329,15 @@ const ( // TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason = "MachinePoolsUpgradePending" // TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the MachinePools is yet to be created. // This generally happens because new MachinePool creations are held off while the ControlPlane is not stable. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason = "MachinePoolsCreatePending" // TopologyReconciledMachinePoolsUpgradeDeferredV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology @@ -327,8 +346,14 @@ const ( // TopologyReconciledHookBlockingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the lifecycle hooks is blocking. + // + // Deprecated: please use ClusterUpgrading instead. TopologyReconciledHookBlockingV1Beta1Reason = "LifecycleHookBlocking" + // TopologyReconciledClusterUpgradingV1Beta1Reason documents reconciliation of a Cluster topology + // not yet completed because a cluster upgrade is still in progress. + TopologyReconciledClusterUpgradingV1Beta1Reason = "ClusterUpgrading" + // TopologyReconciledClusterClassNotReconciledV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology not // yet completed because the ClusterClass has not reconciled yet. 
If this condition persists there may be an issue // with the ClusterClass surfaced in the ClusterClass status or controller logs. diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go index 49d1f6655253..5adb8a56c456 100644 --- a/api/core/v1beta2/zz_generated.deepcopy.go +++ b/api/core/v1beta2/zz_generated.deepcopy.go @@ -253,6 +253,12 @@ func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + out.Upgrade = in.Upgrade + if in.KubernetesVersions != nil { + in, out := &in.KubernetesVersions, &out.KubernetesVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClassSpec. @@ -363,6 +369,37 @@ func (in *ClusterClassTemplateReference) DeepCopy() *ClusterClassTemplateReferen return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClassUpgrade) DeepCopyInto(out *ClusterClassUpgrade) { + *out = *in + out.External = in.External +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClassUpgrade. +func (in *ClusterClassUpgrade) DeepCopy() *ClusterClassUpgrade { + if in == nil { + return nil + } + out := new(ClusterClassUpgrade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterClassUpgradeExternal) DeepCopyInto(out *ClusterClassUpgradeExternal) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClassUpgradeExternal. +func (in *ClusterClassUpgradeExternal) DeepCopy() *ClusterClassUpgradeExternal { + if in == nil { + return nil + } + out := new(ClusterClassUpgradeExternal) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClassV1Beta1DeprecatedStatus) DeepCopyInto(out *ClusterClassV1Beta1DeprecatedStatus) { *out = *in @@ -803,6 +840,13 @@ func (in *ControlPlaneClassHealthCheckChecks) DeepCopyInto(out *ControlPlaneClas (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UnhealthyMachineConditions != nil { + in, out := &in.UnhealthyMachineConditions, &out.UnhealthyMachineConditions + *out = make([]UnhealthyMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneClassHealthCheckChecks. @@ -979,6 +1023,13 @@ func (in *ControlPlaneTopologyHealthCheckChecks) DeepCopyInto(out *ControlPlaneT (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UnhealthyMachineConditions != nil { + in, out := &in.UnhealthyMachineConditions, &out.UnhealthyMachineConditions + *out = make([]UnhealthyMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneTopologyHealthCheckChecks. 
@@ -1567,6 +1618,13 @@ func (in *MachineDeploymentClassHealthCheckChecks) DeepCopyInto(out *MachineDepl (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UnhealthyMachineConditions != nil { + in, out := &in.UnhealthyMachineConditions, &out.UnhealthyMachineConditions + *out = make([]UnhealthyMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentClassHealthCheckChecks. @@ -2034,6 +2092,13 @@ func (in *MachineDeploymentTopologyHealthCheckChecks) DeepCopyInto(out *MachineD (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UnhealthyMachineConditions != nil { + in, out := &in.UnhealthyMachineConditions, &out.UnhealthyMachineConditions + *out = make([]UnhealthyMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentTopologyHealthCheckChecks. @@ -2439,6 +2504,13 @@ func (in *MachineHealthCheckChecks) DeepCopyInto(out *MachineHealthCheckChecks) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UnhealthyMachineConditions != nil { + in, out := &in.UnhealthyMachineConditions, &out.UnhealthyMachineConditions + *out = make([]UnhealthyMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckChecks. @@ -3369,6 +3441,11 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { copy(*out, *in) } in.Deletion.DeepCopyInto(&out.Deletion) + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]MachineTaint, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. @@ -3427,6 +3504,21 @@ func (in *MachineStatus) DeepCopy() *MachineStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTaint) DeepCopyInto(out *MachineTaint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTaint. +func (in *MachineTaint) DeepCopy() *MachineTaint { + if in == nil { + return nil + } + out := new(MachineTaint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { *out = *in @@ -3664,6 +3756,26 @@ func (in *Topology) DeepCopy() *Topology { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnhealthyMachineCondition) DeepCopyInto(out *UnhealthyMachineCondition) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyMachineCondition. +func (in *UnhealthyMachineCondition) DeepCopy() *UnhealthyMachineCondition { + if in == nil { + return nil + } + out := new(UnhealthyMachineCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UnhealthyNodeCondition) DeepCopyInto(out *UnhealthyNodeCondition) { *out = *in diff --git a/api/core/v1beta2/zz_generated.openapi.go b/api/core/v1beta2/zz_generated.openapi.go index 6ada26c78cd5..6eecc0d326c7 100644 --- a/api/core/v1beta2/zz_generated.openapi.go +++ b/api/core/v1beta2/zz_generated.openapi.go @@ -42,6 +42,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassStatusVariable": schema_cluster_api_api_core_v1beta2_ClusterClassStatusVariable(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassStatusVariableDefinition": schema_cluster_api_api_core_v1beta2_ClusterClassStatusVariableDefinition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassTemplateReference": schema_cluster_api_api_core_v1beta2_ClusterClassTemplateReference(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgrade": schema_cluster_api_api_core_v1beta2_ClusterClassUpgrade(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgradeExternal": schema_cluster_api_api_core_v1beta2_ClusterClassUpgradeExternal(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassV1Beta1DeprecatedStatus": schema_cluster_api_api_core_v1beta2_ClusterClassV1Beta1DeprecatedStatus(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassVariable": schema_cluster_api_api_core_v1beta2_ClusterClassVariable(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassVariableMetadata": schema_cluster_api_api_core_v1beta2_ClusterClassVariableMetadata(ref), @@ -161,6 +163,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineSetV1Beta1DeprecatedStatus": schema_cluster_api_api_core_v1beta2_MachineSetV1Beta1DeprecatedStatus(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineSpec": schema_cluster_api_api_core_v1beta2_MachineSpec(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineStatus": schema_cluster_api_api_core_v1beta2_MachineStatus(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineTaint": schema_cluster_api_api_core_v1beta2_MachineTaint(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineTemplateSpec": schema_cluster_api_api_core_v1beta2_MachineTemplateSpec(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineV1Beta1DeprecatedStatus": schema_cluster_api_api_core_v1beta2_MachineV1Beta1DeprecatedStatus(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.NetworkRanges": schema_cluster_api_api_core_v1beta2_NetworkRanges(ref), @@ -171,6 +174,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachineDeploymentClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.PatchSelectorMatchMachinePoolClass": schema_cluster_api_api_core_v1beta2_PatchSelectorMatchMachinePoolClass(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.Topology": schema_cluster_api_api_core_v1beta2_Topology(ref), + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition": schema_cluster_api_api_core_v1beta2_UnhealthyMachineCondition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition": schema_cluster_api_api_core_v1beta2_UnhealthyNodeCondition(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.ValidationRule": schema_cluster_api_api_core_v1beta2_ValidationRule(ref), "sigs.k8s.io/cluster-api/api/core/v1beta2.VariableSchema": 
schema_cluster_api_api_core_v1beta2_VariableSchema(ref), @@ -617,12 +621,39 @@ func schema_cluster_api_api_core_v1beta2_ClusterClassSpec(ref common.ReferenceCa }, }, }, + "upgrade": { + SchemaProps: spec.SchemaProps{ + Description: "upgrade defines the upgrade configuration for clusters using this ClusterClass.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgrade"), + }, + }, + "kubernetesVersions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "kubernetesVersions is the list of Kubernetes versions that can be used for clusters using this ClusterClass. The list of version must be ordered from the older to the newer version, and there should be at least one version for every minor in between the first and the last version.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"infrastructure", "controlPlane"}, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterAvailabilityGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassPatch", "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassVariable", "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.InfrastructureClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.WorkersClass"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterAvailabilityGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassPatch", "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgrade", "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassVariable", "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.InfrastructureClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.WorkersClass"}, } } @@ -823,6 +854,48 @@ func schema_cluster_api_api_core_v1beta2_ClusterClassTemplateReference(ref commo } } +func schema_cluster_api_api_core_v1beta2_ClusterClassUpgrade(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterClassUpgrade defines the upgrade configuration for clusters using the ClusterClass.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "external": { + SchemaProps: spec.SchemaProps{ + Description: "external defines external runtime extensions for upgrade operations.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgradeExternal"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/core/v1beta2.ClusterClassUpgradeExternal"}, + } +} + +func schema_cluster_api_api_core_v1beta2_ClusterClassUpgradeExternal(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterClassUpgradeExternal defines external runtime extensions for upgrade operations.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "generateUpgradePlanExtension": { + SchemaProps: spec.SchemaProps{ + Description: "generateUpgradePlanExtension references an extension which is called to generate upgrade plan.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, 
+ } +} + func schema_cluster_api_api_core_v1beta2_ClusterClassV1Beta1DeprecatedStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1616,11 +1689,30 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneClassHealthCheckChecks(ref }, }, }, + "unhealthyMachineConditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "unhealthyMachineConditions contains a list of the machine conditions that determine whether a machine is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the machine is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -1899,11 +1991,30 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneTopologyHealthCheckChecks(r }, }, }, + "unhealthyMachineConditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "unhealthyMachineConditions contains a list of the machine conditions that determine whether a machine is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the machine is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -2920,11 +3031,30 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentClassHealthCheckChecks }, }, }, + "unhealthyMachineConditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "unhealthyMachineConditions contains a list of the machine conditions that determine whether a machine is considered unhealthy. The conditions are combined in a logical OR, i.e. 
if any of the conditions is met, the machine is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -3700,11 +3830,30 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentTopologyHealthCheckChe }, }, }, + "unhealthyMachineConditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "unhealthyMachineConditions contains a list of the machine conditions that determine whether a machine is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the machine is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -4332,11 +4481,30 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckChecks(ref common.Ref }, }, }, + "unhealthyMachineConditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "unhealthyMachineConditions contains a list of the machine conditions that determine whether a machine is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the machine is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyMachineCondition", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -5996,12 +6164,35 @@ func schema_cluster_api_api_core_v1beta2_MachineSpec(ref common.ReferenceCallbac Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeletionSpec"), }, }, + "taints": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "key", + "effect", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "taints are the node taints that Cluster API will manage. This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, e.g. the node controller might add the node.kubernetes.io/not-ready taint. Only those taints defined in this list will be added or removed by core Cluster API controllers.\n\nThere can be at most 64 taints. 
A pod would have to tolerate all existing taints to run on the corresponding node.\n\nNOTE: This list is implemented as a \"map\" type, meaning that individual elements can be managed by different owners.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineTaint"), + }, + }, + }, + }, + }, }, Required: []string{"clusterName", "bootstrap", "infrastructureRef"}, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta2.Bootstrap", "sigs.k8s.io/cluster-api/api/core/v1beta2.ContractVersionedObjectReference", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeletionSpec", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.Bootstrap", "sigs.k8s.io/cluster-api/api/core/v1beta2.ContractVersionedObjectReference", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeletionSpec", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineTaint"}, } } @@ -6022,7 +6213,7 @@ func schema_cluster_api_api_core_v1beta2_MachineStatus(ref common.ReferenceCallb }, }, SchemaProps: spec.SchemaProps{ - Description: "conditions represents the observations of a Machine's current state. Known condition types are Available, Ready, UpToDate, BootstrapConfigReady, InfrastructureReady, NodeReady, NodeHealthy, Deleting, Paused. If a MachineHealthCheck is targeting this machine, also HealthCheckSucceeded, OwnerRemediated conditions are added. Additionally control plane Machines controlled by KubeadmControlPlane will have following additional conditions: APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy, EtcdMemberHealthy.", + Description: "conditions represents the observations of a Machine's current state. Known condition types are Available, Ready, UpToDate, BootstrapConfigReady, InfrastructureReady, NodeReady, NodeHealthy, Updating, Deleting, Paused. If a MachineHealthCheck is targeting this machine, also HealthCheckSucceeded, OwnerRemediated conditions are added. Additionally control plane Machines controlled by KubeadmControlPlane will have following additional conditions: APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy, EtcdMemberHealthy.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -6114,6 +6305,48 @@ func schema_cluster_api_api_core_v1beta2_MachineStatus(ref common.ReferenceCallb } } +func schema_cluster_api_api_core_v1beta2_MachineTaint(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineTaint defines a taint equivalent to corev1.Taint, but additionally having a propagation field.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "key is the taint key to be applied to a node. Must be a valid qualified name of maximum size 63 characters with an optional subdomain prefix of maximum size 253 characters, separated by a `/`.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "value is the taint value corresponding to the taint key. 
It must be a valid label value of maximum size 63 characters.", + Type: []string{"string"}, + Format: "", + }, + }, + "effect": { + SchemaProps: spec.SchemaProps{ + Description: "effect is the effect for the taint. Valid values are NoSchedule, PreferNoSchedule and NoExecute.", + Type: []string{"string"}, + Format: "", + }, + }, + "propagation": { + SchemaProps: spec.SchemaProps{ + Description: "propagation defines how this taint should be propagated to nodes. Valid values are 'Always' and 'OnInitialization'. Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key", "effect", "propagation"}, + }, + }, + } +} + func schema_cluster_api_api_core_v1beta2_MachineTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6518,6 +6751,41 @@ func schema_cluster_api_api_core_v1beta2_Topology(ref common.ReferenceCallback) } } +func schema_cluster_api_api_core_v1beta2_UnhealthyMachineCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UnhealthyMachineCondition represents a Machine condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a machine is considered unhealthy.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "type of Machine condition", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "timeoutSeconds is the duration that a machine must be in a given status for, after which the machine is considered unhealthy. For example, with a value of \"3600\", the machine must match the status for at least 1 hour before being considered unhealthy.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"type", "status", "timeoutSeconds"}, + }, + }, + } +} + func schema_cluster_api_api_core_v1beta2_UnhealthyNodeCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6541,7 +6809,7 @@ func schema_cluster_api_api_core_v1beta2_UnhealthyNodeCondition(ref common.Refer }, "timeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. For example, with a value of \"1h\", the node must match the status for at least 1 hour before being considered unhealthy.", + Description: "timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. 
For example, with a value of \"3600\", the node must match the status for at least 1 hour before being considered unhealthy.", Type: []string{"integer"}, Format: "int32", }, diff --git a/api/runtime/hooks/v1alpha1/common_types.go b/api/runtime/hooks/v1alpha1/common_types.go index 8838dfaa6ac0..f184c9c7f953 100644 --- a/api/runtime/hooks/v1alpha1/common_types.go +++ b/api/runtime/hooks/v1alpha1/common_types.go @@ -134,3 +134,15 @@ func (r *CommonRetryResponse) GetRetryAfterSeconds() int32 { func (r *CommonRetryResponse) SetRetryAfterSeconds(retryAfterSeconds int32) { r.RetryAfterSeconds = retryAfterSeconds } + +// PatchType defines the supported patch types. +// +kubebuilder:validation:Enum=JSONPatch;JSONMergePatch +type PatchType string + +const ( + // JSONPatchType identifies a https://datatracker.ietf.org/doc/html/rfc6902 JSON patch. + JSONPatchType PatchType = "JSONPatch" + + // JSONMergePatchType identifies a https://datatracker.ietf.org/doc/html/rfc7386 JSON merge patch. + JSONMergePatchType PatchType = "JSONMergePatch" +) diff --git a/api/runtime/hooks/v1alpha1/inplaceupdate_types.go b/api/runtime/hooks/v1alpha1/inplaceupdate_types.go new file mode 100644 index 000000000000..a301c39e9aa6 --- /dev/null +++ b/api/runtime/hooks/v1alpha1/inplaceupdate_types.go @@ -0,0 +1,254 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" +) + +// CanUpdateMachineRequest is the request of the CanUpdateMachine hook. +// +kubebuilder:object:root=true +type CanUpdateMachineRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // current contains the current state of the Machine and related objects. + // +required + Current CanUpdateMachineRequestObjects `json:"current,omitempty,omitzero"` + + // desired contains the desired state of the Machine and related objects. + // +required + Desired CanUpdateMachineRequestObjects `json:"desired,omitempty,omitzero"` +} + +// CanUpdateMachineRequestObjects groups objects for CanUpdateMachineRequest. +type CanUpdateMachineRequestObjects struct { + // machine is the full Machine object. + // +required + Machine clusterv1.Machine `json:"machine,omitempty,omitzero"` + + // infrastructureMachine is the infra Machine object. + // +required + InfrastructureMachine runtime.RawExtension `json:"infrastructureMachine,omitempty,omitzero"` + + // bootstrapConfig is the bootstrap config object. + // +optional + BootstrapConfig runtime.RawExtension `json:"bootstrapConfig,omitempty,omitzero"` +} + +var _ ResponseObject = &CanUpdateMachineResponse{} + +// CanUpdateMachineResponse is the response of the CanUpdateMachine hook. 
+// +kubebuilder:object:root=true +type CanUpdateMachineResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonResponse contains Status and Message fields common to all response types. + CommonResponse `json:",inline"` + + // machinePatch when applied to the current Machine spec, indicates changes handled in-place. + // Only fields in spec have to be covered by the patch. + // +optional + MachinePatch Patch `json:"machinePatch,omitempty,omitzero"` + + // infrastructureMachinePatch indicates infra Machine spec changes handled in-place. + // Only fields in spec have to be covered by the patch. + // +optional + InfrastructureMachinePatch Patch `json:"infrastructureMachinePatch,omitempty,omitzero"` + + // bootstrapConfigPatch indicates bootstrap config spec changes handled in-place. + // Only fields in spec have to be covered by the patch. + // +optional + BootstrapConfigPatch Patch `json:"bootstrapConfigPatch,omitempty,omitzero"` +} + +// Patch is a single patch (JSONPatch or JSONMergePatch) which can include multiple operations. +type Patch struct { + // patchType JSONPatch or JSONMergePatch. + // +required + PatchType PatchType `json:"patchType,omitempty"` + + // patch data for the target object. + // +required + Patch []byte `json:"patch,omitempty"` +} + +// IsDefined returns true if one of the fields of Patch is set. +func (p *Patch) IsDefined() bool { + return p.PatchType != "" || len(p.Patch) > 0 +} + +// CanUpdateMachine is the hook that will be called to determine if an extension +// can handle specific machine changes for in-place updates. +func CanUpdateMachine(*CanUpdateMachineRequest, *CanUpdateMachineResponse) {} + +// CanUpdateMachineSetRequest is the request of the CanUpdateMachineSet hook. +// +kubebuilder:object:root=true +type CanUpdateMachineSetRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // current contains the current state of the MachineSet and related objects. + // +required + Current CanUpdateMachineSetRequestObjects `json:"current,omitempty,omitzero"` + + // desired contains the desired state of the MachineSet and related objects. + // +required + Desired CanUpdateMachineSetRequestObjects `json:"desired,omitempty,omitzero"` +} + +// CanUpdateMachineSetRequestObjects groups objects for CanUpdateMachineSetRequest. +type CanUpdateMachineSetRequestObjects struct { + // machineSet is the full MachineSet object. + // Only fields in spec.template.spec have to be covered by the patch. + // +required + MachineSet clusterv1.MachineSet `json:"machineSet,omitempty,omitzero"` + + // infrastructureMachineTemplate is the provider-specific InfrastructureMachineTemplate object. + // Only fields in spec.template.spec have to be covered by the patch. + // +required + InfrastructureMachineTemplate runtime.RawExtension `json:"infrastructureMachineTemplate,omitempty,omitzero"` + + // bootstrapConfigTemplate is the provider-specific BootstrapConfigTemplate object. + // Only fields in spec.template.spec have to be covered by the patch. + // +optional + BootstrapConfigTemplate runtime.RawExtension `json:"bootstrapConfigTemplate,omitempty,omitzero"` +} + +var _ ResponseObject = &CanUpdateMachineSetResponse{} + +// CanUpdateMachineSetResponse is the response of the CanUpdateMachineSet hook. 
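The following is a minimal, hypothetical Runtime Extension handler sketch (not taken from this change) for the CanUpdateMachine hook defined above and the UpdateMachine hook defined further below. The handler names, the spec.memoryMiB field, and the applyDesiredState helper are made up for illustration, and error handling is reduced to the essentials.

package example

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1"
)

// canUpdateMachineHandler declares which of the requested changes this extension can
// reconcile in-place, by returning patches against the current objects.
func canUpdateMachineHandler(_ context.Context, req *runtimehooksv1.CanUpdateMachineRequest, resp *runtimehooksv1.CanUpdateMachineResponse) {
	_ = req // a real extension would compare req.Current with req.Desired here

	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	// Claim the (made-up) spec.memoryMiB change on the InfrastructureMachine as handled in-place.
	resp.InfrastructureMachinePatch = runtimehooksv1.Patch{
		PatchType: runtimehooksv1.JSONMergePatchType,
		Patch:     []byte(`{"spec":{"memoryMiB":16384}}`),
	}
}

// updateMachineHandler performs the actual in-place update; it is idempotent and reports
// progress through the CommonRetryResponse fields (Success with RetryAfterSeconds > 0 means
// still in progress, Success with RetryAfterSeconds == 0 means completed, Failure means failed).
func updateMachineHandler(_ context.Context, req *runtimehooksv1.UpdateMachineRequest, resp *runtimehooksv1.UpdateMachineResponse) {
	done, err := applyDesiredState(req.Desired)
	if err != nil {
		resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
		resp.SetMessage(err.Error())
		return
	}
	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	if !done {
		// Ask Cluster API to call the hook again while the update is still in progress.
		resp.SetRetryAfterSeconds(30)
	}
}

// applyDesiredState is a placeholder for the provider-specific logic that drives the host
// towards the desired Machine, InfrastructureMachine and BootstrapConfig state.
func applyDesiredState(_ runtimehooksv1.UpdateMachineRequestObjects) (bool, error) {
	return true, nil
}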
+// +kubebuilder:object:root=true +type CanUpdateMachineSetResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonResponse contains Status and Message fields common to all response types. + CommonResponse `json:",inline"` + + // machineSetPatch when applied to the current MachineSet spec, indicates changes handled in-place. + // +optional + MachineSetPatch Patch `json:"machineSetPatch,omitempty,omitzero"` + + // infrastructureMachineTemplatePatch indicates infra template spec changes handled in-place. + // +optional + InfrastructureMachineTemplatePatch Patch `json:"infrastructureMachineTemplatePatch,omitempty,omitzero"` + + // bootstrapConfigTemplatePatch indicates bootstrap template spec changes handled in-place. + // +optional + BootstrapConfigTemplatePatch Patch `json:"bootstrapConfigTemplatePatch,omitempty,omitzero"` +} + +// CanUpdateMachineSet is the hook that will be called to determine if an extension +// can handle specific MachineSet changes for in-place updates. +func CanUpdateMachineSet(*CanUpdateMachineSetRequest, *CanUpdateMachineSetResponse) {} + +// UpdateMachineRequest is the request of the UpdateMachine hook. +// +kubebuilder:object:root=true +type UpdateMachineRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // desired contains the desired state of the Machine and related objects. + // +required + Desired UpdateMachineRequestObjects `json:"desired,omitempty,omitzero"` +} + +// UpdateMachineRequestObjects groups objects for UpdateMachineRequest. +type UpdateMachineRequestObjects struct { + // machine is the full Machine object. + // +required + Machine clusterv1.Machine `json:"machine,omitempty,omitzero"` + + // infrastructureMachine is the infra Machine object. + // +required + InfrastructureMachine runtime.RawExtension `json:"infrastructureMachine,omitempty,omitzero"` + + // bootstrapConfig is the bootstrap config object. + // +optional + BootstrapConfig runtime.RawExtension `json:"bootstrapConfig,omitempty,omitzero"` +} + +var _ RetryResponseObject = &UpdateMachineResponse{} + +// UpdateMachineResponse is the response of the UpdateMachine hook. +// The status of the update operation is determined by the CommonRetryResponse fields: +// - Status=Success + RetryAfterSeconds > 0: update is in progress +// - Status=Success + RetryAfterSeconds = 0: update completed successfully +// - Status=Failure: update failed +// +kubebuilder:object:root=true +type UpdateMachineResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonRetryResponse contains Status, Message and RetryAfterSeconds fields. + CommonRetryResponse `json:",inline"` +} + +// UpdateMachine is the hook that will be called to perform in-place updates on a machine. +// This hook should be idempotent and can be called multiple times for the same machine +// until it reports Done or Failed status. +func UpdateMachine(*UpdateMachineRequest, *UpdateMachineResponse) {} + +func init() { + catalogBuilder.RegisterHook(CanUpdateMachine, &runtimecatalog.HookMeta{ + Tags: []string{"In-Place Update Hooks"}, + Summary: "Cluster API Runtime will call this hook to determine if an extension can handle specific Machine changes", + Description: "Called during update planning to determine if an extension can handle Machine changes. " + + "The request contains current and desired state for Machine, InfraMachine and optionally BootstrapConfig. 
" + + "Extensions should return per-object patches to be applied on current objects to indicate which changes they can handle in-place.\n" + + "\n" + + "Notes:\n" + + "- This hook is called during the planning phase of updates\n" + + "- Only spec is provided, status fields are not included\n" + + "- If no extension can cover the required changes, CAPI will fallback to rolling updates\n" + + "- Only fields in Machine/InfraMachine/BootstrapConfig spec have to be covered by patches\n", + }) + + catalogBuilder.RegisterHook(CanUpdateMachineSet, &runtimecatalog.HookMeta{ + Tags: []string{"In-Place Update Hooks"}, + Summary: "Cluster API Runtime will call this hook to determine if an extension can handle specific MachineSet changes", + Description: "Called during update planning to determine if an extension can handle MachineSet changes. " + + "The request contains current and desired state for MachineSet, InfraMachineTemplate and optionally BootstrapConfigTemplate. " + + "Extensions should return per-object patches to be applied on current objects to indicate which changes they can handle in-place.\n" + + "\n" + + "Notes:\n" + + "- This hook is called during the planning phase of updates\n" + + "- Only spec is provided, status fields are not included\n" + + "- If no extension can cover the required changes, CAPI will fallback to rolling updates\n" + + "- Only fields in MachineSet/InfraMachineTemplate/BootstrapConfigTemplate spec.template.spec have to be covered by patches\n", + }) + + catalogBuilder.RegisterHook(UpdateMachine, &runtimecatalog.HookMeta{ + Tags: []string{"In-Place Update Hooks"}, + Summary: "Cluster API Runtime will call this hook to perform in-place updates on a Machine", + Description: "Cluster API Runtime will call this hook to perform the actual in-place update on a Machine. " + + "The request contains the desired state for Machine, InfraMachine and optionally BootstrapConfig. " + + "The hook will be called repeatedly until it reports Done or Failed status.\n" + + "\n" + + "Notes:\n" + + "- This hook must be idempotent - it can be called multiple times for the same Machine\n", + }) +} diff --git a/api/runtime/hooks/v1alpha1/lifecyclehooks_types.go b/api/runtime/hooks/v1alpha1/lifecyclehooks_types.go index bf0b8e15384b..71c6455a0fa9 100644 --- a/api/runtime/hooks/v1alpha1/lifecyclehooks_types.go +++ b/api/runtime/hooks/v1alpha1/lifecyclehooks_types.go @@ -97,6 +97,22 @@ type BeforeClusterUpgradeRequest struct { // toKubernetesVersion is the target Kubernetes version of the upgrade. // +required ToKubernetesVersion string `json:"toKubernetesVersion"` + + // controlPlaneUpgrades is the list of version upgrade steps for the control plane. + // +optional + ControlPlaneUpgrades []UpgradeStepInfo `json:"controlPlaneUpgrades,omitempty"` + + // workersUpgrades is the list of version upgrade steps for the workers. + // +optional + WorkersUpgrades []UpgradeStepInfo `json:"workersUpgrades,omitempty"` +} + +// UpgradeStepInfo provide info about a single version upgrade step. +type UpgradeStepInfo struct { + // version is the Kubernetes version for this upgrade step. + // +required + // +kubebuilder:validation:MinLength=1 + Version string `json:"version,omitempty"` } var _ RetryResponseObject = &BeforeClusterUpgradeResponse{} @@ -114,6 +130,50 @@ type BeforeClusterUpgradeResponse struct { // before the updated version is propagated to the underlying objects. 
func BeforeClusterUpgrade(*BeforeClusterUpgradeRequest, *BeforeClusterUpgradeResponse) {} +// BeforeControlPlaneUpgradeRequest is the request of the BeforeControlPlaneUpgrade hook. +// +kubebuilder:object:root=true +type BeforeControlPlaneUpgradeRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // cluster is the cluster object the lifecycle hook corresponds to. + // +required + Cluster clusterv1beta1.Cluster `json:"cluster"` + + // fromKubernetesVersion is the current Kubernetes version of the control plane for the next upgrade step. + // +required + FromKubernetesVersion string `json:"fromKubernetesVersion"` + + // toKubernetesVersion is the target Kubernetes version of the control plane for the next upgrade step. + // +required + ToKubernetesVersion string `json:"toKubernetesVersion"` + + // controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any. + // +optional + ControlPlaneUpgrades []UpgradeStepInfo `json:"controlPlaneUpgrades,omitempty"` + + // workersUpgrades is the list of the remaining version upgrade steps for workers, if any. + // +optional + WorkersUpgrades []UpgradeStepInfo `json:"workersUpgrades,omitempty"` +} + +var _ RetryResponseObject = &BeforeControlPlaneUpgradeResponse{} + +// BeforeControlPlaneUpgradeResponse is the response of the BeforeControlPlaneUpgrade hook. +// +kubebuilder:object:root=true +type BeforeControlPlaneUpgradeResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonRetryResponse contains Status, Message and RetryAfterSeconds fields. + CommonRetryResponse `json:",inline"` +} + +// BeforeControlPlaneUpgrade is the hook that will be called before a new version is propagated to the control plane object. +func BeforeControlPlaneUpgrade(*BeforeControlPlaneUpgradeRequest, *BeforeControlPlaneUpgradeResponse) { +} + // AfterControlPlaneUpgradeRequest is the request of the AfterControlPlaneUpgrade hook. // +kubebuilder:object:root=true type AfterControlPlaneUpgradeRequest struct { @@ -126,9 +186,17 @@ type AfterControlPlaneUpgradeRequest struct { // +required Cluster clusterv1beta1.Cluster `json:"cluster"` - // kubernetesVersion is the Kubernetes version of the Control Plane after the upgrade. + // kubernetesVersion is the Kubernetes version of the control plane after an upgrade step. // +required KubernetesVersion string `json:"kubernetesVersion"` + + // controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any. + // +optional + ControlPlaneUpgrades []UpgradeStepInfo `json:"controlPlaneUpgrades,omitempty"` + + // workersUpgrades is the list of the remaining version upgrade steps for workers, if any. + // +optional + WorkersUpgrades []UpgradeStepInfo `json:"workersUpgrades,omitempty"` } var _ RetryResponseObject = &AfterControlPlaneUpgradeResponse{} @@ -146,6 +214,90 @@ type AfterControlPlaneUpgradeResponse struct { // Kubernetes version and before the target version is propagated to the workload machines. func AfterControlPlaneUpgrade(*AfterControlPlaneUpgradeRequest, *AfterControlPlaneUpgradeResponse) {} +// BeforeWorkersUpgradeRequest is the request of the BeforeWorkersUpgrade hook. +// +kubebuilder:object:root=true +type BeforeWorkersUpgradeRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // cluster is the cluster object the lifecycle hook corresponds to.
+ // +required + Cluster clusterv1beta1.Cluster `json:"cluster"` + + // fromKubernetesVersion is the current Kubernetes version of the workers for the next upgrade step. + // +required + FromKubernetesVersion string `json:"fromKubernetesVersion"` + + // toKubernetesVersion is the target Kubernetes version of the workers for the next upgrade step. + // +required + ToKubernetesVersion string `json:"toKubernetesVersion"` + + // controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any. + // +optional + ControlPlaneUpgrades []UpgradeStepInfo `json:"controlPlaneUpgrades,omitempty"` + + // workersUpgrades is the list of the remaining version upgrade steps for workers, if any. + // +optional + WorkersUpgrades []UpgradeStepInfo `json:"workersUpgrades,omitempty"` +} + +var _ RetryResponseObject = &BeforeWorkersUpgradeResponse{} + +// BeforeWorkersUpgradeResponse is the response of the BeforeWorkersUpgrade hook. +// +kubebuilder:object:root=true +type BeforeWorkersUpgradeResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonRetryResponse contains Status, Message and RetryAfterSeconds fields. + CommonRetryResponse `json:",inline"` +} + +// BeforeWorkersUpgrade is the hook that will be called before a new version is propagated to workers. +func BeforeWorkersUpgrade(*BeforeWorkersUpgradeRequest, *BeforeWorkersUpgradeResponse) { +} + +// AfterWorkersUpgradeRequest is the request of the AfterWorkersUpgrade hook. +// +kubebuilder:object:root=true +type AfterWorkersUpgradeRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // cluster is the cluster object the lifecycle hook corresponds to. + // +required + Cluster clusterv1beta1.Cluster `json:"cluster"` + + // kubernetesVersion is the Kubernetes version of the workers after an upgrade step. + // +required + KubernetesVersion string `json:"kubernetesVersion"` + + // controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any. + // +optional + ControlPlaneUpgrades []UpgradeStepInfo `json:"controlPlaneUpgrades,omitempty"` + + // workersUpgrades is the list of the remaining version upgrade steps for workers, if any. + // +optional + WorkersUpgrades []UpgradeStepInfo `json:"workersUpgrades,omitempty"` +} + +var _ RetryResponseObject = &AfterWorkersUpgradeResponse{} + +// AfterWorkersUpgradeResponse is the response of the AfterWorkersUpgrade hook. +// +kubebuilder:object:root=true +type AfterWorkersUpgradeResponse struct { + metav1.TypeMeta `json:",inline"` + + // CommonRetryResponse contains Status, Message and RetryAfterSeconds fields. + CommonRetryResponse `json:",inline"` +} + +// AfterWorkersUpgrade is the hook that will be called after all the workers have been successfully upgraded to the target +// Kubernetes version of an upgrade step. +func AfterWorkersUpgrade(*AfterWorkersUpgradeRequest, *AfterWorkersUpgradeResponse) {} + // AfterClusterUpgradeRequest is the request of the AfterClusterUpgrade hook. // +kubebuilder:object:root=true type AfterClusterUpgradeRequest struct { @@ -170,8 +322,8 @@ var _ ResponseObject = &AfterClusterUpgradeResponse{} type AfterClusterUpgradeResponse struct { metav1.TypeMeta `json:",inline"` - // CommonResponse contains Status and Message fields common to all response types. - CommonResponse `json:",inline"` + // CommonRetryResponse contains Status, Message and RetryAfterSeconds fields.
+	CommonRetryResponse `json:",inline"`
 }
 
 // AfterClusterUpgrade is the hook that is called after the entire cluster is updated
@@ -243,18 +395,58 @@ func init() {
 			"tasks before the new version is propagated to the control plane",
 	})
 
+	catalogBuilder.RegisterHook(BeforeControlPlaneUpgrade, &runtimecatalog.HookMeta{
+		Tags:    []string{"Lifecycle Hooks"},
+		Summary: "Cluster API Runtime will call this hook before the control plane is upgraded",
+		Description: "This hook is called before a new version is propagated to the control plane object.\n" +
+			"\n" +
+			"Notes:\n" +
+			"- This hook will be called only for Clusters with a managed topology\n" +
+			"- When an upgrade is starting, BeforeControlPlaneUpgrade will be called after BeforeClusterUpgrade is completed\n" +
+			"- When an upgrade is in progress, BeforeControlPlaneUpgrade will be called for each intermediate version that will be applied " +
+			"to the control plane (instead BeforeClusterUpgrade will be called only once at the beginning of the upgrade)\n" +
+			"- This is a blocking hook; Runtime Extension implementers can use this hook to execute " +
+			"tasks before the new version is propagated to the control plane",
+	})
+
 	catalogBuilder.RegisterHook(AfterControlPlaneUpgrade, &runtimecatalog.HookMeta{
 		Tags:    []string{"Lifecycle Hooks"},
 		Summary: "Cluster API Runtime will call this hook after the control plane is upgraded",
 		Description: "Cluster API Runtime will call this hook after the a cluster's control plane has been upgraded to the version specified " +
-			"in spec.topology.version, and immediately before the new version is going to be propagated to the MachineDeployments. " +
+			"in spec.topology.version or to an intermediate version in the upgrade plan. " +
 			"A control plane upgrade is completed when all the machines in the control plane have been upgraded.\n" +
 			"\n" +
 			"Notes:\n" +
 			"- This hook will be called only for Clusters with a managed topology\n" +
 			"- The call's request contains the Cluster object and the Kubernetes version we upgraded to\n" +
 			"- This is a blocking hook; Runtime Extension implementers can use this hook to execute " +
-			"tasks before the new version is propagated to the MachineDeployments",
+			"tasks before the new version is propagated to the MachineDeployments and Machine Pools",
 	})
 
+	catalogBuilder.RegisterHook(BeforeWorkersUpgrade, &runtimecatalog.HookMeta{
+		Tags:    []string{"Lifecycle Hooks"},
+		Summary: "Cluster API Runtime will call this hook before the workers are upgraded",
+		Description: "This hook is called before a new version is propagated to workers.\n" +
+			"\n" +
+			"Notes:\n" +
+			"- This hook will be called only for Clusters with a managed topology\n" +
+			"- This hook will be called only if a workers upgrade must be performed for an intermediate version of " +
+			"a chained upgrade or when upgrading to the target spec.topology.version.\n" +
+			"- This is a blocking hook; Runtime Extension implementers can use this hook to execute " +
+			"tasks before the new version is propagated to the MachineDeployments and Machine Pools",
+	})
+
+	catalogBuilder.RegisterHook(AfterWorkersUpgrade, &runtimecatalog.HookMeta{
+		Tags:    []string{"Lifecycle Hooks"},
+		Summary: "Cluster API Runtime will call this hook after workers are upgraded",
+		Description: "This hook is called after all the workers have been upgraded to the version specified in spec.topology.version " +
+			"or to an intermediate version in the upgrade plan.\n" +
+			"\n" +
+			"Notes:\n" +
+			"- This hook will be called only for Clusters with a managed topology\n" +
+ "- The call's request contains the Cluster object, the current Kubernetes version and the Kubernetes version we are upgrading to\n" + + "- This is a blocking hook; Runtime Extension implementers can use this hook to execute " + + "tasks before the upgrade plan continues, or when already at the target spec.topology.version, before AfterClusterUpgrade is called.\n", }) catalogBuilder.RegisterHook(AfterClusterUpgrade, &runtimecatalog.HookMeta{ @@ -266,7 +458,7 @@ func init() { "Notes:\n" + "- This hook will be called only for Clusters with a managed topology\n" + "- The call's request contains the Cluster object and the Kubernetes version we upgraded to \n" + - "- This is a non-blocking hook", + "- This is a blocking hook; Runtime Extension implementers can use this hook to prevent the next upgrade to start.\n", }) catalogBuilder.RegisterHook(BeforeClusterDelete, &runtimecatalog.HookMeta{ diff --git a/api/runtime/hooks/v1alpha1/topologymutation_types.go b/api/runtime/hooks/v1alpha1/topologymutation_types.go index 4efa2742084f..3ea71eae4357 100644 --- a/api/runtime/hooks/v1alpha1/topologymutation_types.go +++ b/api/runtime/hooks/v1alpha1/topologymutation_types.go @@ -101,18 +101,6 @@ type GeneratePatchesResponseItem struct { Patch []byte `json:"patch"` } -// PatchType defines the supported patch types. -// +enum -type PatchType string - -const ( - // JSONPatchType identifies a https://datatracker.ietf.org/doc/html/rfc6902 JSON patch. - JSONPatchType PatchType = "JSONPatch" - - // JSONMergePatchType identifies a https://datatracker.ietf.org/doc/html/rfc7386 JSON merge patch. - JSONMergePatchType PatchType = "JSONMergePatch" -) - // GeneratePatches generates patches during topology reconciliation for the entire Cluster topology. func GeneratePatches(*GeneratePatchesRequest, *GeneratePatchesResponse) {} diff --git a/api/runtime/hooks/v1alpha1/upgrade_plan_types.go b/api/runtime/hooks/v1alpha1/upgrade_plan_types.go new file mode 100644 index 000000000000..660e2434aa3a --- /dev/null +++ b/api/runtime/hooks/v1alpha1/upgrade_plan_types.go @@ -0,0 +1,129 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" +) + +// GenerateUpgradePlanRequest is the request of the GenerateUpgradePlan hook. +// +kubebuilder:object:root=true +type GenerateUpgradePlanRequest struct { + metav1.TypeMeta `json:",inline"` + + // CommonRequest contains fields common to all request types. + CommonRequest `json:",inline"` + + // cluster is the cluster object the GenerateUpgradePlan request corresponds to. + // +required + Cluster clusterv1.Cluster `json:"cluster,omitempty,omitzero"` + + // fromControlPlaneKubernetesVersion is the current Kubernetes version of the control plane. 
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	FromControlPlaneKubernetesVersion string `json:"fromControlPlaneKubernetesVersion,omitempty"`
+
+	// fromWorkersKubernetesVersion is the minimum current Kubernetes version of the workers.
+	// +optional
+	// +kubebuilder:validation:MinLength=1
+	FromWorkersKubernetesVersion string `json:"fromWorkersKubernetesVersion,omitempty"`
+
+	// toKubernetesVersion is the target Kubernetes version for the upgrade.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	ToKubernetesVersion string `json:"toKubernetesVersion,omitempty"`
+}
+
+var _ ResponseObject = &GenerateUpgradePlanResponse{}
+
+// GenerateUpgradePlanResponse is the response of the GenerateUpgradePlan hook.
+// +kubebuilder:object:root=true
+type GenerateUpgradePlanResponse struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// CommonResponse contains Status and Message fields common to all response types.
+	CommonResponse `json:",inline"`
+
+	// controlPlaneUpgrades is the list of version upgrade steps for the control plane.
+	// Each entry represents an intermediate version that must be applied in sequence.
+	// The following rules apply:
+	// - there must be at least one version for every minor between fromControlPlaneKubernetesVersion (excluded) and toKubernetesVersion (included).
+	// - each version must be:
+	//   - greater than fromControlPlaneKubernetesVersion (or with a different build number)
+	//   - greater than the previous version in the list (or with a different build number)
+	//   - less than or equal to toKubernetesVersion (or with a different build number)
+	// - the last version in the plan must be equal to toKubernetesVersion
+	// +optional
+	ControlPlaneUpgrades []UpgradeStep `json:"controlPlaneUpgrades,omitempty"`
+
+	// workersUpgrades is the list of version upgrade steps for the workers.
+	// Each entry represents an intermediate version that must be applied in sequence.
+	//
+	// If the upgrade plan for workers is left empty, the system will automatically
+	// determine the minimal number of worker upgrade steps, thus minimizing the impact on workloads and reducing
+	// the overall upgrade time.
+	//
+	// If instead a custom upgrade path for workers is required, the following rules apply:
+	// - each version must be:
+	//   - equal to fromControlPlaneKubernetesVersion or to one of the versions in the control plane upgrade plan.
+	//   - greater than fromWorkersKubernetesVersion (or with a different build number)
+	//   - greater than the previous version in the list (or with a different build number)
+	//   - less than or equal to toKubernetesVersion (or with a different build number)
+	// - in case of versions with the same major/minor/patch but different build numbers, the order of those
+	//   versions must be the same in the control plane and worker upgrade plans.
+	// - the last version in the plan must be equal to toKubernetesVersion
+	// - the upgrade plan must include all the intermediate versions workers must go through to avoid breaking the rules
+	//   defining the max version skew between control plane and workers.
+	// +optional
+	WorkersUpgrades []UpgradeStep `json:"workersUpgrades,omitempty"`
+}
+
+// UpgradeStep represents a single version upgrade step.
+type UpgradeStep struct {
+	// version is the Kubernetes version for this upgrade step.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	Version string `json:"version,omitempty"`
+}
+
+// GenerateUpgradePlan is the hook that will be called to generate an upgrade plan
+// for a cluster. This hook allows runtime extensions to specify intermediate
+// Kubernetes versions that must be applied during an upgrade from the current
+// version to the target version.
+func GenerateUpgradePlan(*GenerateUpgradePlanRequest, *GenerateUpgradePlanResponse) {}
+
+func init() {
+	catalogBuilder.RegisterHook(GenerateUpgradePlan, &runtimecatalog.HookMeta{
+		Tags:    []string{"Chained Upgrade Hook"},
+		Summary: "Cluster API Runtime will call this hook to generate an upgrade plan for a cluster",
+		Description: "Cluster API Runtime will call this hook to generate an upgrade plan for a cluster. " +
+			"Runtime Extension implementers can use this hook to specify intermediate Kubernetes versions " +
+			"that must be applied during an upgrade from the current version to the target version.\n" +
+			"\n" +
+			"For example, if upgrading from v1.29.0 to v1.33.0 requires intermediate versions v1.30.0, " +
+			"v1.31.0, and v1.32.0, the hook should return these intermediate versions in the response.\n" +
+			"\n" +
+			"Notes:\n" +
+			"- The response may include separate upgrade paths for control plane and workers\n" +
+			"- The upgrade plan for workers is optional; if missing, the system will automatically " +
+			"determine the minimal number of workers upgrade steps according to Kubernetes version skew rules.\n" +
+			"- Each upgrade step represents a version that must be applied in sequence",
+	})
+}
diff --git a/api/runtime/hooks/v1alpha1/zz_generated.deepcopy.go b/api/runtime/hooks/v1alpha1/zz_generated.deepcopy.go
index ce59d1aedec7..fb8d4012d7dd 100644
--- a/api/runtime/hooks/v1alpha1/zz_generated.deepcopy.go
+++ b/api/runtime/hooks/v1alpha1/zz_generated.deepcopy.go
@@ -55,7 +55,7 @@ func (in *AfterClusterUpgradeRequest) DeepCopyObject() runtime.Object {
 func (in *AfterClusterUpgradeResponse) DeepCopyInto(out *AfterClusterUpgradeResponse) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	out.CommonResponse = in.CommonResponse
+	out.CommonRetryResponse = in.CommonRetryResponse
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterClusterUpgradeResponse.
@@ -133,6 +133,16 @@ func (in *AfterControlPlaneUpgradeRequest) DeepCopyInto(out *AfterControlPlaneUp
 	out.TypeMeta = in.TypeMeta
 	in.CommonRequest.DeepCopyInto(&out.CommonRequest)
 	in.Cluster.DeepCopyInto(&out.Cluster)
+	if in.ControlPlaneUpgrades != nil {
+		in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades
+		*out = make([]UpgradeStepInfo, len(*in))
+		copy(*out, *in)
+	}
+	if in.WorkersUpgrades != nil {
+		in, out := &in.WorkersUpgrades, &out.WorkersUpgrades
+		*out = make([]UpgradeStepInfo, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterControlPlaneUpgradeRequest.
@@ -178,6 +188,67 @@ func (in *AfterControlPlaneUpgradeResponse) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AfterWorkersUpgradeRequest) DeepCopyInto(out *AfterWorkersUpgradeRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Cluster.DeepCopyInto(&out.Cluster) + if in.ControlPlaneUpgrades != nil { + in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } + if in.WorkersUpgrades != nil { + in, out := &in.WorkersUpgrades, &out.WorkersUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterWorkersUpgradeRequest. +func (in *AfterWorkersUpgradeRequest) DeepCopy() *AfterWorkersUpgradeRequest { + if in == nil { + return nil + } + out := new(AfterWorkersUpgradeRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AfterWorkersUpgradeRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AfterWorkersUpgradeResponse) DeepCopyInto(out *AfterWorkersUpgradeResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonRetryResponse = in.CommonRetryResponse +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterWorkersUpgradeResponse. +func (in *AfterWorkersUpgradeResponse) DeepCopy() *AfterWorkersUpgradeResponse { + if in == nil { + return nil + } + out := new(AfterWorkersUpgradeResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AfterWorkersUpgradeResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BeforeClusterCreateRequest) DeepCopyInto(out *BeforeClusterCreateRequest) { *out = *in @@ -286,6 +357,16 @@ func (in *BeforeClusterUpgradeRequest) DeepCopyInto(out *BeforeClusterUpgradeReq out.TypeMeta = in.TypeMeta in.CommonRequest.DeepCopyInto(&out.CommonRequest) in.Cluster.DeepCopyInto(&out.Cluster) + if in.ControlPlaneUpgrades != nil { + in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } + if in.WorkersUpgrades != nil { + in, out := &in.WorkersUpgrades, &out.WorkersUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BeforeClusterUpgradeRequest. @@ -331,6 +412,128 @@ func (in *BeforeClusterUpgradeResponse) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BeforeControlPlaneUpgradeRequest) DeepCopyInto(out *BeforeControlPlaneUpgradeRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Cluster.DeepCopyInto(&out.Cluster) + if in.ControlPlaneUpgrades != nil { + in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } + if in.WorkersUpgrades != nil { + in, out := &in.WorkersUpgrades, &out.WorkersUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BeforeControlPlaneUpgradeRequest. +func (in *BeforeControlPlaneUpgradeRequest) DeepCopy() *BeforeControlPlaneUpgradeRequest { + if in == nil { + return nil + } + out := new(BeforeControlPlaneUpgradeRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BeforeControlPlaneUpgradeRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BeforeControlPlaneUpgradeResponse) DeepCopyInto(out *BeforeControlPlaneUpgradeResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonRetryResponse = in.CommonRetryResponse +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BeforeControlPlaneUpgradeResponse. +func (in *BeforeControlPlaneUpgradeResponse) DeepCopy() *BeforeControlPlaneUpgradeResponse { + if in == nil { + return nil + } + out := new(BeforeControlPlaneUpgradeResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BeforeControlPlaneUpgradeResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BeforeWorkersUpgradeRequest) DeepCopyInto(out *BeforeWorkersUpgradeRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Cluster.DeepCopyInto(&out.Cluster) + if in.ControlPlaneUpgrades != nil { + in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } + if in.WorkersUpgrades != nil { + in, out := &in.WorkersUpgrades, &out.WorkersUpgrades + *out = make([]UpgradeStepInfo, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BeforeWorkersUpgradeRequest. +func (in *BeforeWorkersUpgradeRequest) DeepCopy() *BeforeWorkersUpgradeRequest { + if in == nil { + return nil + } + out := new(BeforeWorkersUpgradeRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BeforeWorkersUpgradeRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BeforeWorkersUpgradeResponse) DeepCopyInto(out *BeforeWorkersUpgradeResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonRetryResponse = in.CommonRetryResponse +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BeforeWorkersUpgradeResponse. +func (in *BeforeWorkersUpgradeResponse) DeepCopy() *BeforeWorkersUpgradeResponse { + if in == nil { + return nil + } + out := new(BeforeWorkersUpgradeResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BeforeWorkersUpgradeResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Builtins) DeepCopyInto(out *Builtins) { *out = *in @@ -366,6 +569,152 @@ func (in *Builtins) DeepCopy() *Builtins { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineRequest) DeepCopyInto(out *CanUpdateMachineRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Current.DeepCopyInto(&out.Current) + in.Desired.DeepCopyInto(&out.Desired) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineRequest. +func (in *CanUpdateMachineRequest) DeepCopy() *CanUpdateMachineRequest { + if in == nil { + return nil + } + out := new(CanUpdateMachineRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CanUpdateMachineRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineRequestObjects) DeepCopyInto(out *CanUpdateMachineRequestObjects) { + *out = *in + in.Machine.DeepCopyInto(&out.Machine) + in.InfrastructureMachine.DeepCopyInto(&out.InfrastructureMachine) + in.BootstrapConfig.DeepCopyInto(&out.BootstrapConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineRequestObjects. +func (in *CanUpdateMachineRequestObjects) DeepCopy() *CanUpdateMachineRequestObjects { + if in == nil { + return nil + } + out := new(CanUpdateMachineRequestObjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineResponse) DeepCopyInto(out *CanUpdateMachineResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonResponse = in.CommonResponse + in.MachinePatch.DeepCopyInto(&out.MachinePatch) + in.InfrastructureMachinePatch.DeepCopyInto(&out.InfrastructureMachinePatch) + in.BootstrapConfigPatch.DeepCopyInto(&out.BootstrapConfigPatch) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineResponse. 
+func (in *CanUpdateMachineResponse) DeepCopy() *CanUpdateMachineResponse { + if in == nil { + return nil + } + out := new(CanUpdateMachineResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CanUpdateMachineResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineSetRequest) DeepCopyInto(out *CanUpdateMachineSetRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Current.DeepCopyInto(&out.Current) + in.Desired.DeepCopyInto(&out.Desired) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineSetRequest. +func (in *CanUpdateMachineSetRequest) DeepCopy() *CanUpdateMachineSetRequest { + if in == nil { + return nil + } + out := new(CanUpdateMachineSetRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CanUpdateMachineSetRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineSetRequestObjects) DeepCopyInto(out *CanUpdateMachineSetRequestObjects) { + *out = *in + in.MachineSet.DeepCopyInto(&out.MachineSet) + in.InfrastructureMachineTemplate.DeepCopyInto(&out.InfrastructureMachineTemplate) + in.BootstrapConfigTemplate.DeepCopyInto(&out.BootstrapConfigTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineSetRequestObjects. +func (in *CanUpdateMachineSetRequestObjects) DeepCopy() *CanUpdateMachineSetRequestObjects { + if in == nil { + return nil + } + out := new(CanUpdateMachineSetRequestObjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanUpdateMachineSetResponse) DeepCopyInto(out *CanUpdateMachineSetResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonResponse = in.CommonResponse + in.MachineSetPatch.DeepCopyInto(&out.MachineSetPatch) + in.InfrastructureMachineTemplatePatch.DeepCopyInto(&out.InfrastructureMachineTemplatePatch) + in.BootstrapConfigTemplatePatch.DeepCopyInto(&out.BootstrapConfigTemplatePatch) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanUpdateMachineSetResponse. +func (in *CanUpdateMachineSetResponse) DeepCopy() *CanUpdateMachineSetResponse { + if in == nil { + return nil + } + out := new(CanUpdateMachineSetResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CanUpdateMachineSetResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterBuiltins) DeepCopyInto(out *ClusterBuiltins) { *out = *in @@ -825,6 +1174,67 @@ func (in *GeneratePatchesResponseItem) DeepCopy() *GeneratePatchesResponseItem { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerateUpgradePlanRequest) DeepCopyInto(out *GenerateUpgradePlanRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Cluster.DeepCopyInto(&out.Cluster) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateUpgradePlanRequest. +func (in *GenerateUpgradePlanRequest) DeepCopy() *GenerateUpgradePlanRequest { + if in == nil { + return nil + } + out := new(GenerateUpgradePlanRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenerateUpgradePlanRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerateUpgradePlanResponse) DeepCopyInto(out *GenerateUpgradePlanResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonResponse = in.CommonResponse + if in.ControlPlaneUpgrades != nil { + in, out := &in.ControlPlaneUpgrades, &out.ControlPlaneUpgrades + *out = make([]UpgradeStep, len(*in)) + copy(*out, *in) + } + if in.WorkersUpgrades != nil { + in, out := &in.WorkersUpgrades, &out.WorkersUpgrades + *out = make([]UpgradeStep, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateUpgradePlanResponse. +func (in *GenerateUpgradePlanResponse) DeepCopy() *GenerateUpgradePlanResponse { + if in == nil { + return nil + } + out := new(GenerateUpgradePlanResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenerateUpgradePlanResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GroupVersionHook) DeepCopyInto(out *GroupVersionHook) { *out = *in @@ -975,6 +1385,125 @@ func (in *MachinePoolBuiltins) DeepCopy() *MachinePoolBuiltins { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patch) DeepCopyInto(out *Patch) { + *out = *in + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch. +func (in *Patch) DeepCopy() *Patch { + if in == nil { + return nil + } + out := new(Patch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateMachineRequest) DeepCopyInto(out *UpdateMachineRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.CommonRequest.DeepCopyInto(&out.CommonRequest) + in.Desired.DeepCopyInto(&out.Desired) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateMachineRequest. 
+func (in *UpdateMachineRequest) DeepCopy() *UpdateMachineRequest { + if in == nil { + return nil + } + out := new(UpdateMachineRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UpdateMachineRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateMachineRequestObjects) DeepCopyInto(out *UpdateMachineRequestObjects) { + *out = *in + in.Machine.DeepCopyInto(&out.Machine) + in.InfrastructureMachine.DeepCopyInto(&out.InfrastructureMachine) + in.BootstrapConfig.DeepCopyInto(&out.BootstrapConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateMachineRequestObjects. +func (in *UpdateMachineRequestObjects) DeepCopy() *UpdateMachineRequestObjects { + if in == nil { + return nil + } + out := new(UpdateMachineRequestObjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateMachineResponse) DeepCopyInto(out *UpdateMachineResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CommonRetryResponse = in.CommonRetryResponse +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateMachineResponse. +func (in *UpdateMachineResponse) DeepCopy() *UpdateMachineResponse { + if in == nil { + return nil + } + out := new(UpdateMachineResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UpdateMachineResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeStep) DeepCopyInto(out *UpgradeStep) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeStep. +func (in *UpgradeStep) DeepCopy() *UpgradeStep { + if in == nil { + return nil + } + out := new(UpgradeStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeStepInfo) DeepCopyInto(out *UpgradeStepInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeStepInfo. +func (in *UpgradeStepInfo) DeepCopy() *UpgradeStepInfo { + if in == nil { + return nil + } + out := new(UpgradeStepInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidateTopologyRequest) DeepCopyInto(out *ValidateTopologyRequest) { *out = *in diff --git a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go index e72d12416126..787689d73a00 100644 --- a/api/runtime/hooks/v1alpha1/zz_generated.openapi.go +++ b/api/runtime/hooks/v1alpha1/zz_generated.openapi.go @@ -34,13 +34,25 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.AfterControlPlaneInitializedResponse": schema_api_runtime_hooks_v1alpha1_AfterControlPlaneInitializedResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.AfterControlPlaneUpgradeRequest": schema_api_runtime_hooks_v1alpha1_AfterControlPlaneUpgradeRequest(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.AfterControlPlaneUpgradeResponse": schema_api_runtime_hooks_v1alpha1_AfterControlPlaneUpgradeResponse(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.AfterWorkersUpgradeRequest": schema_api_runtime_hooks_v1alpha1_AfterWorkersUpgradeRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.AfterWorkersUpgradeResponse": schema_api_runtime_hooks_v1alpha1_AfterWorkersUpgradeResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterCreateRequest": schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateRequest(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterCreateResponse": schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterDeleteRequest": schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteRequest(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterDeleteResponse": schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterUpgradeRequest": schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeRequest(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeClusterUpgradeResponse": schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeResponse(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeControlPlaneUpgradeRequest": schema_api_runtime_hooks_v1alpha1_BeforeControlPlaneUpgradeRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeControlPlaneUpgradeResponse": schema_api_runtime_hooks_v1alpha1_BeforeControlPlaneUpgradeResponse(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeWorkersUpgradeRequest": schema_api_runtime_hooks_v1alpha1_BeforeWorkersUpgradeRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.BeforeWorkersUpgradeResponse": schema_api_runtime_hooks_v1alpha1_BeforeWorkersUpgradeResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Builtins": schema_api_runtime_hooks_v1alpha1_Builtins(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineRequest": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineRequestObjects": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineRequestObjects(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineResponse": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineResponse(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetRequest": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetRequest(ref), + 
"sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetRequestObjects": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetRequestObjects(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetResponse": schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterBuiltins": schema_api_runtime_hooks_v1alpha1_ClusterBuiltins(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterNetworkBuiltins": schema_api_runtime_hooks_v1alpha1_ClusterNetworkBuiltins(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterTopologyBuiltins": schema_api_runtime_hooks_v1alpha1_ClusterTopologyBuiltins(ref), @@ -60,6 +72,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesRequestItem": schema_api_runtime_hooks_v1alpha1_GeneratePatchesRequestItem(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponse": schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponseItem": schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponseItem(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GenerateUpgradePlanRequest": schema_api_runtime_hooks_v1alpha1_GenerateUpgradePlanRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GenerateUpgradePlanResponse": schema_api_runtime_hooks_v1alpha1_GenerateUpgradePlanResponse(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GroupVersionHook": schema_api_runtime_hooks_v1alpha1_GroupVersionHook(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.HolderReference": schema_api_runtime_hooks_v1alpha1_HolderReference(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineBootstrapBuiltins": schema_api_runtime_hooks_v1alpha1_MachineBootstrapBuiltins(ref), @@ -67,6 +81,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineDeploymentBuiltins": schema_api_runtime_hooks_v1alpha1_MachineDeploymentBuiltins(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineInfrastructureRefBuiltins": schema_api_runtime_hooks_v1alpha1_MachineInfrastructureRefBuiltins(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachinePoolBuiltins": schema_api_runtime_hooks_v1alpha1_MachinePoolBuiltins(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch": schema_api_runtime_hooks_v1alpha1_Patch(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpdateMachineRequest": schema_api_runtime_hooks_v1alpha1_UpdateMachineRequest(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpdateMachineRequestObjects": schema_api_runtime_hooks_v1alpha1_UpdateMachineRequestObjects(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpdateMachineResponse": schema_api_runtime_hooks_v1alpha1_UpdateMachineResponse(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStep": schema_api_runtime_hooks_v1alpha1_UpgradeStep(ref), + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo": schema_api_runtime_hooks_v1alpha1_UpgradeStepInfo(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ValidateTopologyRequest": schema_api_runtime_hooks_v1alpha1_ValidateTopologyRequest(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ValidateTopologyRequestItem": 
schema_api_runtime_hooks_v1alpha1_ValidateTopologyRequestItem(ref), "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ValidateTopologyResponse": schema_api_runtime_hooks_v1alpha1_ValidateTopologyResponse(ref), @@ -172,8 +192,16 @@ func schema_api_runtime_hooks_v1alpha1_AfterClusterUpgradeResponse(ref common.Re Format: "", }, }, + "retryAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "retryAfterSeconds when set to a non-zero value signifies that the hook will be called again at a future time.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, }, - Required: []string{"status"}, + Required: []string{"status", "retryAfterSeconds"}, }, }, } @@ -322,18 +350,46 @@ func schema_api_runtime_hooks_v1alpha1_AfterControlPlaneUpgradeRequest(ref commo }, "kubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "kubernetesVersion is the Kubernetes version of the Control Plane after the upgrade.", + Description: "kubernetesVersion is the Kubernetes version of the control plane after an upgrade step.", Default: "", Type: []string{"string"}, Format: "", }, }, + "controlPlaneUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + "workersUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "workersUpgrades is the list of the remaining version upgrade steps for workers, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, }, Required: []string{"cluster", "kubernetesVersion"}, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"}, + "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"}, } } @@ -389,11 +445,11 @@ func schema_api_runtime_hooks_v1alpha1_AfterControlPlaneUpgradeResponse(ref comm } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_AfterWorkersUpgradeRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterCreateRequest is the request of the BeforeClusterCreate hook.", + Description: "AfterWorkersUpgradeRequest is the request of the AfterWorkersUpgrade hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -433,20 +489,56 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateRequest(ref common.Ref Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"), }, }, + "kubernetesVersion": { + SchemaProps: spec.SchemaProps{ + Description: "kubernetesVersion is the Kubernetes version of the workers after an upgrade step.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "controlPlaneUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any.", + Type: []string{"array"}, + Items: 
&spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + "workersUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "workersUpgrades is the list of the remaining version upgrade steps for workers, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, }, - Required: []string{"cluster"}, + Required: []string{"cluster", "kubernetesVersion"}, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"}, + "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"}, } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_AfterWorkersUpgradeResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterCreateResponse is the response of the BeforeClusterCreate hook.", + Description: "AfterWorkersUpgradeResponse is the response of the AfterWorkersUpgrade hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -494,11 +586,11 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateResponse(ref common.Re } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterDeleteRequest is the request of the BeforeClusterDelete hook.", + Description: "BeforeClusterCreateRequest is the request of the BeforeClusterCreate hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -547,11 +639,11 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteRequest(ref common.Ref } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterCreateResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterDeleteResponse is the response of the BeforeClusterDelete hook.", + Description: "BeforeClusterCreateResponse is the response of the BeforeClusterCreate hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -599,11 +691,11 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteResponse(ref common.Re } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterUpgradeRequest is the request of the BeforeClusterUpgrade hook.", + Description: "BeforeClusterDeleteRequest is the request of the 
BeforeClusterDelete hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -643,24 +735,8 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeRequest(ref common.Re Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"), }, }, - "fromKubernetesVersion": { - SchemaProps: spec.SchemaProps{ - Description: "fromKubernetesVersion is the current Kubernetes version of the cluster.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "toKubernetesVersion": { - SchemaProps: spec.SchemaProps{ - Description: "toKubernetesVersion is the target Kubernetes version of the upgrade.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, }, - Required: []string{"cluster", "fromKubernetesVersion", "toKubernetesVersion"}, + Required: []string{"cluster"}, }, }, Dependencies: []string{ @@ -668,11 +744,11 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeRequest(ref common.Re } } -func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterDeleteResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "BeforeClusterUpgradeResponse is the response of the BeforeClusterUpgrade hook.", + Description: "BeforeClusterDeleteResponse is the response of the BeforeClusterDelete hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -720,133 +796,182 @@ func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeResponse(ref common.R } } -func schema_api_runtime_hooks_v1alpha1_Builtins(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Builtins represents builtin variables exposed through patches.", + Description: "BeforeClusterUpgradeRequest is the request of the BeforeClusterUpgrade hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, "cluster": { SchemaProps: spec.SchemaProps{ - Description: "cluster represents builtin cluster variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterBuiltins"), + Description: "cluster is the cluster object the lifecycle hook corresponds to.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"), }, }, - "controlPlane": { + "fromKubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "controlPlane represents builtin ControlPlane variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ControlPlaneBuiltins"), + Description: "fromKubernetesVersion is the current Kubernetes version of the cluster.", + Default: "", + Type: []string{"string"}, + Format: "", }, }, - "machineDeployment": { + "toKubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "machineDeployment represents builtin MachineDeployment variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineDeploymentBuiltins"), + Description: "toKubernetesVersion is the target Kubernetes version of the upgrade.", + Default: "", + Type: []string{"string"}, + Format: "", }, }, - "machinePool": { + "controlPlaneUpgrades": { SchemaProps: spec.SchemaProps{ - Description: "machinePool represents builtin MachinePool variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachinePoolBuiltins"), + Description: "controlPlaneUpgrades is the list of version upgrade steps for the control plane.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + "workersUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "workersUpgrades is the list of version upgrade steps for the workers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, }, }, }, + Required: []string{"cluster", "fromKubernetesVersion", "toKubernetesVersion"}, }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ControlPlaneBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineDeploymentBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachinePoolBuiltins"}, + "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"}, } } -func schema_api_runtime_hooks_v1alpha1_ClusterBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeClusterUpgradeResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ 
- Description: "ClusterBuiltins represents builtin cluster variables.", + Description: "BeforeClusterUpgradeResponse is the response of the BeforeClusterUpgrade hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "name": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "name is the name of the cluster.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "namespace": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "namespace is the namespace of the cluster.", + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "uid": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "uid is the unqiue identifier of the cluster.", + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", Type: []string{"string"}, Format: "", + Enum: []interface{}{"Failure", "Success"}, }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "metadata is the metadata set on the Cluster object.", - Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta"), - }, - }, - "topology": { + "message": { SchemaProps: spec.SchemaProps{ - Description: "topology represents the cluster topology variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterTopologyBuiltins"), + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", }, }, - "network": { + "retryAfterSeconds": { SchemaProps: spec.SchemaProps{ - Description: "network represents the cluster network variables.", - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterNetworkBuiltins"), + Description: "retryAfterSeconds when set to a non-zero value signifies that the hook will be called again at a future time.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", }, }, }, + Required: []string{"status", "retryAfterSeconds"}, }, }, - Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterNetworkBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterTopologyBuiltins"}, } } -func schema_api_runtime_hooks_v1alpha1_ClusterNetworkBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_BeforeControlPlaneUpgradeRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ClusterNetworkBuiltins represents builtin cluster network variables.", + Description: "BeforeControlPlaneUpgradeRequest is the request of the BeforeControlPlane hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceDomain": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "serviceDomain is the 
domain name for services.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "services": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "services is the network ranges from which service VIPs are allocated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "pods": { + "settings": { SchemaProps: spec.SchemaProps{ - Description: "pods is the network ranges from which Pod networks are allocated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: "", @@ -857,7 +982,729 @@ func schema_api_runtime_hooks_v1alpha1_ClusterNetworkBuiltins(ref common.Referen }, }, }, - }, + "cluster": { + SchemaProps: spec.SchemaProps{ + Description: "cluster is the cluster object the lifecycle hook corresponds to.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"), + }, + }, + "fromKubernetesVersion": { + SchemaProps: spec.SchemaProps{ + Description: "fromKubernetesVersion is the current Kubernetes version of the control plane for the next upgrade step.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "toKubernetesVersion": { + SchemaProps: spec.SchemaProps{ + Description: "toKubernetesVersion is the target Kubernetes version of the control plane for the next upgrade step.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "controlPlaneUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + "workersUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "workersUpgrades is the list of the remaining version upgrade steps for workers, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + }, + Required: []string{"cluster", "fromKubernetesVersion", "toKubernetesVersion"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"}, + } +} + +func 
schema_api_runtime_hooks_v1alpha1_BeforeControlPlaneUpgradeResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BeforeControlPlaneUpgradeResponse is the response of the BeforeControlPlaneUpgrade hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "retryAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "retryAfterSeconds when set to a non-zero value signifies that the hook will be called again at a future time.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"status", "retryAfterSeconds"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_BeforeWorkersUpgradeRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BeforeWorkersUpgradeRequest is the request of the BeforeWorkersUpgrade hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
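The BeforeClusterUpgrade, BeforeControlPlaneUpgrade and BeforeWorkersUpgrade hooks defined above all carry the common retry fields (status, message, retryAfterSeconds), so a Runtime Extension can hold the next upgrade step by answering Success with a non-zero retryAfterSeconds. A minimal handler sketch, assuming the usual (ctx, request, response) handler signature and that the Go field and method names (Cluster, ToKubernetesVersion, RetryAfterSeconds, SetStatus, SetMessage) mirror the OpenAPI properties generated here; the backup check is purely hypothetical:

```go
// Hypothetical handler for the BeforeControlPlaneUpgrade hook; the hook types come
// from the generated definitions above, while the handler signature, field names and
// helper below are assumptions for illustration only.
package handlers

import (
	"context"
	"fmt"

	runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1"
)

// BeforeControlPlaneUpgrade blocks the next control plane upgrade step until an
// external precondition is met, by returning Success with a non-zero retryAfterSeconds.
func BeforeControlPlaneUpgrade(ctx context.Context, req *runtimehooksv1.BeforeControlPlaneUpgradeRequest, resp *runtimehooksv1.BeforeControlPlaneUpgradeResponse) {
	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	if !backupCompleted(ctx, req.Cluster.Namespace, req.Cluster.Name) {
		resp.SetMessage(fmt.Sprintf("waiting for backup before upgrading control plane to %s", req.ToKubernetesVersion))
		resp.RetryAfterSeconds = 30 // ask the topology controller to call the hook again later
	}
}

// backupCompleted stands in for extension-specific logic (hypothetical).
func backupCompleted(_ context.Context, namespace, name string) bool { return true }
```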
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "cluster": { + SchemaProps: spec.SchemaProps{ + Description: "cluster is the cluster object the lifecycle hook corresponds to.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster"), + }, + }, + "fromKubernetesVersion": { + SchemaProps: spec.SchemaProps{ + Description: "fromKubernetesVersion is the current Kubernetes version of the workers for the next upgrade step.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "toKubernetesVersion": { + SchemaProps: spec.SchemaProps{ + Description: "toKubernetesVersion is the target Kubernetes version of the workers for the next upgrade step.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "controlPlaneUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlaneUpgrades is the list of the remaining version upgrade steps for the control plane, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + "workersUpgrades": { + SchemaProps: spec.SchemaProps{ + Description: "workersUpgrades is the list of the remaining version upgrade steps for workers, if any.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"), + }, + }, + }, + }, + }, + }, + Required: []string{"cluster", "fromKubernetesVersion", "toKubernetesVersion"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/core/v1beta1.Cluster", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStepInfo"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_BeforeWorkersUpgradeResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BeforeWorkersUpgradeResponse is the response of the BeforeWorkersUpgrade hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "retryAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "retryAfterSeconds when set to a non-zero value signifies that the hook will be called again at a future time.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"status", "retryAfterSeconds"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_Builtins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Builtins represents builtin variables exposed through patches.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "cluster": { + SchemaProps: spec.SchemaProps{ + Description: "cluster represents builtin cluster variables.", + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterBuiltins"), + }, + }, + "controlPlane": { + SchemaProps: spec.SchemaProps{ + Description: "controlPlane represents builtin ControlPlane variables.", + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ControlPlaneBuiltins"), + }, + }, + "machineDeployment": { + SchemaProps: spec.SchemaProps{ + Description: "machineDeployment represents builtin MachineDeployment variables.", + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineDeploymentBuiltins"), + }, + }, + "machinePool": { + SchemaProps: spec.SchemaProps{ + Description: "machinePool represents builtin MachinePool variables.", + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachinePoolBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ControlPlaneBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachineDeploymentBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.MachinePoolBuiltins"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineRequest is the request of the CanUpdateMachine hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "current": { + SchemaProps: spec.SchemaProps{ + Description: "current contains the current state of the Machine and related objects.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineRequestObjects"), + }, + }, + "desired": { + SchemaProps: spec.SchemaProps{ + Description: "desired contains the desired state of the Machine and related objects.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineRequestObjects"), + }, + }, + }, + Required: []string{"current", "desired"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineRequestObjects"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineRequestObjects(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineRequestObjects groups objects for CanUpdateMachineRequest.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "machine": { + SchemaProps: spec.SchemaProps{ + Description: "machine is the full Machine object.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.Machine"), + }, + }, + "infrastructureMachine": { + SchemaProps: spec.SchemaProps{ + Description: "infrastructureMachine is the infra Machine object.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "bootstrapConfig": { + SchemaProps: spec.SchemaProps{ + Description: "bootstrapConfig is the bootstrap config object.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"machine", "infrastructureMachine"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension", "sigs.k8s.io/cluster-api/api/core/v1beta2.Machine"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineResponse is the response of the CanUpdateMachine hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "machinePatch": { + SchemaProps: spec.SchemaProps{ + Description: "machinePatch when applied to the current Machine spec, indicates changes handled in-place. Only fields in spec have to be covered by the patch.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + "infrastructureMachinePatch": { + SchemaProps: spec.SchemaProps{ + Description: "infrastructureMachinePatch indicates infra Machine spec changes handled in-place. Only fields in spec have to be covered by the patch.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + "bootstrapConfigPatch": { + SchemaProps: spec.SchemaProps{ + Description: "bootstrapConfigPatch indicates bootstrap config spec changes handled in-place. Only fields in spec have to be covered by the patch.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + }, + Required: []string{"status"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineSetRequest is the request of the CanUpdateMachineSet hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
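CanUpdateMachine lets an extension report which differences between the current and desired Machine (and its infrastructure/bootstrap objects) it can reconcile in place, by returning patches that cover the changed spec fields. A hedged sketch, assuming the response exposes a MachinePatch field of the Patch type generated further down and that its PatchType accepts the plain "JSONPatch"/"JSONMergePatch" strings shown in the schema; the version value is illustrative only:

```go
// Hypothetical CanUpdateMachine handler; field names (Current, Desired, MachinePatch,
// PatchType, Patch) are assumed from the OpenAPI properties above.
package handlers

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1"
)

func CanUpdateMachine(ctx context.Context, req *runtimehooksv1.CanUpdateMachineRequest, resp *runtimehooksv1.CanUpdateMachineResponse) {
	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	// Declare that the spec.version change between current and desired can be handled
	// in place by returning a JSON merge patch over the current Machine spec; changes
	// not covered by any patch are left to the regular rollout behaviour.
	resp.MachinePatch = runtimehooksv1.Patch{
		PatchType: "JSONMergePatch", // "JSONPatch" or "JSONMergePatch" per the schema
		Patch:     []byte(`{"spec":{"version":"v1.34.1"}}`),
	}
}
```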
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "current": { + SchemaProps: spec.SchemaProps{ + Description: "current contains the current state of the MachineSet and related objects.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetRequestObjects"), + }, + }, + "desired": { + SchemaProps: spec.SchemaProps{ + Description: "desired contains the desired state of the MachineSet and related objects.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetRequestObjects"), + }, + }, + }, + Required: []string{"current", "desired"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.CanUpdateMachineSetRequestObjects"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetRequestObjects(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineSetRequestObjects groups objects for CanUpdateMachineSetRequest.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "machineSet": { + SchemaProps: spec.SchemaProps{ + Description: "machineSet is the full MachineSet object. Only fields in spec.template.spec have to be covered by the patch.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineSet"), + }, + }, + "infrastructureMachineTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "infrastructureMachineTemplate is the provider-specific InfrastructureMachineTemplate object. Only fields in spec.template.spec have to be covered by the patch.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "bootstrapConfigTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "bootstrapConfigTemplate is the provider-specific BootstrapConfigTemplate object. Only fields in spec.template.spec have to be covered by the patch.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"machineSet", "infrastructureMachineTemplate"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineSet"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_CanUpdateMachineSetResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CanUpdateMachineSetResponse is the response of the CanUpdateMachineSet hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "machineSetPatch": { + SchemaProps: spec.SchemaProps{ + Description: "machineSetPatch when applied to the current MachineSet spec, indicates changes handled in-place.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + "infrastructureMachineTemplatePatch": { + SchemaProps: spec.SchemaProps{ + Description: "infrastructureMachineTemplatePatch indicates infra template spec changes handled in-place.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + "bootstrapConfigTemplatePatch": { + SchemaProps: spec.SchemaProps{ + Description: "bootstrapConfigTemplatePatch indicates bootstrap template spec changes handled in-place.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"), + }, + }, + }, + Required: []string{"status"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Patch"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_ClusterBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterBuiltins represents builtin cluster variables.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name is the name of the cluster.", + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "namespace is the namespace of the cluster.", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "uid is the unqiue identifier of the cluster.", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "metadata is the metadata set on the Cluster object.", + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta"), + }, + }, + "topology": { + SchemaProps: spec.SchemaProps{ + Description: "topology represents the cluster topology variables.", + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterTopologyBuiltins"), + }, + }, + "network": { + SchemaProps: spec.SchemaProps{ + Description: "network represents the cluster network variables.", + Ref: 
ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterNetworkBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/core/v1beta1.ObjectMeta", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterNetworkBuiltins", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.ClusterTopologyBuiltins"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_ClusterNetworkBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterNetworkBuiltins represents builtin cluster network variables.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "serviceDomain": { + SchemaProps: spec.SchemaProps{ + Description: "serviceDomain is the domain name for services.", + Type: []string{"string"}, + Format: "", + }, + }, + "services": { + SchemaProps: spec.SchemaProps{ + Description: "services is the network ranges from which service VIPs are allocated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "pods": { + SchemaProps: spec.SchemaProps{ + Description: "pods is the network ranges from which Pod networks are allocated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, }, }, } @@ -1456,47 +2303,218 @@ func schema_api_runtime_hooks_v1alpha1_GeneratePatchesRequestItem(ref common.Ref Format: "", }, }, - "holderReference": { + "holderReference": { + SchemaProps: spec.SchemaProps{ + Description: "holderReference is a reference to the object where the template is used.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.HolderReference"), + }, + }, + "object": { + SchemaProps: spec.SchemaProps{ + Description: "object contains the template as a raw object.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "variables": { + SchemaProps: spec.SchemaProps{ + Description: "variables are variables specific for the current template. For example some builtin variables like MachineDeployment replicas and version are context-sensitive and thus are only added to templates for MachineDeployments and with values which correspond to the current MachineDeployment.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Variable"), + }, + }, + }, + }, + }, + }, + Required: []string{"uid", "holderReference", "object"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.HolderReference", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Variable"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GeneratePatchesResponse is the response of the GeneratePatches hook. NOTE: The patches in GeneratePatchesResponse will be applied in the order in which they are defined to the templates of the request. 
Thus applying changes consecutively when iterating through internal and external patches.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "items is the list of generated patches.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponseItem"), + }, + }, + }, + }, + }, + }, + Required: []string{"status"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponseItem"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponseItem(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GeneratePatchesResponseItem is a generated patch.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "uid identifies the corresponding template in the request on which the patch should be applied.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "patchType": { + SchemaProps: spec.SchemaProps{ + Description: "patchType defines the type of the patch. One of: \"JSONPatch\" or \"JSONMergePatch\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "patch": { + SchemaProps: spec.SchemaProps{ + Description: "patch contains the patch which should be applied to the template. 
It must be of the corresponding PatchType.", + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + Required: []string{"uid", "patchType", "patch"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_GenerateUpgradePlanRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GenerateUpgradePlanRequest is the request of the GenerateUpgradePlan hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "cluster": { + SchemaProps: spec.SchemaProps{ + Description: "cluster is the cluster object the GenerateUpgradePlan request corresponds to.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.Cluster"), + }, + }, + "fromControlPlaneKubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "holderReference is a reference to the object where the template is used.", - Default: map[string]interface{}{}, - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.HolderReference"), + Description: "fromControlPlaneKubernetesVersion is the current Kubernetes version of the control plane.", + Type: []string{"string"}, + Format: "", }, }, - "object": { + "fromWorkersKubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "object contains the template as a raw object.", - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Description: "fromWorkersKubernetesVersion is the min current Kubernetes version of the workers.", + Type: []string{"string"}, + Format: "", }, }, - "variables": { + "toKubernetesVersion": { SchemaProps: spec.SchemaProps{ - Description: "variables are variables specific for the current template. 
For example some builtin variables like MachineDeployment replicas and version are context-sensitive and thus are only added to templates for MachineDeployments and with values which correspond to the current MachineDeployment.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Variable"), - }, - }, - }, + Description: "toKubernetesVersion is the target Kubernetes version for the upgrade.", + Type: []string{"string"}, + Format: "", }, }, }, - Required: []string{"uid", "holderReference", "object"}, + Required: []string{"cluster", "fromControlPlaneKubernetesVersion", "toKubernetesVersion"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/runtime.RawExtension", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.HolderReference", "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.Variable"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.Cluster"}, } } -func schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_api_runtime_hooks_v1alpha1_GenerateUpgradePlanResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "GeneratePatchesResponse is the response of the GeneratePatches hook. NOTE: The patches in GeneratePatchesResponse will be applied in the order in which they are defined to the templates of the request. Thus applying changes consecutively when iterating through internal and external patches.", + Description: "GenerateUpgradePlanResponse is the response of the GenerateUpgradePlan hook.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -1529,64 +2547,40 @@ func schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponse(ref common.Refere Format: "", }, }, - "items": { + "controlPlaneUpgrades": { SchemaProps: spec.SchemaProps{ - Description: "items is the list of generated patches.", + Description: "controlPlaneUpgrades is the list of version upgrade steps for the control plane. Each entry represents an intermediate version that must be applied in sequence. The following rules apply: - there should be at least one version for every minor between \t\tfromControlPlaneKubernetesVersion (excluded) and ToKubernetesVersion (included). 
- each version must be:\n - greater than fromControlPlaneKubernetesVersion (or with a different build \tnumber)\n - greater than the previous version in the list (or with a different build number)\n - less or equal to ToKubernetesVersion (or with a different build number)\n - the last version in the plan must be equal to ToKubernetesVersion", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponseItem"), + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStep"), }, }, }, }, }, - }, - Required: []string{"status"}, - }, - }, - Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.GeneratePatchesResponseItem"}, - } -} - -func schema_api_runtime_hooks_v1alpha1_GeneratePatchesResponseItem(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GeneratePatchesResponseItem is a generated patch.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "uid identifies the corresponding template in the request on which the patch should be applied.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "patchType": { - SchemaProps: spec.SchemaProps{ - Description: "patchType defines the type of the patch. One of: \"JSONPatch\" or \"JSONMergePatch\".\n\nPossible enum values:\n - `\"JSONMergePatch\"` identifies a https://datatracker.ietf.org/doc/html/rfc7386 JSON merge patch.\n - `\"JSONPatch\"` identifies a https://datatracker.ietf.org/doc/html/rfc6902 JSON patch.", - Default: "", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"JSONMergePatch", "JSONPatch"}, - }, - }, - "patch": { + "workersUpgrades": { SchemaProps: spec.SchemaProps{ - Description: "patch contains the patch which should be applied to the template. It must be of the corresponding PatchType.", - Type: []string{"string"}, - Format: "byte", + Description: "workersUpgrades is the list of version upgrade steps for the workers. 
Each entry represents an intermediate version that must be applied in sequence.\n\nIn case the upgrade plan for workers will be left to empty, the system will automatically determine the minimal number of workers upgrade steps, thus minimizing impact on workloads and reducing the overall upgrade time.\n\nIf instead for any reason a custom upgrade path for workers is required, the following rules apply: - each version must be:\n - equal to FromControlPlaneKubernetesVersion or to one of the versions in the control plane upgrade plan.\n - greater than FromWorkersKubernetesVersion (or with a different build number)\n - greater than the previous version in the list (or with a different build number)\n - less or equal to the ToKubernetesVersion (or with a different build number)\n - in case of versions with the same major/minor/patch version but different build number, also the order\n of those versions must be the same for control plane and worker upgrade plan.\n - the last version in the plan must be equal to ToKubernetesVersion\n - the upgrade plane must have all the intermediate version which workers must go through to avoid breaking rules\n defining the max version skew between control plane and workers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStep"), + }, + }, + }, }, }, }, - Required: []string{"uid", "patchType", "patch"}, + Required: []string{"status"}, }, }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpgradeStep"}, } } @@ -1871,6 +2865,216 @@ func schema_api_runtime_hooks_v1alpha1_MachinePoolBuiltins(ref common.ReferenceC } } +func schema_api_runtime_hooks_v1alpha1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Patch is a single patch (JSONPatch or JSONMergePatch) which can include multiple operations.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "patchType": { + SchemaProps: spec.SchemaProps{ + Description: "patchType JSONPatch or JSONMergePatch.", + Type: []string{"string"}, + Format: "", + }, + }, + "patch": { + SchemaProps: spec.SchemaProps{ + Description: "patch data for the target object.", + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + Required: []string{"patchType", "patch"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_UpdateMachineRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpdateMachineRequest is the request of the UpdateMachine hook.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
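The upgrade plan rules spelled out in these descriptions are easier to follow with a concrete case: going from v1.31.x to v1.34.0, the control plane plan needs at least one version per intermediate minor and must end on the target version, while the workers plan may be left empty so the system derives the minimal worker steps. A sketch, assuming Go field names ControlPlaneUpgrades/WorkersUpgrades and an UpgradeStep.Version string as suggested by the schema; the patch versions are illustrative:

```go
// Hypothetical GenerateUpgradePlan handler for a v1.31.x -> v1.34.0 upgrade.
package handlers

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1"
)

func GenerateUpgradePlan(ctx context.Context, req *runtimehooksv1.GenerateUpgradePlanRequest, resp *runtimehooksv1.GenerateUpgradePlanResponse) {
	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	// One step per minor between fromControlPlaneKubernetesVersion (excluded) and
	// toKubernetesVersion (included); the last step equals toKubernetesVersion.
	resp.ControlPlaneUpgrades = []runtimehooksv1.UpgradeStep{
		{Version: "v1.32.9"},
		{Version: "v1.33.5"},
		{Version: "v1.34.0"},
	}
	// WorkersUpgrades left empty: the system derives the minimal worker upgrade steps.
}
```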
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "settings defines key value pairs to be passed to the call.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "desired": { + SchemaProps: spec.SchemaProps{ + Description: "desired contains the desired state of the Machine and related objects.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpdateMachineRequestObjects"), + }, + }, + }, + Required: []string{"desired"}, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1.UpdateMachineRequestObjects"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_UpdateMachineRequestObjects(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpdateMachineRequestObjects groups objects for UpdateMachineRequest.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "machine": { + SchemaProps: spec.SchemaProps{ + Description: "machine is the full Machine object.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.Machine"), + }, + }, + "infrastructureMachine": { + SchemaProps: spec.SchemaProps{ + Description: "infrastructureMachine is the infra Machine object.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "bootstrapConfig": { + SchemaProps: spec.SchemaProps{ + Description: "bootstrapConfig is the bootstrap config object.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"machine", "infrastructureMachine"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension", "sigs.k8s.io/cluster-api/api/core/v1beta2.Machine"}, + } +} + +func schema_api_runtime_hooks_v1alpha1_UpdateMachineResponse(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpdateMachineResponse is the response of the UpdateMachine hook. The status of the update operation is determined by the CommonRetryResponse fields: - Status=Success + RetryAfterSeconds > 0: update is in progress - Status=Success + RetryAfterSeconds = 0: update completed successfully - Status=Failure: update failed", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
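UpdateMachineResponse reports progress through the common retry fields, as the description above spells out: Success with retryAfterSeconds > 0 means the in-place update is still running, Success with 0 means it completed, and Failure means it failed. A minimal sketch under the same assumptions as the earlier handler examples; inProgress is a hypothetical stand-in for extension-specific state tracking:

```go
// Hypothetical UpdateMachine handler reporting progress via the common retry fields.
package handlers

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1"
)

func UpdateMachine(ctx context.Context, req *runtimehooksv1.UpdateMachineRequest, resp *runtimehooksv1.UpdateMachineResponse) {
	resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
	if inProgress(req) {
		resp.RetryAfterSeconds = 15 // Success + RetryAfterSeconds > 0: update still in progress
		return
	}
	resp.RetryAfterSeconds = 0 // Success + RetryAfterSeconds = 0: update completed
}

// inProgress stands in for checking whether the desired state has been applied (hypothetical).
func inProgress(_ *runtimehooksv1.UpdateMachineRequest) bool { return false }
```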
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status of the call. One of \"Success\" or \"Failure\".\n\nPossible enum values:\n - `\"Failure\"` represents a failure response.\n - `\"Success\"` represents a success response.", + Default: "", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"Failure", "Success"}, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "message is a human-readable description of the status of the call.", + Type: []string{"string"}, + Format: "", + }, + }, + "retryAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "retryAfterSeconds when set to a non-zero value signifies that the hook will be called again at a future time.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"status", "retryAfterSeconds"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_UpgradeStep(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpgradeStep represents a single version upgrade step.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "version is the Kubernetes version for this upgrade step.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"version"}, + }, + }, + } +} + +func schema_api_runtime_hooks_v1alpha1_UpgradeStepInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpgradeStepInfo provide info about a single version upgrade step.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "version is the Kubernetes version for this upgrade step.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"version"}, + }, + }, + } +} + func schema_api_runtime_hooks_v1alpha1_ValidateTopologyRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml index 13c7c3dfd2c4..2902ef5f8e90 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: kubeadmconfigs.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -641,9 +641,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -816,9 +815,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. 
+ description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -1638,9 +1636,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -1815,9 +1812,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -2084,8 +2080,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2143,6 +2140,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2298,8 +2332,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2357,6 +2392,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. 
+ If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2545,8 +2617,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2604,6 +2677,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2778,8 +2888,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2837,6 +2948,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. 
+ If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3305,9 +3453,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -3706,9 +3853,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -4205,8 +4351,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4264,6 +4411,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4453,8 +4637,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4512,6 +4697,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. 
+ Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4631,6 +4853,22 @@ spec: minLength: 1 type: string type: object + encryptionAlgorithm: + description: |- + encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + Can be one of "RSA-2048", "RSA-3072", "RSA-4096", "ECDSA-P256" or "ECDSA-P384". + For Kubernetes 1.34 or above, "ECDSA-P384" is supported. + If not specified, Cluster API will use RSA-2048 as default. + When this field is modified every certificate generated afterward will use the new + encryptionAlgorithm. Existing CA certificates and service account keys are not rotated. + This field is only supported with Kubernetes v1.31 or above. + enum: + - ECDSA-P256 + - ECDSA-P384 + - RSA-2048 + - RSA-3072 + - RSA-4096 + type: string etcd: description: |- etcd holds configuration for etcd. @@ -4735,8 +4973,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4794,6 +5033,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4898,16 +5174,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string @@ -4959,8 +5226,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5018,6 +5286,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5526,9 +5831,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -6008,9 +6312,8 @@ spec: a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. 
+ description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml index 0c742aa7913a..6a458e6de99a 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io spec: group: bootstrap.cluster.x-k8s.io @@ -661,9 +661,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -838,9 +837,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -1614,9 +1612,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -1794,9 +1791,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -2031,8 +2027,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2091,6 +2088,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2251,8 +2285,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2311,6 +2346,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2505,8 +2577,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2566,6 +2639,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2744,8 +2854,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2804,6 +2915,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3286,9 +3434,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -3695,9 +3842,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -4074,8 +4220,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4134,6 +4281,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4328,8 +4512,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4388,6 +4573,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4512,6 +4734,22 @@ spec: minLength: 1 type: string type: object + encryptionAlgorithm: + description: |- + encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + Can be one of "RSA-2048", "RSA-3072", "RSA-4096", "ECDSA-P256" or "ECDSA-P384". + For Kubernetes 1.34 or above, "ECDSA-P384" is supported. + If not specified, Cluster API will use RSA-2048 as default. + When this field is modified every certificate generated afterward will use the new + encryptionAlgorithm. Existing CA certificates and service account keys are not rotated. + This field is only supported with Kubernetes v1.31 or above. + enum: + - ECDSA-P256 + - ECDSA-P384 + - RSA-2048 + - RSA-3072 + - RSA-4096 + type: string etcd: description: |- etcd holds configuration for etcd. @@ -4618,8 +4856,9 @@ spec: variable present in a Container. 
properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4679,6 +4918,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4787,16 +5063,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string @@ -4848,8 +5115,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4908,6 +5176,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5428,9 +5733,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: @@ -5917,9 +6221,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at + which the taint was added. format: date-time type: string value: diff --git a/bootstrap/kubeadm/config/manager/manager.yaml b/bootstrap/kubeadm/config/manager/manager.yaml index d3bc3688efdb..e874de005b08 100644 --- a/bootstrap/kubeadm/config/manager/manager.yaml +++ b/bootstrap/kubeadm/config/manager/manager.yaml @@ -22,7 +22,7 @@ spec: - "--leader-elect" - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false}" + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false},ReconcilerRateLimiting=${EXP_RECONCILER_RATE_LIMITING:=false}" - "--bootstrap-token-ttl=${KUBEADM_BOOTSTRAP_TOKEN_TTL:=15m}" image: controller:latest name: manager diff --git a/bootstrap/kubeadm/internal/builder/builders.go b/bootstrap/kubeadm/internal/builder/builders.go index f123a69f57b2..7502538a4a88 100644 --- a/bootstrap/kubeadm/internal/builder/builders.go +++ b/bootstrap/kubeadm/internal/builder/builders.go @@ -67,16 +67,14 @@ func (k *KubeadmConfigBuilder) Unstructured() *unstructured.Unstructured { if err != nil { panic(err) } - return &unstructured.Unstructured{Object: rawMap} + u := &unstructured.Unstructured{Object: rawMap} + u.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + return u } // Build produces a KubeadmConfig from the variable in the KubeadmConfigBuilder. 
func (k *KubeadmConfigBuilder) Build() *bootstrapv1.KubeadmConfig { config := &bootstrapv1.KubeadmConfig{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: k.namespace, Name: k.name, diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 837e80b93ee1..d01545898b27 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -37,7 +37,6 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -53,6 +52,7 @@ import ( bsutil "sigs.k8s.io/cluster-api/bootstrap/util" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/feature" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -116,33 +116,26 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl } predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "kubeadmconfig") - b := ctrl.NewControllerManagedBy(mgr). + b := capicontrollerutil.NewControllerManagedBy(mgr, predicateLog). For(&bootstrapv1.KubeadmConfig{}). WithOptions(options). Watches( &clusterv1.Machine{}, handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc), - builder.WithPredicates(predicates.ResourceIsChanged(mgr.GetScheme(), predicateLog)), ).WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)) if feature.Gates.Enabled(feature.MachinePool) { b = b.Watches( &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), - builder.WithPredicates(predicates.ResourceIsChanged(mgr.GetScheme(), predicateLog)), ) } b = b.Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs), - builder.WithPredicates( - predicates.All(mgr.GetScheme(), predicateLog, - predicates.ResourceIsChanged(mgr.GetScheme(), predicateLog), - predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), - predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue), - ), - ), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), + predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue), ).WatchesRawSource(r.ClusterCache.GetClusterSource("kubeadmconfig", r.ClusterToKubeadmConfigs)) if err := b.Complete(r); err != nil { @@ -194,11 +187,6 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Lookup the cluster the config owner is associated with cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) if err != nil { - if errors.Cause(err) == util.ErrNoCluster { - log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind())) - return ctrl.Result{}, nil - } - if apierrors.IsNotFound(err) { log.Info("Cluster does not exist yet, waiting until it is created") return ctrl.Result{}, nil @@ -273,12 +261,6 @@ func (r 
*KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil { rerr = kerrors.NewAggregate([]error{rerr, err}) } - - // Note: controller-runtime logs a warning that non-empty result is ignored - // if error is not nil, so setting result here to empty to avoid noisy warnings. - if rerr != nil { - retRes = ctrl.Result{} - } }() // Ignore deleted KubeadmConfigs. diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go index 8a39706461e8..d95517ab0260 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_reconciler_test.go @@ -18,6 +18,7 @@ package controllers import ( "testing" + "time" . "github.com/onsi/gomega" ctrl "sigs.k8s.io/controller-runtime" @@ -61,7 +62,7 @@ func TestKubeadmConfigReconciler(t *testing.T) { }, }) g.Expect(err).To(Succeed()) - g.Expect(result.Requeue).To(BeFalse()) + g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) }) }) } diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 50c708c77b26..ca514f47cea6 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -124,7 +124,6 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * } result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) } @@ -188,7 +187,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi controllerOwner := metav1.GetControllerOf(actual) g.Expect(controllerOwner).To(Not(BeNil())) - g.Expect(controllerOwner.Kind).To(Equal(config.Kind)) + g.Expect(controllerOwner.Kind).To(Equal("KubeadmConfig")) g.Expect(controllerOwner.Name).To(Equal(config.Name)) }) @@ -200,7 +199,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi controllerOwner := metav1.GetControllerOf(actual) g.Expect(controllerOwner).To(Not(BeNil())) - g.Expect(controllerOwner.Kind).To(Equal(config.Kind)) + g.Expect(controllerOwner.Kind).To(Equal("KubeadmConfig")) g.Expect(controllerOwner.Name).To(Equal(config.Name)) }) t.Run("non-KubeadmConfig controller OwnerReference is replaced", func(*testing.T) { @@ -224,7 +223,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi controllerOwner := metav1.GetControllerOf(actual) g.Expect(controllerOwner).To(Not(BeNil())) - g.Expect(controllerOwner.Kind).To(Equal(config.Kind)) + g.Expect(controllerOwner.Kind).To(Equal("KubeadmConfig")) g.Expect(controllerOwner.Name).To(Equal(config.Name)) }) } @@ -297,7 +296,6 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName actual := &bootstrapv1.KubeadmConfig{} g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: config.Namespace, Name: config.Name}, actual)).To(Succeed()) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) } @@ -476,7 +474,6 @@ func 
TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI result, err := k.Reconcile(ctx, tc.request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition, bootstrapv1.KubeadmConfigDataSecretNotAvailableReason) }) @@ -528,7 +525,6 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault) @@ -631,7 +627,6 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp } result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(10 * time.Second)) actualConfig := &bootstrapv1.KubeadmConfig{} @@ -709,7 +704,6 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe } result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault) @@ -786,7 +780,6 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { } result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, rt.configName, metav1.NamespaceDefault) @@ -993,7 +986,6 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "worker-join-cfg", metav1.NamespaceDefault) @@ -1236,7 +1228,6 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { } { result, err := k.Reconcile(ctx, req) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) } @@ -2085,7 +2076,6 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali } result, err := k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) request = ctrl.Request{ @@ -2096,7 +2086,6 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali } result, err = k.Reconcile(ctx, request) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) confList := &bootstrapv1.KubeadmConfigList{} g.Expect(myclient.List(ctx, confList)).To(Succeed()) @@ -2151,7 +2140,6 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) result, err := k.Reconcile(ctx, request) g.Expect(err).To(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) cfg, err := getKubeadmConfig(myclient, "control-plane-init-cfg", metav1.NamespaceDefault) @@ -2347,7 +2335,6 @@ contexts: user: default name: 
default current-context: default -preferences: {} users: - name: default user: diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go index c4fe0aba558b..ae1032b0254d 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go @@ -276,6 +276,7 @@ func hubClusterConfigurationFuzzer(obj *bootstrapv1.ClusterConfiguration, c rand obj.CertificateValidityPeriodDays = 0 obj.CACertificateValidityPeriodDays = 0 + obj.EncryptionAlgorithm = "" for i, arg := range obj.APIServer.ExtraArgs { if arg.Value == nil { diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go index ea1aee4b6f8b..a0dadb029f93 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go @@ -399,6 +399,7 @@ func autoConvert_v1beta2_ClusterConfiguration_To_upstreamv1beta3_ClusterConfigur out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) // WARNING: in.CertificateValidityPeriodDays requires manual conversion: does not exist in peer-type // WARNING: in.CACertificateValidityPeriodDays requires manual conversion: does not exist in peer-type + // WARNING: in.EncryptionAlgorithm requires manual conversion: does not exist in peer-type return nil } diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go b/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go index e730dac72b11..410d87c8e871 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go @@ -67,7 +67,6 @@ func (dst *JoinConfiguration) ConvertFrom(srcRaw conversion.Hub) error { func Convert_upstreamv1beta4_ClusterConfiguration_To_v1beta2_ClusterConfiguration(in *ClusterConfiguration, out *bootstrapv1.ClusterConfiguration, s apimachineryconversion.Scope) error { // Following fields do not exist in CABPK v1beta1 version: // - Proxy (Not supported yet) - // - EncryptionAlgorithm (Not supported yet) if err := autoConvert_upstreamv1beta4_ClusterConfiguration_To_v1beta2_ClusterConfiguration(in, out, s); err != nil { return err } diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go index 89e2143b8ff0..631d0af42c76 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go @@ -107,7 +107,6 @@ func spokeClusterConfigurationFuzzer(obj *ClusterConfiguration, c randfill.Conti c.FillNoCustom(obj) obj.Proxy = Proxy{} - obj.EncryptionAlgorithm = "" obj.CertificateValidityPeriod = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31n(3*365)+1) * time.Hour * 24}) obj.CACertificateValidityPeriod = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31n(100*365)+1) * time.Hour * 24}) diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go index b2f95bf2569f..5bb43af374a6 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go @@ -430,7 +430,7 @@ func autoConvert_upstreamv1beta4_ClusterConfiguration_To_v1beta2_ClusterConfigur out.ImageRepository = in.ImageRepository out.FeatureGates = 
*(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) // WARNING: in.ClusterName requires manual conversion: does not exist in peer-type - // WARNING: in.EncryptionAlgorithm requires manual conversion: does not exist in peer-type + out.EncryptionAlgorithm = v1beta2.EncryptionAlgorithmType(in.EncryptionAlgorithm) // WARNING: in.CertificateValidityPeriod requires manual conversion: does not exist in peer-type // WARNING: in.CACertificateValidityPeriod requires manual conversion: does not exist in peer-type return nil @@ -458,6 +458,7 @@ func autoConvert_v1beta2_ClusterConfiguration_To_upstreamv1beta4_ClusterConfigur out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) // WARNING: in.CertificateValidityPeriodDays requires manual conversion: does not exist in peer-type // WARNING: in.CACertificateValidityPeriodDays requires manual conversion: does not exist in peer-type + out.EncryptionAlgorithm = EncryptionAlgorithmType(in.EncryptionAlgorithm) return nil } diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index a2dc6eaae90e..c2d157ed9ef2 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -187,10 +187,6 @@ func TestHasNodeRefs(t *testing.T) { t.Run("should return false if there is no nodeRef", func(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", Namespace: metav1.NamespaceDefault, @@ -209,10 +205,6 @@ func TestHasNodeRefs(t *testing.T) { t.Run("should return true if there is a nodeRef for Machine", func(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", Namespace: metav1.NamespaceDefault, @@ -242,10 +234,6 @@ func TestHasNodeRefs(t *testing.T) { machinePools := []clusterv1.MachinePool{ { // No replicas specified (default is 1). No nodeRefs either. 
- TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -253,10 +241,6 @@ func TestHasNodeRefs(t *testing.T) { }, { // 1 replica but no nodeRefs - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -267,10 +251,6 @@ func TestHasNodeRefs(t *testing.T) { }, { // 2 replicas but only 1 nodeRef - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -308,10 +288,6 @@ func TestHasNodeRefs(t *testing.T) { machinePools := []clusterv1.MachinePool{ { // 1 replica (default) and 1 nodeRef - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -328,10 +304,6 @@ func TestHasNodeRefs(t *testing.T) { }, { // 2 replicas and nodeRefs - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -356,10 +328,6 @@ func TestHasNodeRefs(t *testing.T) { }, { // 0 replicas and 0 nodeRef - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "MachinePool", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, Name: "machine-pool-name", @@ -372,12 +340,11 @@ func TestHasNodeRefs(t *testing.T) { for i := range machinePools { content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&machinePools[i]) - if err != nil { - g.Fail(err.Error()) - } - unstructuredOwner := unstructured.Unstructured{} + g.Expect(err).ToNot(HaveOccurred()) + unstructuredOwner := &unstructured.Unstructured{} unstructuredOwner.SetUnstructuredContent(content) - co := ConfigOwner{&unstructuredOwner} + unstructuredOwner.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("MachinePool")) + co := ConfigOwner{unstructuredOwner} result := co.HasNodeRefs() g.Expect(result).To(BeTrue()) diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 08bf16d5a76b..51e42d86c487 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/pkg/errors" @@ -41,6 +42,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" + "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/yaml" @@ -232,8 +234,7 @@ func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *obj // Checking all the clusters have infrastructure is ready readClusterBackoff := newReadBackoff() clusters := graph.getClusters() - for i := range clusters { - cluster := clusters[i] + for _, cluster := range clusters { clusterObj := &clusterv1.Cluster{} if err := retryWithExponentialBackoff(ctx, readClusterBackoff, func(ctx context.Context) error { return getClusterObj(ctx, o.fromProxy, cluster, clusterObj) @@ -297,6 +298,25 @@ func 
getClusterObj(ctx context.Context, proxy Proxy, cluster *node, clusterObj * return nil } +// getClusterClassObj retrieves the clusterClassObj corresponding to a node with type ClusterClass. +func getClusterClassObj(ctx context.Context, proxy Proxy, clusterClass *node, clusterClassObj *clusterv1.ClusterClass) error { + c, err := proxy.NewClient(ctx) + if err != nil { + return err + } + + clusterClassObjKey := client.ObjectKey{ + Namespace: clusterClass.identity.Namespace, + Name: clusterClass.identity.Name, + } + + if err := c.Get(ctx, clusterClassObjKey, clusterClassObj); err != nil { + return errors.Wrapf(err, "error reading ClusterClass %s/%s", + clusterClass.identity.Namespace, clusterClass.identity.Name) + } + return nil +} + // getMachineObj retrieves the machineObj corresponding to a node with type Machine. func getMachineObj(ctx context.Context, proxy Proxy, machine *node, machineObj *clusterv1.Machine) error { c, err := proxy.NewClient(ctx) @@ -320,9 +340,17 @@ func (o *objectMover) move(ctx context.Context, graph *objectGraph, toProxy Prox log := logf.Log clusters := graph.getClusters() + if err := checkClustersNotPaused(ctx, o.fromProxy, clusters); err != nil { + return err + } + log.Info("Moving Cluster API objects", "Clusters", len(clusters)) clusterClasses := graph.getClusterClasses() + if err := checkClusterClassesNotPaused(ctx, o.fromProxy, clusterClasses); err != nil { + return err + } + log.Info("Moving Cluster API objects", "ClusterClasses", len(clusterClasses)) // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. @@ -395,9 +423,17 @@ func (o *objectMover) toDirectory(ctx context.Context, graph *objectGraph, direc log := logf.Log clusters := graph.getClusters() + if err := checkClustersNotPaused(ctx, o.fromProxy, clusters); err != nil { + return err + } + log.Info("Starting move of Cluster API objects", "Clusters", len(clusters)) clusterClasses := graph.getClusterClasses() + if err := checkClusterClassesNotPaused(ctx, o.fromProxy, clusterClasses); err != nil { + return err + } + log.Info("Moving Cluster API objects", "ClusterClasses", len(clusterClasses)) // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. @@ -570,8 +606,7 @@ func setClusterPause(ctx context.Context, proxy Proxy, clusters []*node, value b patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%s}}", patchValue))) setClusterPauseBackoff := newWriteBackoff() - for i := range clusters { - cluster := clusters[i] + for _, cluster := range clusters { log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "Cluster", klog.KRef(cluster.identity.Namespace, cluster.identity.Name)) // Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions. @@ -593,8 +628,7 @@ func setClusterClassPause(ctx context.Context, proxy Proxy, clusterclasses []*no log := logf.Log setClusterClassPauseBackoff := newWriteBackoff() - for i := range clusterclasses { - clusterclass := clusterclasses[i] + for _, clusterclass := range clusterclasses { if pause { log.V(5).Info("Set Paused annotation", "ClusterClass", clusterclass.identity.Name, "Namespace", clusterclass.identity.Namespace) } else { @@ -611,6 +645,44 @@ func setClusterClassPause(ctx context.Context, proxy Proxy, clusterclasses []*no return nil } +// checkClustersNotPaused checks that no cluster in the graph is paused before proceeding. 
+func checkClustersNotPaused(ctx context.Context, proxy Proxy, clusters []*node) error { + paused := []string{} + for _, cluster := range clusters { + clusterObj := &clusterv1.Cluster{} + if err := getClusterObj(ctx, proxy, cluster, clusterObj); err != nil { + return err + } + + if ptr.Deref(clusterObj.Spec.Paused, false) || annotations.HasPaused(clusterObj) { + paused = append(paused, fmt.Sprintf("%s/%s", clusterObj.Namespace, clusterObj.Name)) + } + } + if len(paused) > 0 { + return errors.Errorf("cannot start operation while the following Clusters are paused: %s", strings.Join(paused, ", ")) + } + return nil +} + +// checkClusterClassesNotPaused checks that no clusterClass in the graph is paused before proceeding. +func checkClusterClassesNotPaused(ctx context.Context, proxy Proxy, clusterClasses []*node) error { + paused := []string{} + for _, clusterClass := range clusterClasses { + clusterClassObj := &clusterv1.ClusterClass{} + if err := getClusterClassObj(ctx, proxy, clusterClass, clusterClassObj); err != nil { + return err + } + + if annotations.HasPaused(clusterClassObj) { + paused = append(paused, fmt.Sprintf("%s/%s", clusterClassObj.Namespace, clusterClassObj.Name)) + } + } + if len(paused) > 0 { + return errors.Errorf("cannot start operation while the following ClusterClasses are paused: %s", strings.Join(paused, ", ")) + } + return nil +} + func waitReadyForMove(ctx context.Context, proxy Proxy, nodes []*node, dryRun bool, backoff wait.Backoff) error { if dryRun { return nil @@ -723,7 +795,8 @@ func pauseClusterClass(ctx context.Context, proxy Proxy, n *node, pause bool, mu ObjectMeta: metav1.ObjectMeta{ Name: n.identity.Name, Namespace: n.identity.Namespace, - }}, mutators...) + }, + }, mutators...) if err != nil { return err } @@ -1072,7 +1145,7 @@ func (o *objectMover) backupTargetObject(ctx context.Context, nodeToCreate *node } } - err = os.WriteFile(objectFile, byObj, 0600) + err = os.WriteFile(objectFile, byObj, 0o600) if err != nil { return err } @@ -1173,7 +1246,6 @@ func (o *objectMover) deleteGroup(ctx context.Context, group moveGroup) error { err := retryWithExponentialBackoff(ctx, deleteSourceObjectBackoff, func(ctx context.Context) error { return o.deleteSourceObject(ctx, nodeToDelete) }) - if err != nil { errList = append(errList, err) } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index e72793011c32..a7f48f29e2db 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -98,6 +98,40 @@ var moveTests = []struct { }, wantErr: false, }, + { + name: "Paused Cluster", + fields: moveTestsFields{ + objs: test.NewFakeCluster("ns1", "foo").WithPaused().Objs(), + }, + wantMoveGroups: [][]string{ + { // group 1 + clusterv1.GroupVersion.String() + ", Kind=Cluster, ns1/foo", + }, + { // group 2 (objects with ownerReferences in group 1) + // owned by Clusters + "/v1, Kind=Secret, ns1/foo-ca", + "/v1, Kind=Secret, ns1/foo-kubeconfig", + clusterv1.GroupVersionInfrastructure.String() + ", Kind=GenericInfrastructureCluster, ns1/foo", + }, + }, + wantErr: true, + }, + { + name: "Paused ClusterClass", + fields: moveTestsFields{ + objs: test.NewFakeClusterClass("ns1", "class1").WithPaused().Objs(), + }, + wantMoveGroups: [][]string{ + { // group 1 + clusterv1.GroupVersion.String() + ", Kind=ClusterClass, ns1/class1", + }, + { // group 2 + clusterv1.GroupVersionInfrastructure.String() + ", Kind=GenericInfrastructureClusterTemplate, ns1/class1", + 
clusterv1.GroupVersionControlPlane.String() + ", Kind=GenericControlPlaneTemplate, ns1/class1", + }, + }, + wantErr: true, + }, { name: "Cluster with cloud config secret with the force move label", fields: moveTestsFields{ @@ -693,10 +727,10 @@ var backupRestoreTests = []struct { objs: test.NewFakeCluster("ns1", "foo").Objs(), }, files: map[string]string{ - "Cluster_ns1_foo.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns1/foo"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"foo"}}}` + "\n", - "Secret_ns1_foo-kubeconfig.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n", - "Secret_ns1_foo-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n", - "GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n", + "Cluster_ns1_foo.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns1/foo"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"foo"}}}` + "\n", + "Secret_ns1_foo-kubeconfig.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n", + "Secret_ns1_foo-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n", + "GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n", }, wantErr: false, }, @@ -711,14 +745,14 @@ var backupRestoreTests = []struct { }(), }, files: map[string]string{ - "Cluster_ns1_foo.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns1/foo"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"foo"}}}` + "\n", - "Secret_ns1_foo-kubeconfig.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, 
ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n", - "Secret_ns1_foo-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n", - "GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n", - "Cluster_ns2_bar.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"bar","namespace":"ns2","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns2/bar"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"bar"}}}` + "\n", - "Secret_ns2_bar-kubeconfig.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"bar-kubeconfig","namespace":"ns2","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"bar","uid":"$CAPI, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-kubeconfig"}}` + "\n", - "Secret_ns2_bar-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"bar-ca","namespace":"ns2","resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-ca"}}` + "\n", - "GenericInfrastructureCluster_ns2_bar.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"bar"},"name":"bar","namespace":"ns2","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"bar","uid":"$CAPI, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns2/bar"}}` + "\n", + "Cluster_ns1_foo.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns1/foo"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"foo"}}}` + "\n", + "Secret_ns1_foo-kubeconfig.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n", + "Secret_ns1_foo-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n", + "GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"foo","uid":"$CAPI, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n", + "Cluster_ns2_bar.yaml": `{"apiVersion":"$CAPI","kind":"Cluster","metadata":{"name":"bar","namespace":"ns2","resourceVersion":"999","uid":"$CAPI, Kind=Cluster, ns2/bar"},"spec":{"infrastructureRef":{"apiGroup":"$INFRA_GROUP","kind":"GenericInfrastructureCluster","name":"bar"}}}` + "\n", + "Secret_ns2_bar-kubeconfig.yaml": 
`{"apiVersion":"v1","kind":"Secret","metadata":{"name":"bar-kubeconfig","namespace":"ns2","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"bar","uid":"$CAPI, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-kubeconfig"}}` + "\n", + "Secret_ns2_bar-ca.yaml": `{"apiVersion":"v1","kind":"Secret","metadata":{"name":"bar-ca","namespace":"ns2","resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-ca"}}` + "\n", + "GenericInfrastructureCluster_ns2_bar.yaml": `{"apiVersion":"$INFRA","kind":"GenericInfrastructureCluster","metadata":{"labels":{"cluster.x-k8s.io/cluster-name":"bar"},"name":"bar","namespace":"ns2","ownerReferences":[{"apiVersion":"$CAPI","kind":"Cluster","name":"bar","uid":"$CAPI, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"$INFRA, Kind=GenericInfrastructureCluster, ns2/bar"}}` + "\n", }, wantErr: false, }, @@ -923,8 +957,29 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { } func Test_objectMover_toDirectory(t *testing.T) { - // NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process - for _, tt := range backupRestoreTests { + tests := []struct { + name string + fields moveTestsFields + files map[string]string + wantErr bool + }{ + { + name: "Cluster is paused", + fields: moveTestsFields{ + objs: test.NewFakeCluster("ns1", "foo").WithPaused().Objs(), + }, + wantErr: true, + }, + { + name: "ClusterClass is paused", + fields: moveTestsFields{ + objs: test.NewFakeClusterClass("ns1", "foo").WithPaused().Objs(), + }, + wantErr: true, + }, + } + tests = append(tests, backupRestoreTests...) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) diff --git a/cmd/clusterctl/client/cluster/template.go b/cmd/clusterctl/client/cluster/template.go index 872fd4eed6c2..1e4e32da9aa7 100644 --- a/cmd/clusterctl/client/cluster/template.go +++ b/cmd/clusterctl/client/cluster/template.go @@ -303,10 +303,12 @@ func getGitHubClient(ctx context.Context, configVariablesClient config.Variables return github.NewClient(authenticatingHTTPClient), nil } +var errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable") + // handleGithubErr wraps error messages. func handleGithubErr(err error, message string, args ...interface{}) error { if _, ok := err.(*github.RateLimitError); ok { - return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable") + return errRateLimit } return errors.Wrapf(err, message, args...) } diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go index c1042fafa9c5..fbeb6857b02e 100644 --- a/cmd/clusterctl/client/cluster/template_test.go +++ b/cmd/clusterctl/client/cluster/template_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/go-github/v53/github" . 
"github.com/onsi/gomega" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -513,6 +514,43 @@ func Test_templateClient_GetFromURL(t *testing.T) { } } +func Test_handleGithubErr(t *testing.T) { + tests := []struct { + name string + err error + message string + args []any + want error + }{ + { + name: "Return error", + err: errors.New("error"), + message: "message %s and %s", + args: []any{"arg1", "arg2"}, + want: fmt.Errorf("message arg1 and arg2: %w", errors.New("error")), + }, + { + name: "Return RateLimitError", + err: &github.RateLimitError{ + Response: &http.Response{ + StatusCode: http.StatusForbidden, + }, + }, + message: "", + args: nil, + want: errRateLimit, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got := handleGithubErr(tt.err, tt.message, tt.args...) + g.Expect(got.Error()).To(Equal(tt.want.Error())) + }) + } +} + func mustParseURL(rawURL string) *url.URL { rURL, err := url.Parse(rawURL) if err != nil { diff --git a/cmd/clusterctl/client/cluster/upgrader_info_test.go b/cmd/clusterctl/client/cluster/upgrader_info_test.go index d13138ae3111..9397a1b6d98a 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_info_test.go @@ -491,10 +491,6 @@ func toSemanticVersions(versions []string) []version.Version { func fakeProvider(name string, providerType clusterctlv1.ProviderType, version, targetNamespace string) clusterctlv1.Provider { return clusterctlv1.Provider{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "Provider", - }, ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "999", Namespace: targetNamespace, diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go index 4de55ec40454..8c4d00c243fe 100644 --- a/cmd/clusterctl/client/config/providers_client.go +++ b/cmd/clusterctl/client/config/providers_client.go @@ -73,6 +73,7 @@ const ( VultrProviderName = "vultr-vultr" OpenNebulaProviderName = "opennebula" ScalewayProviderName = "scaleway" + MetalStackProviderName = "metal-stack" ) // Bootstrap providers. @@ -97,6 +98,7 @@ const ( RKE2ControlPlaneProviderName = "rke2" K0smotronControlPlaneProviderName = "k0sproject-k0smotron" CanonicalKubernetesControlPlaneProviderName = "canonical-kubernetes" + HCPControlPlaneProviderName = "hosted-control-plane" ) // IPAM providers. 
@@ -341,6 +343,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/scaleway/cluster-api-provider-scaleway/releases/latest/infrastructure-components.yaml", providerType: clusterctlv1.InfrastructureProviderType, }, + &provider{ + name: MetalStackProviderName, + url: "https://github.com/metal-stack/cluster-api-provider-metal-stack/releases/latest/infrastructure-components.yaml", + providerType: clusterctlv1.InfrastructureProviderType, + }, // Bootstrap providers &provider{ @@ -425,6 +432,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/canonical/cluster-api-k8s/releases/latest/control-plane-components.yaml", providerType: clusterctlv1.ControlPlaneProviderType, }, + &provider{ + name: HCPControlPlaneProviderName, + url: "https://github.com/teutonet/cluster-api-provider-hosted-control-plane/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, // IPAM providers &provider{ diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index 1236c19f2efe..557271af405c 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -64,6 +64,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.RKE2BootstrapProviderName, config.TalosBootstrapProviderName, config.CanonicalKubernetesControlPlaneProviderName, + config.HCPControlPlaneProviderName, config.K0smotronControlPlaneProviderName, config.KamajiControlPlaneProviderName, config.KubeadmControlPlaneProviderName, @@ -91,6 +92,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.KubevirtProviderName, config.LinodeProviderName, config.MAASProviderName, + config.MetalStackProviderName, config.Metal3ProviderName, config.NestedProviderName, config.NutanixProviderName, @@ -134,6 +136,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.RKE2BootstrapProviderName, config.TalosBootstrapProviderName, config.CanonicalKubernetesControlPlaneProviderName, + config.HCPControlPlaneProviderName, config.K0smotronControlPlaneProviderName, config.KamajiControlPlaneProviderName, config.KubeadmControlPlaneProviderName, @@ -161,6 +164,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.KubevirtProviderName, config.LinodeProviderName, config.MAASProviderName, + config.MetalStackProviderName, config.Metal3ProviderName, config.NestedProviderName, config.NutanixProviderName, diff --git a/cmd/clusterctl/client/describe.go b/cmd/clusterctl/client/describe.go index 03c77a287088..96b91d2102b1 100644 --- a/cmd/clusterctl/client/describe.go +++ b/cmd/clusterctl/client/describe.go @@ -35,7 +35,7 @@ type DescribeClusterOptions struct { ClusterName string // ShowOtherConditions is a list of comma separated kind or kind/name for which we should add the ShowObjectConditionsAnnotation - // to signal to the presentation layer to show all the conditions for the objects. + // to signal to the presentation layer to show conditions for the objects. ShowOtherConditions string // ShowMachineSets instructs the discovery process to include machine sets in the ObjectTree. 
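A note on the repository_github.go changes in the next file sections: handleGithubErr now returns the package-level sentinel errRateLimit (and wraps every other error with %w), so the retry loops can detect rate limiting with errors.Is on the value returned by handleGithubErr instead of type-asserting the raw *github.RateLimitError. A minimal, self-contained sketch of that pattern follows; the classify helper and its messages are illustrative, not part of the clusterctl code:

package main

import (
	"errors"
	"fmt"
)

// Sentinel error; errors.Is matches it even through layers of %w wrapping.
var errRateLimit = errors.New("rate limit for github api has been reached")

// classify maps a low-level failure either to the sentinel or to a wrapped error.
func classify(err error, rateLimited bool) error {
	if rateLimited {
		return errRateLimit
	}
	return fmt.Errorf("failed to get the list of releases: %w", err)
}

func main() {
	fmt.Println(errors.Is(classify(errors.New("boom"), true), errRateLimit))  // true
	fmt.Println(errors.Is(classify(errors.New("boom"), false), errRateLimit)) // false
}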
diff --git a/cmd/clusterctl/client/repository/components_test.go b/cmd/clusterctl/client/repository/components_test.go index 0035cbac1619..f86c70e58ce3 100644 --- a/cmd/clusterctl/client/repository/components_test.go +++ b/cmd/clusterctl/client/repository/components_test.go @@ -241,8 +241,7 @@ func Test_fixTargetNamespace(t *testing.T) { "annotations": map[string]interface{}{ "cert-manager.io/inject-ca-from": "bar/capm3-serving-cert", }, - "creationTimestamp": nil, - "name": "capm3-mutating-webhook-configuration", + "name": "capm3-mutating-webhook-configuration", }, "webhooks": []interface{}{ map[string]interface{}{ @@ -329,8 +328,7 @@ func Test_fixTargetNamespace(t *testing.T) { "annotations": map[string]interface{}{ "cert-manager.io/inject-ca-from": "bar/capm3-serving-cert", }, - "creationTimestamp": nil, - "name": "capm3-mutating-webhook-configuration", + "name": "capm3-mutating-webhook-configuration", }, "webhooks": []interface{}{ map[string]interface{}{ @@ -390,8 +388,7 @@ func Test_fixTargetNamespace(t *testing.T) { "annotations": map[string]interface{}{ "cert-manager.io/inject-ca-from": "bar/capm3-serving-cert", }, - "creationTimestamp": nil, - "name": "aCoolName", + "name": "aCoolName", }, "spec": map[string]interface{}{ "group": "", @@ -491,8 +488,7 @@ func Test_fixTargetNamespace(t *testing.T) { "annotations": map[string]interface{}{ "cert-manager.io/inject-ca-from": "bar/capm3-serving-cert", }, - "creationTimestamp": nil, - "name": "aCoolName", + "name": "aCoolName", }, "spec": map[string]interface{}{ "group": "", diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index 16b828ef34ed..c44d92170028 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -50,7 +50,8 @@ const ( ) var ( - errNotFound = errors.New("404 Not Found") + errNotFound = errors.New("404 Not Found") + errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable") // Caches used to limit the number of GitHub API calls. @@ -319,7 +320,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) { if listReleasesErr != nil { retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases") // Return immediately if we are rate limited. - if _, ok := listReleasesErr.(*github.RateLimitError); ok { + if errors.Is(retryError, errRateLimit) { return false, retryError } return false, nil @@ -334,7 +335,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) { if listReleasesErr != nil { retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases") // Return immediately if we are rate limited. - if _, ok := listReleasesErr.(*github.RateLimitError); ok { + if errors.Is(retryError, errRateLimit) { return false, retryError } return false, nil @@ -384,7 +385,7 @@ func (g *gitHubRepository) getReleaseByTag(ctx context.Context, tag string) (*gi return false, retryError } // Return immediately if we are rate limited. 
- if _, ok := getReleasesErr.(*github.RateLimitError); ok { + if errors.Is(retryError, errRateLimit) { return false, retryError } return false, nil @@ -466,7 +467,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release if downloadReleaseError != nil { retryError = g.handleGithubErr(downloadReleaseError, "failed to download file %q from %q release", *release.TagName, fileName) // Return immediately if we are rate limited. - if _, ok := downloadReleaseError.(*github.RateLimitError); ok { + if errors.Is(retryError, errRateLimit) { return false, retryError } return false, nil @@ -500,12 +501,13 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release // handleGithubErr wraps error messages. func (g *gitHubRepository) handleGithubErr(err error, message string, args ...interface{}) error { if _, ok := err.(*github.RateLimitError); ok { - return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable") + return errRateLimit } - if ghErr, ok := err.(*github.ErrorResponse); ok { - if ghErr.Response.StatusCode == http.StatusNotFound { - return errNotFound - } + + var ghErr *github.ErrorResponse + if errors.As(err, &ghErr) && ghErr.Response.StatusCode == http.StatusNotFound { + return errNotFound } - return errors.Wrapf(err, message, args...) + + return fmt.Errorf("%s: %w", fmt.Sprintf(message, args...), err) } diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index 07eed06cc85e..ed689e9a8d4e 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-github/v53/github" . "github.com/onsi/gomega" + "github.com/pkg/errors" "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" @@ -1108,3 +1109,54 @@ func Test_gitHubRepository_releaseNotFound(t *testing.T) { }) } } + +func Test_handleGithubErr(t *testing.T) { + tests := []struct { + name string + err error + message string + args []any + want error + }{ + { + name: "Return error", + err: errors.New("error"), + message: "message %s and %s", + args: []any{"arg1", "arg2"}, + want: fmt.Errorf("message arg1 and arg2: %w", errors.New("error")), + }, + { + name: "Return RateLimitError", + err: &github.RateLimitError{ + Response: &http.Response{ + StatusCode: http.StatusForbidden, + }, + }, + message: "", + args: nil, + want: errRateLimit, + }, + { + name: "Return ErrorResponse", + err: &github.ErrorResponse{ + Response: &http.Response{ + StatusCode: http.StatusNotFound, + }, + }, + message: "", + args: nil, + want: errNotFound, + }, + } + + gRepo := &gitHubRepository{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got := gRepo.handleGithubErr(tt.err, tt.message, tt.args...) + g.Expect(got.Error()).To(Equal(tt.want.Error())) + }) + } +} diff --git a/cmd/clusterctl/client/tree/annotations.go b/cmd/clusterctl/client/tree/annotations.go index b8c2fa88bc1c..b287a52156c4 100644 --- a/cmd/clusterctl/client/tree/annotations.go +++ b/cmd/clusterctl/client/tree/annotations.go @@ -23,7 +23,8 @@ import ( ) const ( - // ShowObjectConditionsAnnotation documents that the presentation layer should show all the conditions for the object. 
+ // ShowObjectConditionsAnnotation documents that the presentation layer should show conditions for the object + // and the filter to select those conditions. ShowObjectConditionsAnnotation = "tree.cluster.x-k8s.io.io/show-conditions" // ObjectMetaNameAnnotation contains the meta name that should be used for the object in the presentation layer, @@ -73,6 +74,26 @@ const ( ObjectZOrderAnnotation = "tree.cluster.x-k8s.io.io/z-order" ) +// ConditionFilterType defines the type for condition filters. +type ConditionFilterType string + +const ( + // ShownNoConditions should be used when no conditions must be used for an object. + ShownNoConditions ConditionFilterType = "" + + // ShowAllConditions should be used when all the conditions for an object must be shown. + ShowAllConditions ConditionFilterType = "All" + + // ShowNonZeroConditions should be used when only non-zero conditions for an object must be shown. + // Non-zero conditions are conditions with a message set or with status different from the normal state + // for a given condition polarity (e.g. for positive polarity normal state is True, so the non-zero + // status are Unknown and False). + ShowNonZeroConditions ConditionFilterType = "NonZero" +) + +// ShowNonZeroConditionsSuffix defines the suffix to be used when the ShowNonZeroConditions filter should be applied. +const ShowNonZeroConditionsSuffix = "+" + // GetMetaName returns the object meta name that should be used for the object in the presentation layer, if defined. func GetMetaName(obj client.Object) string { if val, ok := getAnnotation(obj, ObjectMetaNameAnnotation); ok { @@ -181,12 +202,22 @@ func IsVirtualObject(obj client.Object) bool { return false } -// IsShowConditionsObject returns true if the presentation layer should show all the conditions for the object. -func IsShowConditionsObject(obj client.Object) bool { - if val, ok := getBoolAnnotation(obj, ShowObjectConditionsAnnotation); ok { - return val +// ShowConditionsFilter returns the filter to be used by the presentation layer when showing conditions +// for an object. +func ShowConditionsFilter(obj client.Object) ConditionFilterType { + switch val, _ := getAnnotation(obj, ShowObjectConditionsAnnotation); val { + case "All": + return ShowAllConditions + case "NonZero": + return ShowNonZeroConditions } - return false + return ShownNoConditions +} + +// IsShowConditionsObject returns true if the presentation layer should show all the conditions for the object +// or a subset of them. +func IsShowConditionsObject(obj client.Object) bool { + return ShowConditionsFilter(obj) != ShownNoConditions } func getAnnotation(obj client.Object, annotation string) (string, bool) { diff --git a/cmd/clusterctl/client/tree/discovery.go b/cmd/clusterctl/client/tree/discovery.go index d8ae0aaf6b71..bc49f4df225f 100644 --- a/cmd/clusterctl/client/tree/discovery.go +++ b/cmd/clusterctl/client/tree/discovery.go @@ -19,9 +19,11 @@ package tree import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" addonsv1 "sigs.k8s.io/cluster-api/api/addons/v1beta2" @@ -34,7 +36,7 @@ import ( // DiscoverOptions define options for the discovery process. 
type DiscoverOptions struct { // ShowOtherConditions is a list of comma separated kind or kind/name for which we should add the ShowObjectConditionsAnnotation - // to signal to the presentation layer to show all the conditions for the objects. + // to signal to the presentation layer to show conditions for the objects. ShowOtherConditions string // ShowMachineSets instructs the discovery process to include machine sets in the ObjectTree. @@ -110,7 +112,9 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt addAnnotation(controlPlane, ObjectContractAnnotation, "ControlPlane") addAnnotation(controlPlane, ObjectContractVersionAnnotation, contractVersion) - addControlPlane(cluster, controlPlane, tree, options) + if err := addControlPlane(ctx, c, cluster, controlPlane, tree, options); err != nil { + return nil, err + } } // Adds control plane machines. @@ -120,7 +124,7 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt } machineMap := map[string]bool{} addMachineFunc := func(parent client.Object, m *clusterv1.Machine) { - _, visible := tree.Add(parent, m) + _, visible := tree.Add(parent, m, GroupVersionKind(clusterv1.GroupVersion.WithKind("Machine"))) machineMap[m.Name] = true if visible { @@ -204,30 +208,51 @@ func addClusterResourceSetsToObjectTree(ctx context.Context, c client.Client, cl } } -func addControlPlane(cluster *clusterv1.Cluster, controlPlane *unstructured.Unstructured, tree *ObjectTree, options DiscoverOptions) { +func addControlPlane(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, controlPlane *unstructured.Unstructured, tree *ObjectTree, options DiscoverOptions) error { tree.Add(cluster, controlPlane, ObjectMetaName("ControlPlane"), GroupingObject(true)) if options.ShowTemplates { // Add control plane infrastructure ref using spec fields guaranteed in contract - infrastructureRef, found, err := unstructured.NestedMap(controlPlane.UnstructuredContent(), "spec", "machineTemplate", "infrastructureRef") - if err == nil && found { - infrastructureObjectRef := &corev1.ObjectReference{ - Kind: infrastructureRef["kind"].(string), - Namespace: infrastructureRef["namespace"].(string), - Name: infrastructureRef["name"].(string), - APIVersion: infrastructureRef["apiVersion"].(string), - } + contractVersion, err := contract.GetContractVersionForVersion(ctx, c, controlPlane.GroupVersionKind().GroupKind(), controlPlane.GroupVersionKind().Version) + if err != nil { + return errors.Wrapf(err, "failed to get contract version for the ControlPlane object") + } - machineTemplateRefObject := ObjectReferenceObject(infrastructureObjectRef) - var templateParent client.Object - if options.AddTemplateVirtualNode { - templateParent = addTemplateVirtualNode(tree, controlPlane, cluster.Namespace) - } else { - templateParent = controlPlane + var infrastructureObjectRef *corev1.ObjectReference + if contractVersion == "v1beta1" { + currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureV1Beta1Ref().Get(controlPlane) + if err != nil { + return nil //nolint:nilerr // intentionally ignoring the error here because infraRef in CP is optional } - tree.Add(templateParent, machineTemplateRefObject, ObjectMetaName("MachineInfrastructureTemplate")) + infrastructureObjectRef = currentRef + } else { + currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(controlPlane) + if err != nil { + return nil //nolint:nilerr // intentionally ignoring the error here because infraRef in CP is optional + 
} + apiVersion, err := contract.GetAPIVersion(ctx, c, currentRef.GroupKind()) + if err != nil { + return err + } + infrastructureObjectRef = &corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: currentRef.Kind, + Namespace: controlPlane.GetNamespace(), + Name: currentRef.Name, + } + } + + machineTemplateRefObject := ObjectReferenceObject(infrastructureObjectRef) + var templateParent client.Object + if options.AddTemplateVirtualNode { + templateParent = addTemplateVirtualNode(tree, controlPlane, cluster.Namespace) + } else { + templateParent = controlPlane } + tree.Add(templateParent, machineTemplateRefObject, ObjectMetaName("MachineInfrastructureTemplate")) } + + return nil } func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, workers *NodeObject, machinesList *clusterv1.MachineList, tree *ObjectTree, options DiscoverOptions, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) error { @@ -248,6 +273,7 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus if !options.ShowMachineSets { addOpts = append(addOpts, GroupingObject(true)) } + addOpts = append(addOpts, GroupVersionKind(clusterv1.GroupVersion.WithKind("MachineDeployment"))) tree.Add(workers, md, addOpts...) if options.ShowTemplates { @@ -286,17 +312,17 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus tree.Add(templateParent, machineTemplateRefObject, ObjectMetaName("MachineInfrastructureTemplate")) } - machineSets := selectMachinesSetsControlledBy(machineSetList, md) + machineSets := selectMachinesSetsControlledBy(machineSetList, md, clusterv1.GroupVersion.WithKind("MachineDeployment").GroupKind()) for i := range machineSets { ms := machineSets[i] var parent client.Object = md if options.ShowMachineSets { - tree.Add(md, ms, GroupingObject(true)) + tree.Add(md, ms, GroupingObject(true), GroupVersionKind(clusterv1.GroupVersion.WithKind("MachineSet"))) parent = ms } - machines := selectMachinesControlledBy(machinesList, ms) + machines := selectMachinesControlledBy(machinesList, ms, clusterv1.GroupVersion.WithKind("MachineSet").GroupKind()) for _, w := range machines { addMachineFunc(parent, w) } @@ -309,7 +335,7 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, workers *NodeObject, machinePoolList *clusterv1.MachinePoolList, machinesList *clusterv1.MachineList, tree *ObjectTree, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) { for i := range machinePoolList.Items { mp := &machinePoolList.Items[i] - _, visible := tree.Add(workers, mp, GroupingObject(true)) + _, visible := tree.Add(workers, mp, GroupingObject(true), GroupVersionKind(clusterv1.GroupVersion.WithKind("MachinePool"))) if visible { if machinePoolBootstrap, err := external.GetObjectFromContractVersionedRef(ctx, c, mp.Spec.Template.Spec.Bootstrap.ConfigRef, mp.Namespace); err == nil { @@ -321,7 +347,7 @@ func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, workers * } } - machines := selectMachinesControlledBy(machinesList, mp) + machines := selectMachinesControlledBy(machinesList, mp, clusterv1.GroupVersion.WithKind("MachinePool").GroupKind()) for _, m := range machines { addMachineFunc(mp, m) } @@ -417,22 +443,22 @@ func selectControlPlaneMachines(machineList *clusterv1.MachineList) []*clusterv1 return machines } -func selectMachinesSetsControlledBy(machineSetList *clusterv1.MachineSetList, controller 
client.Object) []*clusterv1.MachineSet { +func selectMachinesSetsControlledBy(machineSetList *clusterv1.MachineSetList, controller client.Object, controllerGK schema.GroupKind) []*clusterv1.MachineSet { machineSets := []*clusterv1.MachineSet{} for i := range machineSetList.Items { m := &machineSetList.Items[i] - if util.IsControlledBy(m, controller) { + if util.IsControlledBy(m, controller, controllerGK) { machineSets = append(machineSets, m) } } return machineSets } -func selectMachinesControlledBy(machineList *clusterv1.MachineList, controller client.Object) []*clusterv1.Machine { +func selectMachinesControlledBy(machineList *clusterv1.MachineList, controller client.Object, controllerGK schema.GroupKind) []*clusterv1.Machine { machines := []*clusterv1.Machine{} for i := range machineList.Items { m := &machineList.Items[i] - if util.IsControlledBy(m, controller) { + if util.IsControlledBy(m, controller, controllerGK) { machines = append(machines, m) } } diff --git a/cmd/clusterctl/client/tree/options.go b/cmd/clusterctl/client/tree/options.go index cb52a426d2b3..7e898d6329f6 100644 --- a/cmd/clusterctl/client/tree/options.go +++ b/cmd/clusterctl/client/tree/options.go @@ -16,16 +16,22 @@ limitations under the License. package tree +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" +) + // AddObjectOption define an option for the ObjectTree Add operation. type AddObjectOption interface { ApplyToAdd(*addObjectOptions) } type addObjectOptions struct { - MetaName string - GroupingObject bool - NoEcho bool - ZOrder int + GroupVersionKind *schema.GroupVersionKind + MetaName string + GroupingObject bool + NoEcho bool + ZOrder int } func (o *addObjectOptions) ApplyOptions(opts []AddObjectOption) *addObjectOptions { @@ -35,6 +41,16 @@ func (o *addObjectOptions) ApplyOptions(opts []AddObjectOption) *addObjectOption return o } +// GroupVersionKind is the gvk to set on the passed in obj. +// This option has to be used if obj is a typed object and +// it cannot be guaranteed that gvk is set. +type GroupVersionKind schema.GroupVersionKind + +// ApplyToAdd applies the given options. +func (n GroupVersionKind) ApplyToAdd(options *addObjectOptions) { + options.GroupVersionKind = ptr.To(schema.GroupVersionKind(n)) +} + // The ObjectMetaName option defines the meta name that should be used for the object in the presentation layer, // e.g. control plane for KCP. type ObjectMetaName string diff --git a/cmd/clusterctl/client/tree/tree.go b/cmd/clusterctl/client/tree/tree.go index 13cbc3140b64..4de695c1bf08 100644 --- a/cmd/clusterctl/client/tree/tree.go +++ b/cmd/clusterctl/client/tree/tree.go @@ -34,7 +34,7 @@ import ( // ObjectTreeOptions defines the options for an ObjectTree. type ObjectTreeOptions struct { // ShowOtherConditions is a list of comma separated kind or kind/name for which we should add the ShowObjectConditionsAnnotation - // to signal to the presentation layer to show all the conditions for the objects. + // to signal to the presentation layer to show the conditions for the objects and also which filter to apply. ShowOtherConditions string // ShowMachineSets instructs the discovery process to include machine sets in the ObjectTree. @@ -74,11 +74,9 @@ type ObjectTree struct { // NewObjectTree creates a new object tree with the given root and options. 
func NewObjectTree(root client.Object, options ObjectTreeOptions) *ObjectTree { - // If it is requested to show all the conditions for the root, add + // If it is requested to show conditions for the root, add // the ShowObjectConditionsAnnotation to signal this to the presentation layer. - if isObjDebug(root, options.ShowOtherConditions) { - addAnnotation(root, ShowObjectConditionsAnnotation, "True") - } + addAnnotation(root, ShowObjectConditionsAnnotation, string(showConditions(root, options.ShowOtherConditions))) return &ObjectTree{ root: root, @@ -97,6 +95,10 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad addOpts := &addObjectOptions{} addOpts.ApplyOptions(opts) + if addOpts.GroupVersionKind != nil { + obj.GetObjectKind().SetGroupVersionKind(*addOpts.GroupVersionKind) + } + // Get a small set of conditions that will be used to determine e.g. when grouping or when an object is just an echo of // its parent. var objReadyV1Beta1, parentReadyV1Beta1 *clusterv1.Condition @@ -112,11 +114,9 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad parentReady = GetReadyCondition(parent) } - // If it is requested to show all the conditions for the object, add + // If it is requested to show conditions for the object, add // the ShowObjectConditionsAnnotation to signal this to the presentation layer. - if isObjDebug(obj, od.options.ShowOtherConditions) { - addAnnotation(obj, ShowObjectConditionsAnnotation, "True") - } + addAnnotation(obj, ShowObjectConditionsAnnotation, string(showConditions(obj, od.options.ShowOtherConditions))) // If echo should be dropped from the ObjectTree, return if the object's ready condition is true, and it is the same it has of parent's object ready condition (it is an echo). // Note: the Echo option applies only for infrastructure machine or bootstrap config objects, and for those objects only Ready condition makes sense. 
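The showConditions helper rewritten in the next hunk maps the --show-conditions filter string to a ConditionFilterType: "all", a kind, or a kind/name selects ShowAllConditions, while the same tokens with the "+" suffix (ShowNonZeroConditionsSuffix) select ShowNonZeroConditions, i.e. only conditions that have a message or a status other than the normal one for their polarity. As a rough usage sketch with the clusterctl describe cluster flag wired up in the describe_cluster.go hunk further below (cluster and machine names here are illustrative):

clusterctl describe cluster my-cluster --show-conditions all                   # every condition on every object
clusterctl describe cluster my-cluster --show-conditions Machine+              # only non-zero conditions on Machines
clusterctl describe cluster my-cluster --show-conditions Machine/my-machine+   # only non-zero conditions on that Machine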
@@ -493,28 +493,36 @@ func updateV1Beta1GroupNode(groupObj client.Object, groupReady *clusterv1.Condit } } -func isObjDebug(obj client.Object, debugFilter string) bool { - if debugFilter == "" { - return false +func showConditions(obj client.Object, showOtherConditions string) ConditionFilterType { + if showOtherConditions == "" { + return ShownNoConditions } - for _, filter := range strings.Split(strings.ToLower(debugFilter), ",") { - filter = strings.TrimSpace(filter) + for _, filter := range strings.Split(showOtherConditions, ",") { if filter == "" { continue } - if strings.EqualFold(filter, "all") { - return true + if strings.EqualFold("all", strings.TrimSuffix(filter, ShowNonZeroConditionsSuffix)) { + if strings.HasSuffix(filter, ShowNonZeroConditionsSuffix) { + return ShowNonZeroConditions + } + return ShowAllConditions } kn := strings.Split(filter, "/") if len(kn) == 2 { - if strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) == kn[0] && obj.GetName() == kn[1] { - return true + if strings.EqualFold(obj.GetObjectKind().GroupVersionKind().Kind, kn[0]) && strings.EqualFold(obj.GetName(), strings.TrimSuffix(kn[1], ShowNonZeroConditionsSuffix)) { + if strings.HasSuffix(kn[1], ShowNonZeroConditionsSuffix) { + return ShowNonZeroConditions + } + return ShowAllConditions } continue } - if strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) == kn[0] { - return true + if strings.EqualFold(obj.GetObjectKind().GroupVersionKind().Kind, strings.TrimSuffix(kn[0], ShowNonZeroConditionsSuffix)) { + if strings.HasSuffix(kn[0], ShowNonZeroConditionsSuffix) { + return ShowNonZeroConditions + } + return ShowAllConditions } } - return false + return ShownNoConditions } diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index c9195e4155bc..f58f82683766 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -438,7 +438,7 @@ func Test_minLastTransitionTimeV1Beta1(t *testing.T) { } } -func Test_isObjDebug(t *testing.T) { +func Test_showConditions(t *testing.T) { obj := fakeMachine("my-machine") type args struct { filter string @@ -446,56 +446,77 @@ func Test_isObjDebug(t *testing.T) { tests := []struct { name string args args - want bool + want ConditionFilterType }{ { - name: "empty filter should return false", + name: "empty filter should return empty string", args: args{ filter: "", }, - want: false, + want: ShownNoConditions, }, { - name: "all filter should return true", + name: "all filter should return All", args: args{ filter: "all", }, - want: true, + want: ShowAllConditions, }, { - name: "kind filter should return true", + name: "kind filter should return All", args: args{ filter: "Machine", }, - want: true, + want: ShowAllConditions, }, { - name: "another kind filter should return false", + name: "another kind filter should return empty string", args: args{ filter: "AnotherKind", }, - want: false, + want: ShownNoConditions, }, { - name: "kind/name filter should return true", + name: "kind/name filter should return All", args: args{ filter: "Machine/my-machine", }, - want: true, + want: ShowAllConditions, }, { - name: "kind/wrong name filter should return false", + name: "kind/wrong name filter should return empty string", args: args{ filter: "Cluster/another-cluster", }, - want: false, + want: ShownNoConditions, + }, + { + name: "all! filter should return NonZero", + args: args{ + filter: "all" + ShowNonZeroConditionsSuffix, + }, + want: ShowNonZeroConditions, + }, + { + name: "kind! 
filter should return NonZero", + args: args{ + filter: "Machine" + ShowNonZeroConditionsSuffix, + }, + want: ShowNonZeroConditions, + }, + { + name: "kind/name filter should return NonZero", + args: args{ + filter: "Machine/my-machine" + ShowNonZeroConditionsSuffix, + }, + want: ShowNonZeroConditions, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got := isObjDebug(obj, tt.args.filter) + got := showConditions(obj, tt.args.filter) g.Expect(got).To(Equal(tt.want)) }) } @@ -842,21 +863,28 @@ func Test_Add_setsShowObjectConditionsAnnotation(t *testing.T) { tests := []struct { name string args args - want bool + want string }{ { - name: "filter selecting my machine should not add the annotation", + name: "filter selecting my machine should add the annotation with All", args: args{ treeOptions: ObjectTreeOptions{ShowOtherConditions: "all"}, }, - want: true, + want: "All", }, { - name: "filter not selecting my machine should not add the annotation", + name: "filter selecting my machine should add the annotation with NonZero", + args: args{ + treeOptions: ObjectTreeOptions{ShowOtherConditions: "all" + ShowNonZeroConditionsSuffix}, + }, + want: "NonZero", + }, + { + name: "filter not selecting my machine should add the annotation with empty value", args: args{ treeOptions: ObjectTreeOptions{ShowOtherConditions: ""}, }, - want: false, + want: "", }, } for _, tt := range tests { @@ -874,13 +902,7 @@ func Test_Add_setsShowObjectConditionsAnnotation(t *testing.T) { gotObj := tree.GetObject("my-machine") g.Expect(gotObj).ToNot(BeNil()) - switch tt.want { - case true: - g.Expect(gotObj.GetAnnotations()).To(HaveKey(ShowObjectConditionsAnnotation)) - g.Expect(gotObj.GetAnnotations()[ShowObjectConditionsAnnotation]).To(Equal("True")) - case false: - g.Expect(gotObj.GetAnnotations()).ToNot(HaveKey(ShowObjectConditionsAnnotation)) - } + g.Expect(gotObj.GetAnnotations()).To(HaveKeyWithValue(ShowObjectConditionsAnnotation, tt.want)) }) } } diff --git a/cmd/clusterctl/client/upgrade_test.go b/cmd/clusterctl/client/upgrade_test.go index d5df8f6e1862..bf633f021d0e 100644 --- a/cmd/clusterctl/client/upgrade_test.go +++ b/cmd/clusterctl/client/upgrade_test.go @@ -365,10 +365,6 @@ func fakeClientForUpgrade() *fakeClient { func fakeProvider(name string, providerType clusterctlv1.ProviderType, version, targetNamespace string) clusterctlv1.Provider { return clusterctlv1.Provider{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "Provider", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: targetNamespace, Name: clusterctlv1.ManifestLabel(name, providerType), diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go index 3edd6dc88c1b..cd504714d0b7 100644 --- a/cmd/clusterctl/cmd/config_repositories_test.go +++ b/cmd/clusterctl/cmd/config_repositories_test.go @@ -109,6 +109,7 @@ microk8s BootstrapProvider https://github.com/canonical/ rke2 BootstrapProvider https://github.com/rancher/cluster-api-provider-rke2/releases/latest/ bootstrap-components.yaml talos BootstrapProvider https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml canonical-kubernetes ControlPlaneProvider https://github.com/canonical/cluster-api-k8s/releases/latest/ control-plane-components.yaml +hosted-control-plane ControlPlaneProvider https://github.com/teutonet/cluster-api-provider-hosted-control-plane/releases/latest/ control-plane-components.yaml k0sproject-k0smotron 
ControlPlaneProvider https://github.com/k0sproject/k0smotron/releases/latest/ control-plane-components.yaml kamaji ControlPlaneProvider https://github.com/clastix/cluster-api-control-plane-provider-kamaji/releases/latest/ control-plane-components.yaml kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml @@ -136,6 +137,7 @@ kubekey InfrastructureProvider https://github.com/kubesphere kubevirt InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/ infrastructure-components.yaml linode-linode InfrastructureProvider https://github.com/linode/cluster-api-provider-linode/releases/latest/ infrastructure-components.yaml maas InfrastructureProvider https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ infrastructure-components.yaml +metal-stack InfrastructureProvider https://github.com/metal-stack/cluster-api-provider-metal-stack/releases/latest/ infrastructure-components.yaml metal3 InfrastructureProvider https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/ infrastructure-components.yaml my-infra-provider InfrastructureProvider /home/.config/cluster-api/overrides/infrastructure-docker/latest/ infrastructure-components.yaml nested InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ infrastructure-components.yaml @@ -202,6 +204,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: canonical-kubernetes ProviderType: ControlPlaneProvider URL: https://github.com/canonical/cluster-api-k8s/releases/latest/ +- File: control-plane-components.yaml + Name: hosted-control-plane + ProviderType: ControlPlaneProvider + URL: https://github.com/teutonet/cluster-api-provider-hosted-control-plane/releases/latest/ - File: control-plane-components.yaml Name: k0sproject-k0smotron ProviderType: ControlPlaneProvider @@ -310,6 +316,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: maas ProviderType: InfrastructureProvider URL: https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ +- File: infrastructure-components.yaml + Name: metal-stack + ProviderType: InfrastructureProvider + URL: https://github.com/metal-stack/cluster-api-provider-metal-stack/releases/latest/ - File: infrastructure-components.yaml Name: metal3 ProviderType: InfrastructureProvider diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go index 626339d6bdc7..0ef994dafc5c 100644 --- a/cmd/clusterctl/cmd/describe_cluster.go +++ b/cmd/clusterctl/cmd/describe_cluster.go @@ -18,6 +18,7 @@ package cmd import ( "context" + "fmt" "os" "github.com/fatih/color" @@ -26,6 +27,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/cmd/clusterctl/client" + "sigs.k8s.io/cluster-api/cmd/clusterctl/client/tree" "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/internal/templates" cmdtree "sigs.k8s.io/cluster-api/internal/util/tree" ) @@ -93,7 +95,7 @@ func init() { "The namespace where the workload cluster is located. 
If unspecified, the current namespace will be used.") describeClusterClusterCmd.Flags().StringVar(&dc.showOtherConditions, "show-conditions", "", - "list of comma separated kind or kind/name for which the command should show all the object's conditions (use 'all' to show conditions for everything).") + fmt.Sprintf("list of comma separated kind or kind/name for which the command should show all the object's conditions (use 'all' to show conditions for everything, use the %s suffix to show only non-zero conditions).", tree.ShowNonZeroConditionsSuffix)) describeClusterClusterCmd.Flags().BoolVar(&dc.showMachineSets, "show-machinesets", false, "Show MachineSet objects.") describeClusterClusterCmd.Flags().BoolVar(&dc.showClusterResourceSets, "show-resourcesets", false, @@ -158,9 +160,13 @@ func runDescribeCluster(cmd *cobra.Command, name string) error { switch dc.v1beta2 { case true: - cmdtree.PrintObjectTree(tree, os.Stdout) + if err := cmdtree.PrintObjectTree(tree, os.Stdout); err != nil { + return errors.Wrap(err, "failed to print object tree") + } default: - cmdtree.PrintObjectTreeV1Beta1(tree) + if err := cmdtree.PrintObjectTreeV1Beta1(tree); err != nil { + return errors.Wrap(err, "failed to print object tree v1beta1") + } } return nil diff --git a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml index 1eeae9cb934b..7ca550194471 100644 --- a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml +++ b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_metadata.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: metadata.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io diff --git a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml index 8d7e2cc0f02a..4e9f47ffd652 100644 --- a/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml +++ b/cmd/clusterctl/config/crd/bases/clusterctl.cluster.x-k8s.io_providers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: providers.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io diff --git a/cmd/clusterctl/config/manifest/clusterctl-api.yaml b/cmd/clusterctl/config/manifest/clusterctl-api.yaml index 9f3f6ab67262..e906a9fe4aa4 100644 --- a/cmd/clusterctl/config/manifest/clusterctl-api.yaml +++ b/cmd/clusterctl/config/manifest/clusterctl-api.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: providers.clusterctl.cluster.x-k8s.io spec: group: clusterctl.cluster.x-k8s.io diff --git a/cmd/clusterctl/hack/create-local-repository.py b/cmd/clusterctl/hack/create-local-repository.py index 4c00ce651321..69eefcbf5966 100755 --- a/cmd/clusterctl/hack/create-local-repository.py +++ b/cmd/clusterctl/hack/create-local-repository.py @@ -54,30 +54,30 @@ providers = { 'cluster-api': { 'componentsFile': 'core-components.yaml', - 'nextVersion': 'v1.11.99', + 'nextVersion': 'v1.12.99', 'type': 'CoreProvider', }, 
'bootstrap-kubeadm': { 'componentsFile': 'bootstrap-components.yaml', - 'nextVersion': 'v1.11.99', + 'nextVersion': 'v1.12.99', 'type': 'BootstrapProvider', 'configFolder': 'bootstrap/kubeadm/config/default', }, 'control-plane-kubeadm': { 'componentsFile': 'control-plane-components.yaml', - 'nextVersion': 'v1.11.99', + 'nextVersion': 'v1.12.99', 'type': 'ControlPlaneProvider', 'configFolder': 'controlplane/kubeadm/config/default', }, 'infrastructure-docker': { 'componentsFile': 'infrastructure-components-development.yaml', - 'nextVersion': 'v1.11.99', + 'nextVersion': 'v1.12.99', 'type': 'InfrastructureProvider', 'configFolder': 'test/infrastructure/docker/config/default', }, 'runtime-extension-test': { 'componentsFile': 'runtime-extension-components-development.yaml', - 'nextVersion': 'v1.11.99', + 'nextVersion': 'v1.12.99', 'type': 'RuntimeExtensionProvider', 'configFolder': 'test/extension/config/default', }, diff --git a/cmd/clusterctl/internal/test/fake_objects.go b/cmd/clusterctl/internal/test/fake_objects.go index c91ee9bcf40c..2799fa4ebf9c 100644 --- a/cmd/clusterctl/internal/test/fake_objects.go +++ b/cmd/clusterctl/internal/test/fake_objects.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -47,6 +48,7 @@ import ( type FakeCluster struct { namespace string name string + paused bool controlPlane *FakeControlPlane machinePools []*FakeMachinePool machineDeployments []*FakeMachineDeployment @@ -116,6 +118,11 @@ func (f *FakeCluster) WithTopologyClassNamespace(namespace string) *FakeCluster return f } +func (f *FakeCluster) WithPaused() *FakeCluster { + f.paused = true + return f +} + func (f *FakeCluster) Objs() []client.Object { clusterInfrastructure := &fakeinfrastructure.GenericInfrastructureCluster{ TypeMeta: metav1.TypeMeta{ @@ -160,6 +167,10 @@ func (f *FakeCluster) Objs() []client.Object { } } + if f.paused { + cluster.Spec.Paused = ptr.To(true) + } + // Ensure the cluster gets a UID to be used by dependant objects for creating OwnerReferences. 
setUID(cluster) @@ -358,11 +369,12 @@ func (f *FakeControlPlane) Objs(cluster *clusterv1.Cluster) []client.Object { }, Spec: fakecontrolplane.GenericControlPlaneSpec{ MachineTemplate: fakecontrolplane.GenericMachineTemplate{ - InfrastructureRef: corev1.ObjectReference{ - APIVersion: controlPlaneInfrastructure.APIVersion, - Kind: controlPlaneInfrastructure.Kind, - Namespace: controlPlaneInfrastructure.Namespace, - Name: controlPlaneInfrastructure.Name, + Spec: fakecontrolplane.GenericMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: fakeinfrastructure.GroupVersion.Group, + Kind: controlPlaneInfrastructure.Kind, + Name: controlPlaneInfrastructure.Name, + }, }, }, }, @@ -1484,6 +1496,7 @@ func FakeCRDList() []*apiextensionsv1.CustomResourceDefinition { type FakeClusterClass struct { namespace string name string + paused bool infrastructureClusterTemplate *unstructured.Unstructured controlPlaneTemplate *unstructured.Unstructured controlPlaneInfrastructureMachineTemplate *unstructured.Unstructured @@ -1517,6 +1530,11 @@ func (f *FakeClusterClass) WithWorkerMachineDeploymentClasses(classes []*FakeMac return f } +func (f *FakeClusterClass) WithPaused() *FakeClusterClass { + f.paused = true + return f +} + func (f *FakeClusterClass) Objs() []client.Object { // objMap map where the key is the object to which the owner reference to the cluster class should be added // and the value dictates if the onwner ref needs to be added. @@ -1544,6 +1562,10 @@ func (f *FakeClusterClass) Objs() []client.Object { objMap[f.controlPlaneInfrastructureMachineTemplate] = true } + if f.paused { + clusterClassBuilder.WithAnnotations(map[string]string{clusterv1.PausedAnnotation: "true"}) + } + if len(f.workerMachineDeploymentClasses) > 0 { mdClasses := []clusterv1.MachineDeploymentClass{} for _, fakeMDClass := range f.workerMachineDeploymentClasses { @@ -1555,11 +1577,15 @@ func (f *FakeClusterClass) Objs() []client.Object { } clusterClass := clusterClassBuilder.Build() + clusterClass.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("ClusterClass")) objMap[clusterClass] = false for o := range objMap { setUID(o) } + // GVK should be only set for setUID to avoid the wrong assumption that GVK is set on a + // ClusterClass in other parts of the code. + clusterClass.SetGroupVersionKind(schema.GroupVersionKind{}) for o, setOwnerReference := range objMap { if setOwnerReference { diff --git a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go index b4a130942c87..4dd0bac89056 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/generic_types.go @@ -17,13 +17,19 @@ limitations under the License. package controlplane import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // GenericMachineTemplate contains a generic control plane spec. type GenericMachineTemplate struct { - InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` + Spec GenericMachineTemplateSpec `json:"spec"` +} + +// GenericMachineTemplateSpec contains a generic control plane spec. +type GenericMachineTemplateSpec struct { + InfrastructureRef clusterv1.ContractVersionedObjectReference `json:"infrastructureRef"` } // GenericControlPlaneSpec contains a generic control plane spec. 
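For context on the generic_types.go change above: the fake control plane's machine template now nests the infrastructure reference under spec and switches from a corev1.ObjectReference to clusterv1.ContractVersionedObjectReference, which carries only apiGroup/kind/name; the namespace is taken from the referencing object and the apiVersion is resolved from the provider contract (see the contract.GetAPIVersion call in the discovery.go hunk earlier). A rough sketch of the two reference shapes, with illustrative field values and assuming the same v1beta2 core API import used in the diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

func main() {
	// Old shape: a fully qualified object reference, including apiVersion and namespace.
	oldRef := corev1.ObjectReference{
		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
		Kind:       "GenericInfrastructureMachineTemplate",
		Namespace:  "ns1",
		Name:       "cp-infra",
	}

	// New shape: group/kind/name only; version and namespace are resolved by the consumer.
	newRef := clusterv1.ContractVersionedObjectReference{
		APIGroup: "infrastructure.cluster.x-k8s.io",
		Kind:     "GenericInfrastructureMachineTemplate",
		Name:     "cp-infra",
	}

	fmt.Printf("old: %+v\nnew: %+v\n", oldRef, newRef)
}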
diff --git a/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go b/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go index aacc2ef45d3d..06b3473ff854 100644 --- a/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go +++ b/cmd/clusterctl/internal/test/providers/controlplane/zz_generated.deepcopy.go @@ -101,7 +101,7 @@ func (in *GenericControlPlaneSpec) DeepCopy() *GenericControlPlaneSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericMachineTemplate) DeepCopyInto(out *GenericMachineTemplate) { *out = *in - out.InfrastructureRef = in.InfrastructureRef + out.Spec = in.Spec } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericMachineTemplate. @@ -113,3 +113,19 @@ func (in *GenericMachineTemplate) DeepCopy() *GenericMachineTemplate { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericMachineTemplateSpec) DeepCopyInto(out *GenericMachineTemplateSpec) { + *out = *in + out.InfrastructureRef = in.InfrastructureRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericMachineTemplateSpec. +func (in *GenericMachineTemplateSpec) DeepCopy() *GenericMachineTemplateSpec { + if in == nil { + return nil + } + out := new(GenericMachineTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/clusterctl/internal/util/doc.go b/cmd/clusterctl/internal/util/doc.go index 7bda1e336e87..3325276e7904 100644 --- a/cmd/clusterctl/internal/util/doc.go +++ b/cmd/clusterctl/internal/util/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package util implements clusterctl utilty functions. +// Package util implements clusterctl utility functions. 
package util diff --git a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml index aaada266bed9..22c512caa66b 100644 --- a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml +++ b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesetbindings.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusterresourcesetbindings.addons.cluster.x-k8s.io spec: group: addons.cluster.x-k8s.io diff --git a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml index e566096811dd..7cd655dd2853 100644 --- a/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml +++ b/config/crd/bases/addons.cluster.x-k8s.io_clusterresourcesets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusterresourcesets.addons.cluster.x-k8s.io spec: group: addons.cluster.x-k8s.io diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index dd3ca76f608e..b9dab0c42ba8 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusterclasses.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -2969,6 +2969,53 @@ spec: format: int32 minimum: 0 type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. 
+ format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, Available, + HealthCheckSucceeded, OwnerRemediated, ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic unhealthyNodeConditions: description: |- unhealthyNodeConditions contains a list of conditions that determine @@ -2989,7 +3036,7 @@ spec: description: |- timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. - For example, with a value of "1h", the node must match the status + For example, with a value of "3600", the node must match the status for at least 1 hour before being considered unhealthy. format: int32 minimum: 0 @@ -3320,6 +3367,20 @@ spec: required: - templateRef type: object + kubernetesVersions: + description: |- + kubernetesVersions is the list of Kubernetes versions that can be + used for clusters using this ClusterClass. + The list of version must be ordered from the older to the newer version, and there should be + at least one version for every minor in between the first and the last version. + items: + maxLength: 256 + minLength: 1 + type: string + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic patches: description: |- patches defines the patches which are applied to customize @@ -3545,6 +3606,24 @@ spec: minItems: 1 type: array x-kubernetes-list-type: atomic + upgrade: + description: upgrade defines the upgrade configuration for clusters + using this ClusterClass. + minProperties: 1 + properties: + external: + description: external defines external runtime extensions for + upgrade operations. + minProperties: 1 + properties: + generateUpgradePlanExtension: + description: generateUpgradePlanExtension references an extension + which is called to generate upgrade plan. + maxLength: 512 + minLength: 1 + type: string + type: object + type: object variables: description: |- variables defines the variables which can be configured @@ -4114,6 +4193,54 @@ spec: format: int32 minimum: 0 type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one + of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. 
+ format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, + Available, HealthCheckSucceeded, OwnerRemediated, + ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic unhealthyNodeConditions: description: |- unhealthyNodeConditions contains a list of conditions that determine @@ -4134,7 +4261,7 @@ spec: description: |- timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. - For example, with a value of "1h", the node must match the status + For example, with a value of "3600", the node must match the status for at least 1 hour before being considered unhealthy. format: int32 minimum: 0 diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 6bae359e86fa..2fc0166c8643 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusters.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -855,9 +855,6 @@ spec: description: port is the port on which the API server is serving. format: int32 type: integer - required: - - host - - port type: object controlPlaneRef: description: |- @@ -2534,6 +2531,54 @@ spec: format: int32 minimum: 0 type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one of + True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. 
+ format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, + Available, HealthCheckSucceeded, OwnerRemediated, + ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic unhealthyNodeConditions: description: |- unhealthyNodeConditions contains a list of conditions that determine @@ -2554,7 +2599,7 @@ spec: description: |- timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. - For example, with a value of "1h", the node must match the status + For example, with a value of "3600", the node must match the status for at least 1 hour before being considered unhealthy. format: int32 minimum: 0 @@ -2935,6 +2980,54 @@ spec: format: int32 minimum: 0 type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, + one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. + format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, + Available, HealthCheckSucceeded, OwnerRemediated, + ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic unhealthyNodeConditions: description: |- unhealthyNodeConditions contains a list of conditions that determine @@ -2955,7 +3048,7 @@ spec: description: |- timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. - For example, with a value of "1h", the node must match the status + For example, with a value of "3600", the node must match the status for at least 1 hour before being considered unhealthy. 
format: int32 minimum: 0 diff --git a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml index db9491037f84..58cba176177b 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machinedeployments.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -2340,6 +2340,77 @@ spec: x-kubernetes-list-map-keys: - conditionType x-kubernetes-list-type: map + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + items: + description: MachineTaint defines a taint equivalent to + corev1.Taint, but additionally having a propagation field. + properties: + effect: + description: effect is the effect for the taint. Valid + values are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + x-kubernetes-validations: + - message: key must be a valid qualified name of max + size 63 characters with an optional subdomain prefix + of max size 253 characters + rule: 'self.contains(''/'') ? ( self.split(''/'') + [0].size() <= 253 && self.split(''/'') [1].size() + <= 63 && self.split(''/'').size() == 2 ) : self.size() + <= 63' + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. + maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map version: description: |- version defines the desired Kubernetes version. 
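Illustration only, not part of the patch: a hedged fragment showing how the taints list added to the machine template schema above could be set on a MachineDeployment. Field names and enum values are taken from the CRD hunk; the resource names, taint key and value are placeholders, and all other required fields are elided.

# Fragment of a MachineDeployment using the new taints field (other required fields elided).
apiVersion: cluster.x-k8s.io/v1beta2
kind: MachineDeployment
metadata:
  name: example-md                    # placeholder
spec:
  clusterName: example-cluster        # placeholder
  template:
    spec:
      taints:
      - key: example.com/dedicated    # placeholder; must be a valid qualified name
        value: capi-managed           # placeholder; must be a valid label value
        effect: NoSchedule            # NoSchedule | PreferNoSchedule | NoExecute
        propagation: Always           # Always (keep reconciling) | OnInitialization (add once)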
diff --git a/config/crd/bases/cluster.x-k8s.io_machinedrainrules.yaml b/config/crd/bases/cluster.x-k8s.io_machinedrainrules.yaml index 927182c1b857..4e9f1f649529 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinedrainrules.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinedrainrules.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machinedrainrules.cluster.x-k8s.io spec: group: cluster.x-k8s.io diff --git a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml index db99e6286fd6..908a50fa4c74 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machinehealthchecks.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -1065,6 +1065,53 @@ spec: format: int32 minimum: 0 type: integer + unhealthyMachineConditions: + description: |- + unhealthyMachineConditions contains a list of the machine conditions that determine + whether a machine is considered unhealthy. The conditions are combined in a + logical OR, i.e. if any of the conditions is met, the machine is unhealthy. + items: + description: |- + UnhealthyMachineCondition represents a Machine condition type and value with a timeout + specified as a duration. When the named condition has been in the given + status for at least the timeout value, a machine is considered unhealthy. + properties: + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + timeoutSeconds: + description: |- + timeoutSeconds is the duration that a machine must be in a given status for, + after which the machine is considered unhealthy. + For example, with a value of "3600", the machine must match the status + for at least 1 hour before being considered unhealthy. + format: int32 + minimum: 0 + type: integer + type: + description: type of Machine condition + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + x-kubernetes-validations: + - message: 'type must not be one of: Ready, Available, HealthCheckSucceeded, + OwnerRemediated, ExternallyRemediated' + rule: '!(self in [''Ready'',''Available'',''HealthCheckSucceeded'',''OwnerRemediated'',''ExternallyRemediated''])' + required: + - status + - timeoutSeconds + - type + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-type: atomic unhealthyNodeConditions: description: |- unhealthyNodeConditions contains a list of conditions that determine @@ -1085,7 +1132,7 @@ spec: description: |- timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. - For example, with a value of "1h", the node must match the status + For example, with a value of "3600", the node must match the status for at least 1 hour before being considered unhealthy. 
format: int32 minimum: 0 diff --git a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml index d8094cc1053a..e968bc6d1524 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machinepools.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -1944,6 +1944,77 @@ spec: x-kubernetes-list-map-keys: - conditionType x-kubernetes-list-type: map + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + items: + description: MachineTaint defines a taint equivalent to + corev1.Taint, but additionally having a propagation field. + properties: + effect: + description: effect is the effect for the taint. Valid + values are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + x-kubernetes-validations: + - message: key must be a valid qualified name of max + size 63 characters with an optional subdomain prefix + of max size 253 characters + rule: 'self.contains(''/'') ? ( self.split(''/'') + [0].size() <= 253 && self.split(''/'') [1].size() + <= 63 && self.split(''/'').size() == 2 ) : self.size() + <= 63' + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. + maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map version: description: |- version defines the desired Kubernetes version. 
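Also illustration only: a hedged fragment of the unhealthyMachineConditions list added to the MachineHealthCheck, Cluster and ClusterClass schemas above, shown next to the existing unhealthyNodeConditions list it parallels. The condition type and timeouts are placeholders; per the CEL rule, the reserved types Ready, Available, HealthCheckSucceeded, OwnerRemediated and ExternallyRemediated are rejected.

# Fragment only: the new machine-condition checks alongside the node-condition checks.
unhealthyMachineConditions:
- type: ExampleVendorHealthy      # placeholder custom Machine condition type
  status: "False"                 # "True" | "False" | Unknown
  timeoutSeconds: 300             # unhealthy after 5 minutes in this status
unhealthyNodeConditions:
- type: Ready
  status: Unknown
  timeoutSeconds: 300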
diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index 7de8401c07e7..d2df0cd5d44c 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machines.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -1754,6 +1754,76 @@ spec: x-kubernetes-list-map-keys: - conditionType x-kubernetes-list-type: map + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + items: + description: MachineTaint defines a taint equivalent to corev1.Taint, + but additionally having a propagation field. + properties: + effect: + description: effect is the effect for the taint. Valid values + are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + x-kubernetes-validations: + - message: key must be a valid qualified name of max size 63 + characters with an optional subdomain prefix of max size + 253 characters + rule: 'self.contains(''/'') ? ( self.split(''/'') [0].size() + <= 253 && self.split(''/'') [1].size() <= 63 && self.split(''/'').size() + == 2 ) : self.size() <= 63' + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. + OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. + maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map version: description: |- version defines the desired Kubernetes version. 
@@ -1797,7 +1867,7 @@ spec: - address - type type: object - maxItems: 32 + maxItems: 128 type: array x-kubernetes-list-type: atomic certificatesExpiryDate: @@ -1810,7 +1880,7 @@ spec: description: |- conditions represents the observations of a Machine's current state. Known condition types are Available, Ready, UpToDate, BootstrapConfigReady, InfrastructureReady, NodeReady, - NodeHealthy, Deleting, Paused. + NodeHealthy, Updating, Deleting, Paused. If a MachineHealthCheck is targeting this machine, also HealthCheckSucceeded, OwnerRemediated conditions are added. Additionally control plane Machines controlled by KubeadmControlPlane will have following additional conditions: APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy, EtcdMemberHealthy. @@ -2122,6 +2192,7 @@ spec: - Provisioning - Provisioned - Running + - Updating - Deleting - Deleted - Failed diff --git a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml index c3ecdaa0ac3c..6f7e928372ee 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: machinesets.cluster.x-k8s.io spec: group: cluster.x-k8s.io @@ -1996,6 +1996,77 @@ spec: x-kubernetes-list-map-keys: - conditionType x-kubernetes-list-type: map + taints: + description: |- + taints are the node taints that Cluster API will manage. + This list is not necessarily complete: other Kubernetes components may add or remove other taints from nodes, + e.g. the node controller might add the node.kubernetes.io/not-ready taint. + Only those taints defined in this list will be added or removed by core Cluster API controllers. + + There can be at most 64 taints. + A pod would have to tolerate all existing taints to run on the corresponding node. + + NOTE: This list is implemented as a "map" type, meaning that individual elements can be managed by different owners. + items: + description: MachineTaint defines a taint equivalent to + corev1.Taint, but additionally having a propagation field. + properties: + effect: + description: effect is the effect for the taint. Valid + values are NoSchedule, PreferNoSchedule and NoExecute. + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + type: string + key: + description: |- + key is the taint key to be applied to a node. + Must be a valid qualified name of maximum size 63 characters + with an optional subdomain prefix of maximum size 253 characters, + separated by a `/`. + maxLength: 317 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + type: string + x-kubernetes-validations: + - message: key must be a valid qualified name of max + size 63 characters with an optional subdomain prefix + of max size 253 characters + rule: 'self.contains(''/'') ? ( self.split(''/'') + [0].size() <= 253 && self.split(''/'') [1].size() + <= 63 && self.split(''/'').size() == 2 ) : self.size() + <= 63' + propagation: + description: |- + propagation defines how this taint should be propagated to nodes. + Valid values are 'Always' and 'OnInitialization'. + Always: The taint will be continuously reconciled. If it is not set for a node, it will be added during reconciliation. 
+ OnInitialization: The taint will be added during node initialization. If it gets removed from the node later on it will not get added again. + enum: + - Always + - OnInitialization + type: string + value: + description: |- + value is the taint value corresponding to the taint key. + It must be a valid label value of maximum size 63 characters. + maxLength: 63 + minLength: 1 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + type: string + required: + - effect + - key + - propagation + type: object + maxItems: 64 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - key + - effect + x-kubernetes-list-type: map version: description: |- version defines the desired Kubernetes version. diff --git a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml index ed03b07b3c72..467cd4147a22 100644 --- a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml +++ b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddressclaims.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: ipaddressclaims.ipam.cluster.x-k8s.io spec: group: ipam.cluster.x-k8s.io diff --git a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml index 6f4cf9e248a8..1dd4002f3717 100644 --- a/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml +++ b/config/crd/bases/ipam.cluster.x-k8s.io_ipaddresses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: ipaddresses.ipam.cluster.x-k8s.io spec: group: ipam.cluster.x-k8s.io diff --git a/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml b/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml index 65ff45142e2b..666b2c3b1836 100644 --- a/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml +++ b/config/crd/bases/runtime.cluster.x-k8s.io_extensionconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: extensionconfigs.runtime.cluster.x-k8s.io spec: group: runtime.cluster.x-k8s.io diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 54acf21d0444..f14cbd505132 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,7 +23,7 @@ spec: - "--leader-elect" - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=true},MachineWaitForVolumeDetachConsiderVolumeAttachments=${EXP_MACHINE_WAITFORVOLUMEDETACH_CONSIDER_VOLUMEATTACHMENTS:=true},PriorityQueue=${EXP_PRIORITY_QUEUE:=false}" + - 
"--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=true},MachineWaitForVolumeDetachConsiderVolumeAttachments=${EXP_MACHINE_WAITFORVOLUMEDETACH_CONSIDER_VOLUMEATTACHMENTS:=true},PriorityQueue=${EXP_PRIORITY_QUEUE:=false},ReconcilerRateLimiting=${EXP_RECONCILER_RATE_LIMITING:=false},InPlaceUpdates=${EXP_IN_PLACE_UPDATES:=false},MachineTaintPropagation=${EXP_MACHINE_TAINT_PROPAGATION:=false}" image: controller:latest name: manager env: diff --git a/config/metrics/crd-clusterrole.yaml b/config/metrics/crd-clusterrole.yaml index 52ca08243e28..f24506732b06 100644 --- a/config/metrics/crd-clusterrole.yaml +++ b/config/metrics/crd-clusterrole.yaml @@ -2,9 +2,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: kube-state-metrics-custom-resource-capi labels: kube-state-metrics/aggregate-to-manager: "true" + name: manager-metrics-role rules: - apiGroups: - addons.cluster.x-k8s.io diff --git a/config/metrics/crd-metrics-config.yaml b/config/metrics/crd-metrics-config.yaml index 6ace01c4711d..a3fb0750306c 100644 --- a/config/metrics/crd-metrics-config.yaml +++ b/config/metrics/crd-metrics-config.yaml @@ -8,7 +8,7 @@ spec: groupVersionKind: group: addons.cluster.x-k8s.io kind: ClusterResourceSet - version: v1beta1 + version: v1beta2 labelsFromPath: name: - metadata @@ -33,7 +33,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -50,7 +49,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -62,7 +60,7 @@ spec: groupVersionKind: group: bootstrap.cluster.x-k8s.io kind: KubeadmConfig - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -140,7 +138,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -157,7 +154,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -169,7 +165,7 @@ spec: groupVersionKind: group: cluster.x-k8s.io kind: Cluster - version: v1beta1 + version: v1beta2 labelsFromPath: name: - metadata @@ -231,10 +227,16 @@ spec: - spec - infrastructureRef - name - topology_class: + topology_classref_name: - spec - topology - - class + - classRef + - name + topology_classref_namespace: + - spec + - topology + - classRef + - namespace topology_version: - spec - topology @@ -266,7 +268,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -283,7 +284,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -312,7 +312,7 @@ spec: groupVersionKind: group: cluster.x-k8s.io kind: ClusterClass - version: v1beta1 + version: v1beta2 labelsFromPath: name: - metadata @@ -386,7 +386,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -403,7 +402,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -415,7 +413,7 @@ spec: groupVersionKind: group: cluster.x-k8s.io kind: Machine - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - spec @@ -563,7 +561,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -580,7 +577,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -592,8 +588,6 @@ spec: labelsFromPath: node_name: - name - node_uid: - - uid path: - status - nodeRef @@ -624,7 +618,7 @@ spec: 
groupVersionKind: group: cluster.x-k8s.io kind: MachineDeployment - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - spec @@ -743,6 +737,7 @@ spec: nilIsZero: false path: - spec + - rollout - strategy - rollingUpdate - maxSurge @@ -756,6 +751,7 @@ spec: nilIsZero: false path: - spec + - rollout - strategy - rollingUpdate - maxUnavailable @@ -775,7 +771,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -792,7 +787,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -850,27 +844,17 @@ spec: nilIsZero: true path: - status - - unavailableReplicas - valueFrom: null - type: Gauge - help: The number of unavailable replicas per machinedeployment. - name: status_replicas_unavailable - - each: - gauge: - nilIsZero: true - path: - - status - - updatedReplicas + - upToDateReplicas valueFrom: null type: Gauge - help: The number of updated replicas per machinedeployment. - name: status_replicas_updated + help: The number of up-to-date replicas per machinedeployment. + name: status_replicas_uptodate resourcePlural: "" - errorLogV: 0 groupVersionKind: group: cluster.x-k8s.io kind: MachineHealthCheck - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - spec @@ -911,9 +895,16 @@ spec: - each: info: labelsFromPath: - maxUnhealthy: + remediation_triggerif_unhealthyinrange: - spec - - maxUnhealthy + - remediation + - triggerIf + - unhealthyInRange + remediation_triggerif_unhealthylessthanorequalto: + - spec + - remediation + - triggerIf + - unhealthyLessThanOrEqualTo path: null type: Info help: Information about a machinehealthcheck. @@ -947,7 +938,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -964,7 +954,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1006,7 +995,7 @@ spec: groupVersionKind: group: cluster.x-k8s.io kind: MachinePool - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - spec @@ -1126,7 +1115,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1143,7 +1131,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1201,17 +1188,17 @@ spec: nilIsZero: true path: - status - - unavailableReplicas + - upToDateReplicas valueFrom: null type: Gauge - help: The number of unavailable replicas per machinepool. - name: status_replicas_unavailable + help: The number of up-to-date replicas per machinepool. + name: status_replicas_uptodate resourcePlural: "" - errorLogV: 0 groupVersionKind: group: cluster.x-k8s.io kind: MachineSet - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - spec @@ -1326,7 +1313,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1343,7 +1329,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1375,27 +1360,27 @@ spec: nilIsZero: true path: - status - - fullyLabeledReplicas + - readyReplicas valueFrom: null type: Gauge - help: The number of fully labeled replicas per machineset. - name: status_replicas_fully_labeled + help: The number of ready replicas per machineset. + name: status_replicas_ready - each: gauge: nilIsZero: true path: - status - - readyReplicas + - upToDateReplicas valueFrom: null type: Gauge - help: The number of ready replicas per machineset. - name: status_replicas_ready + help: The number of up-to-date replicas per machineset. 
+ name: status_replicas_uptodate resourcePlural: "" - errorLogV: 0 groupVersionKind: group: controlplane.cluster.x-k8s.io kind: KubeadmControlPlane - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1477,7 +1462,8 @@ spec: nilIsZero: false path: - spec - - rolloutStrategy + - rollout + - strategy - rollingUpdate - maxSurge valueFrom: null @@ -1497,7 +1483,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1514,7 +1499,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1536,37 +1520,37 @@ spec: nilIsZero: true path: - status - - readyReplicas + - availableReplicas valueFrom: null type: Gauge - help: The number of ready replicas per kubeadmcontrolplane. - name: status_replicas_ready + help: The number of available replicas per kubeadmcontrolplane. + name: status_replicas_available - each: gauge: nilIsZero: true path: - status - - unavailableReplicas + - readyReplicas valueFrom: null type: Gauge - help: The number of unavailable replicas per kubeadmcontrolplane. - name: status_replicas_unavailable + help: The number of ready replicas per kubeadmcontrolplane. + name: status_replicas_ready - each: gauge: nilIsZero: true path: - status - - updatedReplicas + - upToDateReplicas valueFrom: null type: Gauge - help: The number of updated replicas per kubeadmcontrolplane. - name: status_replicas_updated + help: The number of up-to-date replicas per kubeadmcontrolplane. + name: status_replicas_uptodate resourcePlural: "" - errorLogV: 0 groupVersionKind: group: infrastructure.cluster.x-k8s.io kind: DevCluster - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1595,7 +1579,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1612,7 +1595,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1624,7 +1606,7 @@ spec: groupVersionKind: group: infrastructure.cluster.x-k8s.io kind: DevMachine - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1653,7 +1635,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1670,7 +1651,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1682,7 +1662,7 @@ spec: groupVersionKind: group: infrastructure.cluster.x-k8s.io kind: DockerCluster - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1711,7 +1691,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1728,7 +1707,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1740,7 +1718,7 @@ spec: groupVersionKind: group: infrastructure.cluster.x-k8s.io kind: DockerMachine - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1769,7 +1747,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1786,7 +1763,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1798,7 +1774,7 @@ spec: groupVersionKind: group: ipam.cluster.x-k8s.io kind: IPAddressClaim - version: v1beta1 + version: v1beta2 labelsFromPath: cluster_name: - metadata @@ -1827,7 +1803,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1844,7 +1819,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime @@ -1856,7 +1830,7 @@ spec: 
groupVersionKind: group: runtime.cluster.x-k8s.io kind: ExtensionConfig - version: v1alpha1 + version: v1beta2 labelsFromPath: name: - metadata @@ -1881,7 +1855,6 @@ spec: - Unknown path: - status - - v1beta2 - conditions valueFrom: - status @@ -1898,7 +1871,6 @@ spec: nilIsZero: false path: - status - - v1beta2 - conditions valueFrom: - lastTransitionTime diff --git a/config/metrics/kustomization.yaml b/config/metrics/kustomization.yaml index 3955c8d0d945..dafde062ccd5 100644 --- a/config/metrics/kustomization.yaml +++ b/config/metrics/kustomization.yaml @@ -2,9 +2,10 @@ resources: - ./crd-clusterrole.yaml namespace: observability +namePrefix: kube-state-metrics- configMapGenerator: -- name: kube-state-metrics-crd-config-capi +- name: crd-config-capi files: - capi.yaml=crd-metrics-config.yaml options: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index ede44f1b35e0..dc6e7c20c0da 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -55,20 +55,20 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta2-machine + path: /mutate-runtime-cluster-x-k8s-io-v1beta2-extensionconfig failurePolicy: Fail matchPolicy: Equivalent - name: default.machine.cluster.x-k8s.io + name: default.extensionconfig.runtime.addons.cluster.x-k8s.io rules: - apiGroups: - - cluster.x-k8s.io + - runtime.cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE resources: - - machines + - extensionconfigs sideEffects: None - admissionReviewVersions: - v1 @@ -77,10 +77,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta2-machinedeployment + path: /mutate-cluster-x-k8s-io-v1beta2-machine failurePolicy: Fail matchPolicy: Equivalent - name: default.machinedeployment.cluster.x-k8s.io + name: default.machine.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -90,7 +90,7 @@ webhooks: - CREATE - UPDATE resources: - - machinedeployments + - machines sideEffects: None - admissionReviewVersions: - v1 @@ -99,10 +99,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta2-machinehealthcheck + path: /mutate-cluster-x-k8s-io-v1beta2-machinedeployment failurePolicy: Fail matchPolicy: Equivalent - name: default.machinehealthcheck.cluster.x-k8s.io + name: default.machinedeployment.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -112,7 +112,7 @@ webhooks: - CREATE - UPDATE resources: - - machinehealthchecks + - machinedeployments sideEffects: None - admissionReviewVersions: - v1 @@ -121,10 +121,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta2-machineset + path: /mutate-cluster-x-k8s-io-v1beta2-machinehealthcheck failurePolicy: Fail matchPolicy: Equivalent - name: default.machineset.cluster.x-k8s.io + name: default.machinehealthcheck.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -134,7 +134,7 @@ webhooks: - CREATE - UPDATE resources: - - machinesets + - machinehealthchecks sideEffects: None - admissionReviewVersions: - v1 @@ -143,20 +143,20 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-runtime-cluster-x-k8s-io-v1beta2-extensionconfig + path: /mutate-cluster-x-k8s-io-v1beta2-machinepool failurePolicy: Fail matchPolicy: Equivalent - name: default.extensionconfig.runtime.addons.cluster.x-k8s.io + name: default.machinepool.cluster.x-k8s.io rules: - apiGroups: - - runtime.cluster.x-k8s.io + - cluster.x-k8s.io 
apiVersions: - v1beta2 operations: - CREATE - UPDATE resources: - - extensionconfigs + - machinepools sideEffects: None - admissionReviewVersions: - v1 @@ -165,10 +165,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-cluster-x-k8s-io-v1beta2-machinepool + path: /mutate-cluster-x-k8s-io-v1beta2-machineset failurePolicy: Fail matchPolicy: Equivalent - name: default.machinepool.cluster.x-k8s.io + name: default.machineset.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -178,7 +178,7 @@ webhooks: - CREATE - UPDATE resources: - - machinepools + - machinesets sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 @@ -283,20 +283,20 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machine + path: /validate-runtime-cluster-x-k8s-io-v1beta2-extensionconfig failurePolicy: Fail matchPolicy: Equivalent - name: validation.machine.cluster.x-k8s.io + name: validation.extensionconfig.runtime.cluster.x-k8s.io rules: - apiGroups: - - cluster.x-k8s.io + - runtime.cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE resources: - - machines + - extensionconfigs sideEffects: None - admissionReviewVersions: - v1 @@ -305,20 +305,21 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machinedeployment + path: /validate-ipam-cluster-x-k8s-io-v1beta2-ipaddress failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinedeployment.cluster.x-k8s.io + name: validation.ipaddress.ipam.cluster.x-k8s.io rules: - apiGroups: - - cluster.x-k8s.io + - ipam.cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE + - DELETE resources: - - machinedeployments + - ipaddresses sideEffects: None - admissionReviewVersions: - v1 @@ -327,20 +328,21 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machinedrainrule + path: /validate-ipam-cluster-x-k8s-io-v1beta2-ipaddressclaim failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinedrainrule.cluster.x-k8s.io + name: validation.ipaddressclaim.ipam.cluster.x-k8s.io rules: - apiGroups: - - cluster.x-k8s.io + - ipam.cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE + - DELETE resources: - - machinedrainrules + - ipaddressclaims sideEffects: None - admissionReviewVersions: - v1 @@ -349,10 +351,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machinehealthcheck + path: /validate-cluster-x-k8s-io-v1beta2-machine failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinehealthcheck.cluster.x-k8s.io + name: validation.machine.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -362,7 +364,7 @@ webhooks: - CREATE - UPDATE resources: - - machinehealthchecks + - machines sideEffects: None - admissionReviewVersions: - v1 @@ -371,10 +373,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machineset + path: /validate-cluster-x-k8s-io-v1beta2-machinedeployment failurePolicy: Fail matchPolicy: Equivalent - name: validation.machineset.cluster.x-k8s.io + name: validation.machinedeployment.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -384,7 +386,7 @@ webhooks: - CREATE - UPDATE resources: - - machinesets + - machinedeployments sideEffects: None - admissionReviewVersions: - v1 @@ -393,20 +395,20 @@ webhooks: service: name: webhook-service namespace: system - path: 
/validate-runtime-cluster-x-k8s-io-v1beta2-extensionconfig + path: /validate-cluster-x-k8s-io-v1beta2-machinedrainrule failurePolicy: Fail matchPolicy: Equivalent - name: validation.extensionconfig.runtime.cluster.x-k8s.io + name: validation.machinedrainrule.cluster.x-k8s.io rules: - apiGroups: - - runtime.cluster.x-k8s.io + - cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE resources: - - extensionconfigs + - machinedrainrules sideEffects: None - admissionReviewVersions: - v1 @@ -415,10 +417,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-cluster-x-k8s-io-v1beta2-machinepool + path: /validate-cluster-x-k8s-io-v1beta2-machinehealthcheck failurePolicy: Fail matchPolicy: Equivalent - name: validation.machinepool.cluster.x-k8s.io + name: validation.machinehealthcheck.cluster.x-k8s.io rules: - apiGroups: - cluster.x-k8s.io @@ -428,7 +430,7 @@ webhooks: - CREATE - UPDATE resources: - - machinepools + - machinehealthchecks sideEffects: None - admissionReviewVersions: - v1 @@ -437,21 +439,20 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-ipam-cluster-x-k8s-io-v1beta2-ipaddress + path: /validate-cluster-x-k8s-io-v1beta2-machinepool failurePolicy: Fail matchPolicy: Equivalent - name: validation.ipaddress.ipam.cluster.x-k8s.io + name: validation.machinepool.cluster.x-k8s.io rules: - apiGroups: - - ipam.cluster.x-k8s.io + - cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE - - DELETE resources: - - ipaddresses + - machinepools sideEffects: None - admissionReviewVersions: - v1 @@ -460,19 +461,18 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-ipam-cluster-x-k8s-io-v1beta2-ipaddressclaim + path: /validate-cluster-x-k8s-io-v1beta2-machineset failurePolicy: Fail matchPolicy: Equivalent - name: validation.ipaddressclaim.ipam.cluster.x-k8s.io + name: validation.machineset.cluster.x-k8s.io rules: - apiGroups: - - ipam.cluster.x-k8s.io + - cluster.x-k8s.io apiVersions: - v1beta2 operations: - CREATE - UPDATE - - DELETE resources: - - ipaddressclaims + - machinesets sideEffects: None diff --git a/controllers/alias.go b/controllers/alias.go index 63895489b137..b8fc4d21e103 100644 --- a/controllers/alias.go +++ b/controllers/alias.go @@ -34,9 +34,11 @@ import ( clusterclasscontroller "sigs.k8s.io/cluster-api/internal/controllers/clusterclass" "sigs.k8s.io/cluster-api/internal/controllers/clusterresourceset" "sigs.k8s.io/cluster-api/internal/controllers/clusterresourcesetbinding" + extensionconfigcontroller "sigs.k8s.io/cluster-api/internal/controllers/extensionconfig" machinecontroller "sigs.k8s.io/cluster-api/internal/controllers/machine" machinedeploymentcontroller "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment" machinehealthcheckcontroller "sigs.k8s.io/cluster-api/internal/controllers/machinehealthcheck" + machinepoolcontroller "sigs.k8s.io/cluster-api/internal/controllers/machinepool" machinesetcontroller "sigs.k8s.io/cluster-api/internal/controllers/machineset" clustertopologycontroller "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster" machinedeploymenttopologycontroller "sigs.k8s.io/cluster-api/internal/controllers/topology/machinedeployment" @@ -70,9 +72,10 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag // MachineReconciler reconciles a Machine object. 
type MachineReconciler struct { - Client client.Client - APIReader client.Reader - ClusterCache clustercache.ClusterCache + Client client.Client + APIReader client.Reader + ClusterCache clustercache.ClusterCache + RuntimeClient runtimeclient.Client // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -88,6 +91,7 @@ func (r *MachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag Client: r.Client, APIReader: r.APIReader, ClusterCache: r.ClusterCache, + RuntimeClient: r.RuntimeClient, WatchFilterValue: r.WatchFilterValue, RemoteConditionsGracePeriod: r.RemoteConditionsGracePeriod, AdditionalSyncMachineLabels: r.AdditionalSyncMachineLabels, @@ -119,8 +123,9 @@ func (r *MachineSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma // MachineDeploymentReconciler reconciles a MachineDeployment object. type MachineDeploymentReconciler struct { - Client client.Client - APIReader client.Reader + Client client.Client + APIReader client.Reader + RuntimeClient runtimeclient.Client // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -130,6 +135,7 @@ func (r *MachineDeploymentReconciler) SetupWithManager(ctx context.Context, mgr return (&machinedeploymentcontroller.Reconciler{ Client: r.Client, APIReader: r.APIReader, + RuntimeClient: r.RuntimeClient, WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } @@ -279,3 +285,45 @@ func (r *ClusterResourceSetBindingReconciler) SetupWithManager(ctx context.Conte WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } + +// MachinePoolReconciler reconciles a MachinePool object. +type MachinePoolReconciler struct { + Client client.Client + APIReader client.Reader + ClusterCache clustercache.ClusterCache + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + return (&machinepoolcontroller.Reconciler{ + Client: r.Client, + APIReader: r.APIReader, + ClusterCache: r.ClusterCache, + WatchFilterValue: r.WatchFilterValue, + }).SetupWithManager(ctx, mgr, options) +} + +// ExtensionConfigReconciler reconciles an ExtensionConfig object. +type ExtensionConfigReconciler struct { + Client client.Client + APIReader client.Reader + RuntimeClient runtimeclient.Client + PartialSecretCache cache.Cache + ReadOnly bool + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +func (r *ExtensionConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + return (&extensionconfigcontroller.Reconciler{ + Client: r.Client, + APIReader: r.APIReader, + RuntimeClient: r.RuntimeClient, + PartialSecretCache: r.PartialSecretCache, + ReadOnly: r.ReadOnly, + WatchFilterValue: r.WatchFilterValue, + }).SetupWithManager(ctx, mgr, options) +} diff --git a/controllers/clustercache/cluster_accessor.go b/controllers/clustercache/cluster_accessor.go index 0bbc92f2380d..d981607cd6ac 100644 --- a/controllers/clustercache/cluster_accessor.go +++ b/controllers/clustercache/cluster_accessor.go @@ -82,6 +82,10 @@ type clusterAccessorConfig struct { // connection after creating a connection failed. 
ConnectionCreationRetryInterval time.Duration + // DisableClientCertificatePrivateKey is the flag to disable the creation of the client + // certificate private key. + DisableClientCertificatePrivateKey bool + // Cache is the config used for the cache that the clusterAccessor creates. Cache *clusterAccessorCacheConfig @@ -191,6 +195,10 @@ type clusterAccessorLockedConnectionState struct { // all typed objects except the ones for which caching has been disabled via DisableFor. cachedClient client.Client + // uncachedClient to communicate with the workload cluster. + // It performs live GET/LIST calls directly against the API server with no caching. + uncachedClient client.Client + // cache is the cache used by the client. // It manages informers that have been created e.g. by adding indexes to the cache, // Get & List calls from the client or via the Watch method of the clusterAccessor. @@ -280,7 +288,7 @@ func (ca *clusterAccessor) Connect(ctx context.Context) (retErr error) { // Only generate the clientCertificatePrivateKey once as there is no need to regenerate it after disconnect/connect. // Note: This has to be done before setting connection, because otherwise this code wouldn't be re-entrant if the // private key generation fails because we check Connected above. - if ca.lockedState.clientCertificatePrivateKey == nil { + if ca.lockedState.clientCertificatePrivateKey == nil && !ca.config.DisableClientCertificatePrivateKey { log.V(6).Info("Generating client certificate private key") clientCertificatePrivateKey, err := certs.NewPrivateKey() if err != nil { @@ -297,11 +305,12 @@ func (ca *clusterAccessor) Connect(ctx context.Context) (retErr error) { consecutiveFailures: 0, } ca.lockedState.connection = &clusterAccessorLockedConnectionState{ - restConfig: connection.RESTConfig, - restClient: connection.RESTClient, - cachedClient: connection.CachedClient, - cache: connection.Cache, - watches: sets.Set[string]{}, + restConfig: connection.RESTConfig, + restClient: connection.RESTClient, + cachedClient: connection.CachedClient, + uncachedClient: connection.UncachedClient, + cache: connection.Cache, + watches: sets.Set[string]{}, } return nil @@ -407,6 +416,18 @@ func (ca *clusterAccessor) GetReader(ctx context.Context) (client.Reader, error) return ca.lockedState.connection.cachedClient, nil } +// GetUncachedClient returns a live (uncached) client for the given cluster. 
+func (ca *clusterAccessor) GetUncachedClient(ctx context.Context) (client.Client, error) { + ca.rLock(ctx) + defer ca.rUnlock(ctx) + + if ca.lockedState.connection == nil { + return nil, errors.Wrapf(ErrClusterNotConnected, "error getting uncached client") + } + + return ca.lockedState.connection.uncachedClient, nil +} + func (ca *clusterAccessor) GetRESTConfig(ctx context.Context) (*rest.Config, error) { ca.rLock(ctx) defer ca.rUnlock(ctx) diff --git a/controllers/clustercache/cluster_accessor_client.go b/controllers/clustercache/cluster_accessor_client.go index 7e5ae2484626..8cc9a0a6340c 100644 --- a/controllers/clustercache/cluster_accessor_client.go +++ b/controllers/clustercache/cluster_accessor_client.go @@ -42,10 +42,11 @@ import ( ) type createConnectionResult struct { - RESTConfig *rest.Config - RESTClient *rest.RESTClient - CachedClient client.Client - Cache *stoppableCache + RESTConfig *rest.Config + RESTClient *rest.RESTClient + CachedClient client.Client + UncachedClient client.Client + Cache *stoppableCache } func (ca *clusterAccessor) createConnection(ctx context.Context) (*createConnectionResult, error) { @@ -97,6 +98,12 @@ func (ca *clusterAccessor) createConnection(ctx context.Context) (*createConnect if err != nil { return nil, errors.Wrapf(err, "error creating HTTP client and mapper (using in-cluster config)") } + + log.V(6).Info(fmt.Sprintf("Creating uncached client with updated REST config with host %q", restConfig.Host)) + uncachedClient, err = createUncachedClient(ca.config.Scheme, restConfig, httpClient, mapper) + if err != nil { + return nil, errors.Wrapf(err, "error creating uncached client (using in-cluster config)") + } } log.V(6).Info("Creating cached client and cache") @@ -106,10 +113,11 @@ func (ca *clusterAccessor) createConnection(ctx context.Context) (*createConnect } return &createConnectionResult{ - RESTConfig: restConfig, - RESTClient: restClient, - CachedClient: cachedClient, - Cache: cache, + RESTConfig: restConfig, + RESTClient: restClient, + CachedClient: cachedClient, + UncachedClient: uncachedClient, + Cache: cache, }, nil } @@ -208,7 +216,7 @@ func createUncachedClient(scheme *runtime.Scheme, config *rest.Config, httpClien return nil, errors.Wrapf(err, "error creating uncached client") } - return uncachedClient, nil + return newClientWithTimeout(uncachedClient, config.Timeout), nil } // createCachedClient creates a cached client for the given cluster, based on the rest.Config. 
diff --git a/controllers/clustercache/cluster_accessor_test.go b/controllers/clustercache/cluster_accessor_test.go index bfe5acdf1d88..c3fb117b69f1 100644 --- a/controllers/clustercache/cluster_accessor_test.go +++ b/controllers/clustercache/cluster_accessor_test.go @@ -76,8 +76,13 @@ func TestConnect(t *testing.T) { }, nil) accessor := newClusterAccessor(context.Background(), clusterKey, config) + // Before connect, getting the uncached client should fail with ErrClusterNotConnected + _, err := accessor.GetUncachedClient(ctx) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, ErrClusterNotConnected)).To(BeTrue()) + // Connect when kubeconfig Secret doesn't exist (should fail) - err := accessor.Connect(ctx) + err = accessor.Connect(ctx) g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(Equal("error creating REST config: error getting kubeconfig secret: Secret \"test-cluster-kubeconfig\" not found")) g.Expect(accessor.Connected(ctx)).To(BeFalse()) @@ -136,6 +141,16 @@ func TestConnect(t *testing.T) { g.Expect(accessor.lockedState.healthChecking.lastProbeSuccessTime.IsZero()).To(BeFalse()) g.Expect(accessor.lockedState.healthChecking.consecutiveFailures).To(Equal(0)) + // After connect, getting the uncached client should succeed + r, err := accessor.GetUncachedClient(ctx) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r).ToNot(BeNil()) + + // List Nodes via the uncached client + nodeListUncached := &corev1.NodeList{} + g.Expect(r.List(ctx, nodeListUncached)).To(Succeed()) + g.Expect(nodeListUncached.Items).To(BeEmpty()) + // Get client and test Get & List c, err := accessor.GetClient(ctx) g.Expect(err).ToNot(HaveOccurred()) @@ -150,6 +165,11 @@ func TestConnect(t *testing.T) { // Disconnect accessor.Disconnect(ctx) g.Expect(accessor.Connected(ctx)).To(BeFalse()) + + // After disconnect, getting the uncached client should fail with ErrClusterNotConnected + _, err = accessor.GetUncachedClient(ctx) + g.Expect(err).To(HaveOccurred()) + g.Expect(errors.Is(err, ErrClusterNotConnected)).To(BeTrue()) } func TestDisconnect(t *testing.T) { diff --git a/controllers/clustercache/cluster_cache.go b/controllers/clustercache/cluster_cache.go index 06d64f39863f..cf4d20ffafac 100644 --- a/controllers/clustercache/cluster_cache.go +++ b/controllers/clustercache/cluster_cache.go @@ -44,6 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -134,6 +135,10 @@ type ClusterCache interface { // If there is no connection to the workload cluster ErrClusterNotConnected will be returned. GetReader(ctx context.Context, cluster client.ObjectKey) (client.Reader, error) + // GetUncachedClient returns a live (uncached) client for the given cluster. + // If there is no connection to the workload cluster ErrClusterNotConnected will be returned. + GetUncachedClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) + // GetRESTConfig returns a REST config for the given cluster. // If there is no connection to the workload cluster ErrClusterNotConnected will be returned. GetRESTConfig(ctx context.Context, cluster client.ObjectKey) (*rest.Config, error) @@ -143,6 +148,9 @@ type ClusterCache interface { // cert to communicate with etcd. // This private key is stored and cached in the ClusterCache because it's expensive to generate a new // private key in every single Reconcile. 
+ // + // Deprecated: This method is deprecated and will be removed in a future release as caching a rsa.PrivateKey + // is outside the scope of the ClusterCache. GetClientCertificatePrivateKey(ctx context.Context, cluster client.ObjectKey) (*rsa.PrivateKey, error) // Watch watches a workload cluster for events. @@ -321,7 +329,8 @@ func SetupWithManager(ctx context.Context, mgr manager.Manager, options Options, cacheCtxCancel: cacheCtxCancel, } - err := ctrl.NewControllerManagedBy(mgr). + predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "clustercache") + err := capicontrollerutil.NewControllerManagedBy(mgr, predicateLog). Named("clustercache"). For(&clusterv1.Cluster{}). WithOptions(controllerOptions). @@ -392,6 +401,16 @@ func (cc *clusterCache) GetReader(ctx context.Context, cluster client.ObjectKey) return accessor.GetReader(ctx) } +// GetUncachedClient returns a live (uncached) client for the given cluster. +// If there is no connection to the workload cluster ErrClusterNotConnected will be returned. +func (cc *clusterCache) GetUncachedClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) { + accessor := cc.getClusterAccessor(cluster) + if accessor == nil { + return nil, errors.Wrapf(ErrClusterNotConnected, "error getting uncached client") + } + return accessor.GetUncachedClient(ctx) +} + func (cc *clusterCache) GetRESTConfig(ctx context.Context, cluster client.ObjectKey) (*rest.Config, error) { accessor := cc.getClusterAccessor(cluster) if accessor == nil { @@ -681,6 +700,12 @@ func (cc *clusterCache) SetConnectionCreationRetryInterval(interval time.Duratio cc.clusterAccessorConfig.ConnectionCreationRetryInterval = interval } +// DisablePrivateKeyGeneration can be used to disable the creation of cluster cert private key on clusteraccessor. +// This method should only be used for tests and is not part of the public ClusterCache interface. +func (cc *clusterCache) DisablePrivateKeyGeneration() { + cc.clusterAccessorConfig.DisableClientCertificatePrivateKey = true +} + // Shutdown can be used to shut down the ClusterCache in unit tests. // This method should only be used for tests because it hasn't been designed for production usage // in a manager (race conditions with manager shutdown etc.). diff --git a/controllers/clustercache/cluster_cache_fake.go b/controllers/clustercache/cluster_cache_fake.go index b6e44c4038f3..f4f44b3da097 100644 --- a/controllers/clustercache/cluster_cache_fake.go +++ b/controllers/clustercache/cluster_cache_fake.go @@ -32,8 +32,9 @@ func NewFakeClusterCache(workloadClient client.Client, clusterKey client.ObjectK testCacheTracker.clusterAccessors[clusterKey] = &clusterAccessor{ lockedState: clusterAccessorLockedState{ connection: &clusterAccessorLockedConnectionState{ - cachedClient: workloadClient, - watches: sets.Set[string]{}.Insert(watchObjects...), + cachedClient: workloadClient, + uncachedClient: workloadClient, + watches: sets.Set[string]{}.Insert(watchObjects...), }, healthChecking: clusterAccessorLockedHealthCheckingState{ lastProbeTime: time.Now(), @@ -44,3 +45,8 @@ func NewFakeClusterCache(workloadClient client.Client, clusterKey client.ObjectK } return testCacheTracker } + +// NewFakeEmptyClusterCache creates a new empty ClusterCache that can be used by unit tests. 
+func NewFakeEmptyClusterCache() ClusterCache { + return &clusterCache{} +} diff --git a/controllers/crdmigrator/crd_migrator.go b/controllers/crdmigrator/crd_migrator.go index 4fdf83e8da49..e56110fd0ba8 100644 --- a/controllers/crdmigrator/crd_migrator.go +++ b/controllers/crdmigrator/crd_migrator.go @@ -45,6 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/contract" "sigs.k8s.io/cluster-api/util/predicates" @@ -121,7 +122,7 @@ func (r *CRDMigrator) SetupWithManager(ctx context.Context, mgr ctrl.Manager, co } predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "crdmigrator") - err := ctrl.NewControllerManagedBy(mgr). + err := capicontrollerutil.NewControllerManagedBy(mgr, predicateLog). For(&apiextensionsv1.CustomResourceDefinition{}, // This controller uses a PartialObjectMetadata watch/informer to avoid an informer for CRDs // to reduce memory usage. @@ -412,7 +413,7 @@ func (r *CRDMigrator) reconcileStorageVersionMigration(ctx context.Context, crd if migrationConfig.UseStatusForStorageVersionMigration { err = r.Client.Status().Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator")) } else { - err = r.Client.Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator")) + err = r.Client.Apply(ctx, client.ApplyConfigurationFromUnstructured(u), client.FieldOwner("crdmigrator")) } // If we got a NotFound error, the object no longer exists so no need to update it. // If we got a Conflict error, another client wrote the object already so no need to update it. diff --git a/controllers/crdmigrator/crd_migrator_test.go b/controllers/crdmigrator/crd_migrator_test.go index 1000ac050eb7..ba8667fb216e 100644 --- a/controllers/crdmigrator/crd_migrator_test.go +++ b/controllers/crdmigrator/crd_migrator_test.go @@ -195,10 +195,10 @@ func TestReconcile(t *testing.T) { // Deploy test-cluster-1 and test-cluster-2. 
testClusterT1 := unstructuredTestCluster("test-cluster-1", t1v1beta1.GroupVersion.WithKind("TestCluster")) g.Expect(unstructured.SetNestedField(testClusterT1.Object, "foo-value", "spec", "foo")).To(Succeed()) - g.Expect(managerT1.GetClient().Patch(ctx, testClusterT1, client.Apply, fieldOwner)).To(Succeed()) + g.Expect(managerT1.GetClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(testClusterT1), fieldOwner)).To(Succeed()) testClusterT1 = unstructuredTestCluster("test-cluster-2", t1v1beta1.GroupVersion.WithKind("TestCluster")) g.Expect(unstructured.SetNestedField(testClusterT1.Object, "foo-value", "spec", "foo")).To(Succeed()) - g.Expect(managerT1.GetClient().Patch(ctx, testClusterT1, client.Apply, fieldOwner)).To(Succeed()) + g.Expect(managerT1.GetClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(testClusterT1), fieldOwner)).To(Succeed()) validateManagedFields(t, g, "v1beta1", map[string][]string{ "test-cluster-1": {"test.cluster.x-k8s.io/v1beta1"}, "test-cluster-2": {"test.cluster.x-k8s.io/v1beta1"}, @@ -226,11 +226,11 @@ func TestReconcile(t *testing.T) { // Set an additional field with a different field manager and v1beta2 apiVersion in test-cluster-2 testClusterT2 := unstructuredTestCluster("test-cluster-2", t2v1beta2.GroupVersion.WithKind("TestCluster")) g.Expect(unstructured.SetNestedField(testClusterT2.Object, "bar-value", "spec", "bar")).To(Succeed()) - g.Expect(managerT2.GetClient().Patch(ctx, testClusterT2, client.Apply, client.FieldOwner("different-unit-test-client"))).To(Succeed()) + g.Expect(managerT2.GetClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(testClusterT2), client.FieldOwner("different-unit-test-client"))).To(Succeed()) // Deploy test-cluster-3. testClusterT2 = unstructuredTestCluster("test-cluster-3", t2v1beta2.GroupVersion.WithKind("TestCluster")) g.Expect(unstructured.SetNestedField(testClusterT2.Object, "foo-value", "spec", "foo")).To(Succeed()) - g.Expect(managerT2.GetClient().Patch(ctx, testClusterT2, client.Apply, fieldOwner)).To(Succeed()) + g.Expect(managerT2.GetClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(testClusterT2), fieldOwner)).To(Succeed()) // At this point we have clusters with all combinations of managedField apiVersions. validateManagedFields(t, g, "v1beta2", map[string][]string{ "test-cluster-1": {"test.cluster.x-k8s.io/v1beta1"}, @@ -323,7 +323,7 @@ func TestReconcile(t *testing.T) { // Try to patch the test-clusters CRs with SSA. testClusterT4 := unstructuredTestCluster(clusterName, t4v1beta2.GroupVersion.WithKind("TestCluster")) g.Expect(unstructured.SetNestedField(testClusterT4.Object, "new-foo-value", "spec", "foo")).To(Succeed()) - err = managerT4.GetClient().Patch(ctx, testClusterT4, client.Apply, fieldOwner) + err = managerT4.GetClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(testClusterT4), fieldOwner) // If managedField cleanup was skipped before, the SSA patch will fail for the clusters which still have v1beta1 managedFields. 
if skipCRDMigrationPhases.Has(CleanupManagedFieldsPhase) && (clusterName == "test-cluster-1" || clusterName == "test-cluster-2") { diff --git a/controllers/crdmigrator/test/t1/crd/test.cluster.x-k8s.io_testclusters.yaml b/controllers/crdmigrator/test/t1/crd/test.cluster.x-k8s.io_testclusters.yaml index 100c8844e266..93c784c367b4 100644 --- a/controllers/crdmigrator/test/t1/crd/test.cluster.x-k8s.io_testclusters.yaml +++ b/controllers/crdmigrator/test/t1/crd/test.cluster.x-k8s.io_testclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: testclusters.test.cluster.x-k8s.io spec: group: test.cluster.x-k8s.io diff --git a/controllers/crdmigrator/test/t2/crd/test.cluster.x-k8s.io_testclusters.yaml b/controllers/crdmigrator/test/t2/crd/test.cluster.x-k8s.io_testclusters.yaml index 497595ef1136..5b228b55e307 100644 --- a/controllers/crdmigrator/test/t2/crd/test.cluster.x-k8s.io_testclusters.yaml +++ b/controllers/crdmigrator/test/t2/crd/test.cluster.x-k8s.io_testclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: testclusters.test.cluster.x-k8s.io spec: group: test.cluster.x-k8s.io diff --git a/controllers/crdmigrator/test/t3/crd/test.cluster.x-k8s.io_testclusters.yaml b/controllers/crdmigrator/test/t3/crd/test.cluster.x-k8s.io_testclusters.yaml index 26026c0f3991..fe9779f3d0db 100644 --- a/controllers/crdmigrator/test/t3/crd/test.cluster.x-k8s.io_testclusters.yaml +++ b/controllers/crdmigrator/test/t3/crd/test.cluster.x-k8s.io_testclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: testclusters.test.cluster.x-k8s.io spec: group: test.cluster.x-k8s.io diff --git a/controllers/crdmigrator/test/t4/crd/test.cluster.x-k8s.io_testclusters.yaml b/controllers/crdmigrator/test/t4/crd/test.cluster.x-k8s.io_testclusters.yaml index dde0047add67..883127ebc120 100644 --- a/controllers/crdmigrator/test/t4/crd/test.cluster.x-k8s.io_testclusters.yaml +++ b/controllers/crdmigrator/test/t4/crd/test.cluster.x-k8s.io_testclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: testclusters.test.cluster.x-k8s.io spec: group: test.cluster.x-k8s.io diff --git a/controllers/external/tracker_test.go b/controllers/external/tracker_test.go index e24d69296731..92966d2c82bb 100644 --- a/controllers/external/tracker_test.go +++ b/controllers/external/tracker_test.go @@ -22,7 +22,6 @@ import ( "github.com/go-logr/logr" . 
"github.com/onsi/gomega" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" @@ -83,12 +82,7 @@ func TestWatchMultipleTimes(t *testing.T) { ctrl := &watchCountController{} tracker := ObjectTracker{Controller: ctrl, Scheme: runtime.NewScheme(), Cache: &informertest.FakeInformers{}, PredicateLogger: ptr.To(logr.New(log.NullLogSink{}))} - obj := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.Version, - }, - } + obj := &clusterv1.Cluster{} err := tracker.Watch(logger, obj, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(ctrl.count).Should(Equal(1)) diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index 1f43319a0864..5fc53686c597 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: kubeadmcontrolplanes.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -733,9 +733,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -908,9 +907,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -1874,9 +1872,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -2052,9 +2049,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -2555,8 +2551,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2614,6 +2611,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2771,8 +2805,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2830,6 +2865,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3021,8 +3093,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3081,6 +3154,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3257,8 +3367,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3316,6 +3427,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3791,9 +3939,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -4196,9 +4343,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -5119,8 +5265,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -5178,6 +5325,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5369,8 +5553,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5428,6 +5613,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5549,6 +5771,22 @@ spec: minLength: 1 type: string type: object + encryptionAlgorithm: + description: |- + encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + Can be one of "RSA-2048", "RSA-3072", "RSA-4096", "ECDSA-P256" or "ECDSA-P384". 
+ For Kubernetes 1.34 or above, "ECDSA-P384" is supported. + If not specified, Cluster API will use RSA-2048 as default. + When this field is modified every certificate generated afterward will use the new + encryptionAlgorithm. Existing CA certificates and service account keys are not rotated. + This field is only supported with Kubernetes v1.31 or above. + enum: + - ECDSA-P256 + - ECDSA-P384 + - RSA-2048 + - RSA-3072 + - RSA-4096 + type: string etcd: description: |- etcd holds configuration for etcd. @@ -5654,8 +5892,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5714,6 +5953,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5820,16 +6096,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string @@ -5881,8 +6148,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. Must - be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -5940,6 +6208,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6453,9 +6758,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -6938,9 +7242,8 @@ spec: to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time at which + the taint was added. format: date-time type: string value: @@ -7511,13 +7814,7 @@ spec: type: object type: object version: - description: |- - version defines the desired Kubernetes version. - Please note that if kubeadmConfigSpec.ClusterConfiguration.imageRepository is not set - we don't allow upgrades to versions >= v1.22.0 for which kubeadm uses the old registry (k8s.gcr.io). - Please use a newer patch version with the new registry instead. The default registries of kubeadm are: - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions + description: version defines the desired Kubernetes version. 
maxLength: 256 minLength: 1 type: string diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml index badeef3309e0..694318b53ed9 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io spec: group: controlplane.cluster.x-k8s.io @@ -674,9 +674,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: @@ -854,9 +853,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: @@ -1240,8 +1238,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1301,6 +1300,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1463,8 +1499,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -1524,6 +1561,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1720,8 +1794,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1784,6 +1859,44 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the + volume mount containing the + env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1964,8 +2077,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -2025,6 +2139,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2512,9 +2663,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: @@ -2922,9 +3072,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: @@ -3486,8 +3635,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3547,6 +3697,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3744,8 +3931,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3805,6 +3993,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3931,6 +4156,22 @@ spec: minLength: 1 type: string type: object + encryptionAlgorithm: + description: |- + encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + Can be one of "RSA-2048", "RSA-3072", "RSA-4096", "ECDSA-P256" or "ECDSA-P384". + For Kubernetes 1.34 or above, "ECDSA-P384" is supported. + If not specified, Cluster API will use RSA-2048 as default. + When this field is modified every certificate generated afterward will use the new + encryptionAlgorithm. Existing CA certificates and service account keys are not rotated. + This field is only supported with Kubernetes v1.31 or above. + enum: + - ECDSA-P256 + - ECDSA-P384 + - RSA-2048 + - RSA-3072 + - RSA-4096 + type: string etcd: description: |- etcd holds configuration for etcd. @@ -4039,8 +4280,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4103,6 +4345,44 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the + volume mount containing the + env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4213,16 +4493,7 @@ spec: imageRepository: description: |- imageRepository sets the container registry to pull images from. - * If not set, the default registry of kubeadm will be used, i.e. - * registry.k8s.io (new registry): >= v1.22.17, >= v1.23.15, >= v1.24.9, >= v1.25.0 - * k8s.gcr.io (old registry): all older versions - Please note that when imageRepository is not set we don't allow upgrades to - versions >= v1.22.0 which use the old registry (k8s.gcr.io). Please use - a newer patch version with the new registry instead (i.e. >= v1.22.17, - >= v1.23.15, >= v1.24.9, >= v1.25.0). - * If the version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/k8s-staging-ci-images` will be used as a default for control plane components - and for kube-proxy, while `registry.k8s.io` will be used for all the other images. + If not set, the default registry of kubeadm will be used (registry.k8s.io). maxLength: 512 minLength: 1 type: string @@ -4275,8 +4546,9 @@ spec: variable present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4336,6 +4608,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume + mount containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4862,9 +5171,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: @@ -5353,9 +5661,8 @@ spec: be applied to a node. type: string timeAdded: - description: |- - TimeAdded represents the time at which the taint was added. - It is only written for NoExecute taints. + description: TimeAdded represents the time + at which the taint was added. format: date-time type: string value: diff --git a/controlplane/kubeadm/config/manager/manager.yaml b/controlplane/kubeadm/config/manager/manager.yaml index b5584d88551c..096ac647011a 100644 --- a/controlplane/kubeadm/config/manager/manager.yaml +++ b/controlplane/kubeadm/config/manager/manager.yaml @@ -22,7 +22,7 @@ spec: - "--leader-elect" - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false}" + - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false},ReconcilerRateLimiting=${EXP_RECONCILER_RATE_LIMITING:=false},InPlaceUpdates=${EXP_IN_PLACE_UPDATES:=false}" image: controller:latest name: manager env: diff --git a/controlplane/kubeadm/config/rbac/role.yaml b/controlplane/kubeadm/config/rbac/role.yaml index ba9d19f28edb..37f7163123ea 100644 --- a/controlplane/kubeadm/config/rbac/role.yaml +++ b/controlplane/kubeadm/config/rbac/role.yaml @@ -11,12 +11,21 @@ rules: verbs: - create - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch - apiGroups: - "" resources: - secrets verbs: - create + - delete - get - list - patch @@ -90,3 +99,11 @@ rules: - patch - update - watch +- apiGroups: + - runtime.cluster.x-k8s.io + resources: + - extensionconfigs + verbs: + - get + - list + - watch diff --git a/controlplane/kubeadm/controllers/alias.go b/controlplane/kubeadm/controllers/alias.go index b0733f825be5..0c9453ffd881 100644 --- a/controlplane/kubeadm/controllers/alias.go +++ b/controlplane/kubeadm/controllers/alias.go @@ -27,12 +27,15 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/controllers" + runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" ) // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. 
type KubeadmControlPlaneReconciler struct { Client client.Client + APIReader client.Reader SecretCachingClient client.Client + RuntimeClient runtimeclient.Client ClusterCache clustercache.ClusterCache EtcdDialTimeout time.Duration @@ -49,7 +52,9 @@ type KubeadmControlPlaneReconciler struct { func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ Client: r.Client, + APIReader: r.APIReader, SecretCachingClient: r.SecretCachingClient, + RuntimeClient: r.RuntimeClient, ClusterCache: r.ClusterCache, EtcdDialTimeout: r.EtcdDialTimeout, EtcdCallTimeout: r.EtcdCallTimeout, diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go index 1fddef3c8a21..f48999cb0ce3 100644 --- a/controlplane/kubeadm/internal/cluster.go +++ b/controlplane/kubeadm/internal/cluster.go @@ -27,11 +27,14 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" + "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/secret" ) @@ -42,7 +45,7 @@ type ManagementCluster interface { GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error) GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*clusterv1.MachinePoolList, error) - GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) + GetWorkloadCluster(ctx context.Context, cluster *clusterv1.Cluster, keyEncryptionAlgorithm bootstrapv1.EncryptionAlgorithmType) (WorkloadCluster, error) } // Management holds operations on the management cluster. @@ -53,6 +56,20 @@ type Management struct { EtcdDialTimeout time.Duration EtcdCallTimeout time.Duration EtcdLogger *zap.Logger + ClientCertCache cache.Cache[ClientCertEntry] +} + +// ClientCertEntry is an Entry for the Cache that stores the client cert. +type ClientCertEntry struct { + Cluster client.ObjectKey + ClusterUID types.UID + ClientCert *tls.Certificate + EncryptionAlgorithm bootstrapv1.EncryptionAlgorithmType +} + +// Key returns the cache key of a ClientCertEntry. +func (r ClientCertEntry) Key() string { + return fmt.Sprintf("%s/%s/%s", r.Cluster.String(), r.ClusterUID, r.EncryptionAlgorithm) } // RemoteClusterConnectionError represents a failure to connect to a remote cluster. @@ -64,7 +81,7 @@ type RemoteClusterConnectionError struct { // Error satisfies the error interface. func (e *RemoteClusterConnectionError) Error() string { return e.Name + ": " + e.Err.Error() } -// Unwrap satisfies the unwrap error inteface. +// Unwrap satisfies the unwrap error interface. func (e *RemoteClusterConnectionError) Unwrap() error { return e.Err } // Get implements client.Reader. @@ -98,7 +115,9 @@ func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clu // GetWorkloadCluster builds a cluster object. // The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine. 
-func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) { +func (m *Management) GetWorkloadCluster(ctx context.Context, cluster *clusterv1.Cluster, keyEncryptionAlgorithm bootstrapv1.EncryptionAlgorithmType) (WorkloadCluster, error) { + clusterKey := client.ObjectKeyFromObject(cluster) + // TODO(chuckha): Inject this dependency. // TODO(chuckha): memoize this function. The workload client only exists as long as a reconciliation loop. restConfig, err := m.ClusterCache.GetRESTConfig(ctx, clusterKey) @@ -126,14 +145,17 @@ func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.O // TODO: consider if we can detect if we are using external etcd in a more explicit way (e.g. looking at the config instead of deriving from the existing certificates) var clientCert tls.Certificate if keyData != nil { - clientKey, err := m.ClusterCache.GetClientCertificatePrivateKey(ctx, clusterKey) - if err != nil { - return nil, err - } - - clientCert, err = generateClientCert(crtData, keyData, clientKey) - if err != nil { - return nil, err + // Get client cert from cache if possible, otherwise generate it and add it to the cache. + // Note: The caching assumes that the etcd CA is not rotated during the lifetime of a Cluster. + if entry, ok := m.ClientCertCache.Has(ClientCertEntry{Cluster: clusterKey, ClusterUID: cluster.UID, EncryptionAlgorithm: keyEncryptionAlgorithm}.Key()); ok { + clientCert = *entry.ClientCert + } else { + // The client cert expires after 10 years, but that's okay as the cache has a TTL of 1 day. + clientCert, err = generateClientCert(crtData, keyData, keyEncryptionAlgorithm) + if err != nil { + return nil, err + } + m.ClientCertCache.Add(ClientCertEntry{Cluster: clusterKey, ClusterUID: cluster.UID, ClientCert: &clientCert, EncryptionAlgorithm: keyEncryptionAlgorithm}) } } else { clientCert, err = m.getAPIServerEtcdClientCert(ctx, clusterKey) diff --git a/controlplane/kubeadm/internal/cluster_labels.go b/controlplane/kubeadm/internal/cluster_labels.go deleted file mode 100644 index f0ff9663b9fd..000000000000 --- a/controlplane/kubeadm/internal/cluster_labels.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/labels/format" -) - -// ControlPlaneMachineLabelsForCluster returns a set of labels to add to a control plane machine for this specific cluster. -func ControlPlaneMachineLabelsForCluster(kcp *controlplanev1.KubeadmControlPlane, clusterName string) map[string]string { - labels := map[string]string{} - - // Add the labels from the MachineTemplate. - // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. 
- for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Labels { - labels[k] = v - } - - // Always force these labels over the ones coming from the spec. - labels[clusterv1.ClusterNameLabel] = clusterName - labels[clusterv1.MachineControlPlaneLabel] = "" - // Note: MustFormatValue is used here as the label value can be a hash if the control plane name is longer than 63 characters. - labels[clusterv1.MachineControlPlaneNameLabel] = format.MustFormatValue(kcp.Name) - return labels -} diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index 14252358336f..b0dfdb36f56f 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -39,9 +39,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/reconcile" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/remote" + "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -159,10 +161,6 @@ func TestGetWorkloadCluster(t *testing.T) { secret.KubeconfigDataName: testEnvKubeconfig, }, } - clusterKey := client.ObjectKey{ - Name: "my-cluster", - Namespace: ns.Name, - } tests := []struct { name string @@ -171,40 +169,34 @@ func TestGetWorkloadCluster(t *testing.T) { expectErr bool }{ { - name: "returns a workload cluster", - clusterKey: clusterKey, - objs: []client.Object{etcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, - expectErr: false, + name: "returns a workload cluster", + objs: []client.Object{etcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, + expectErr: false, }, { - name: "returns error if cannot get rest.Config from kubeconfigSecret", - clusterKey: clusterKey, - objs: []client.Object{etcdSecret.DeepCopy()}, - expectErr: true, + name: "returns error if cannot get rest.Config from kubeconfigSecret", + objs: []client.Object{etcdSecret.DeepCopy()}, + expectErr: true, }, { - name: "returns error if unable to find the etcd secret", - clusterKey: clusterKey, - objs: []client.Object{kubeconfigSecret.DeepCopy()}, - expectErr: true, + name: "returns error if unable to find the etcd secret", + objs: []client.Object{kubeconfigSecret.DeepCopy()}, + expectErr: true, }, { - name: "returns error if unable to find the certificate in the etcd secret", - clusterKey: clusterKey, - objs: []client.Object{emptyCrtEtcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, - expectErr: true, + name: "returns error if unable to find the certificate in the etcd secret", + objs: []client.Object{emptyCrtEtcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, + expectErr: true, }, { - name: "returns error if unable to find the key in the etcd secret", - clusterKey: clusterKey, - objs: []client.Object{emptyKeyEtcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, - expectErr: true, + name: "returns error if unable to find the key in the etcd secret", + objs: []client.Object{emptyKeyEtcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, + expectErr: true, }, { - name: "returns error if unable to generate client cert", - clusterKey: clusterKey, - objs: []client.Object{badCrtEtcdSecret.DeepCopy(), kubeconfigSecret.DeepCopy()}, - expectErr: true, + name: "returns error if unable to generate client cert", + objs: []client.Object{badCrtEtcdSecret.DeepCopy(), 
kubeconfigSecret.DeepCopy()}, + expectErr: true, }, } @@ -239,6 +231,7 @@ func TestGetWorkloadCluster(t *testing.T) { Client: env.GetClient(), SecretCachingClient: secretCachingClient, ClusterCache: clusterCache, + ClientCertCache: cache.New[ClientCertEntry](24 * time.Hour), } // Ensure the ClusterCache reconciled at least once (and if possible created a clusterAccessor). @@ -247,7 +240,7 @@ func TestGetWorkloadCluster(t *testing.T) { }) g.Expect(err).ToNot(HaveOccurred()) - workloadCluster, err := m.GetWorkloadCluster(ctx, tt.clusterKey) + workloadCluster, err := m.GetWorkloadCluster(ctx, cluster, bootstrapv1.EncryptionAlgorithmRSA2048) if tt.expectErr { g.Expect(err).To(HaveOccurred()) g.Expect(workloadCluster).To(BeNil()) @@ -301,7 +294,6 @@ func machineListForTestGetMachinesForCluster() *clusterv1.MachineList { } machine := func(name string) clusterv1.Machine { return clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: metav1.NamespaceDefault, diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index cd3e862c886d..877c0aeb9814 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -33,8 +33,11 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" + "sigs.k8s.io/cluster-api/internal/hooks" + "sigs.k8s.io/cluster-api/internal/util/inplace" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/failuredomains" @@ -50,9 +53,13 @@ type ControlPlane struct { Machines collections.Machines machinesPatchHelpers map[string]*patch.Helper - machinesNotUptoDate collections.Machines - machinesNotUptoDateLogMessages map[string][]string - machinesNotUptoDateConditionMessages map[string][]string + // MachinesNotUpToDate is the source of truth for Machines that are not up-to-date. + // It should be used to check if a Machine is up-to-date (not machinesUpToDateResults). + MachinesNotUpToDate collections.Machines + // machinesUpToDateResults is used to store the result of the UpToDate call for all Machines + // (even for Machines that are up-to-date). + // MachinesNotUpToDate should always be used instead to check if a Machine is up-to-date. + machinesUpToDateResults map[string]UpToDateResult // reconciliationTime is the time of the current reconciliation, and should be used for all "now" calculations reconciliationTime metav1.Time @@ -98,7 +105,7 @@ type PreflightCheckResults struct { // NewControlPlane returns an instantiated ControlPlane. 
func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, client client.Client, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ownedMachines collections.Machines) (*ControlPlane, error) { - infraObjects, err := getInfraResources(ctx, client, ownedMachines) + infraMachines, err := getInfraMachines(ctx, client, ownedMachines) if err != nil { return nil, err } @@ -118,32 +125,31 @@ func NewControlPlane(ctx context.Context, managementCluster ManagementCluster, c // Select machines that should be rolled out because of an outdated configuration or because rolloutAfter/Before expired. reconciliationTime := metav1.Now() machinesNotUptoDate := make(collections.Machines, len(ownedMachines)) - machinesNotUptoDateLogMessages := map[string][]string{} - machinesNotUptoDateConditionMessages := map[string][]string{} + machinesUpToDateResults := map[string]UpToDateResult{} for _, m := range ownedMachines { - upToDate, logMessages, conditionMessages, err := UpToDate(m, kcp, &reconciliationTime, infraObjects, kubeadmConfigs) + upToDate, upToDateResult, err := UpToDate(ctx, client, cluster, m, kcp, &reconciliationTime, infraMachines, kubeadmConfigs) if err != nil { return nil, err } if !upToDate { machinesNotUptoDate.Insert(m) - machinesNotUptoDateLogMessages[m.Name] = logMessages - machinesNotUptoDateConditionMessages[m.Name] = conditionMessages } + // Set this even if machine is UpToDate. This is needed to complete triggering in-place updates + // MachinesNotUpToDate should always be used instead to check if a Machine is up-to-date. + machinesUpToDateResults[m.Name] = *upToDateResult } return &ControlPlane{ - KCP: kcp, - Cluster: cluster, - Machines: ownedMachines, - machinesPatchHelpers: patchHelpers, - machinesNotUptoDate: machinesNotUptoDate, - machinesNotUptoDateLogMessages: machinesNotUptoDateLogMessages, - machinesNotUptoDateConditionMessages: machinesNotUptoDateConditionMessages, - KubeadmConfigs: kubeadmConfigs, - InfraResources: infraObjects, - reconciliationTime: reconciliationTime, - managementCluster: managementCluster, + KCP: kcp, + Cluster: cluster, + Machines: ownedMachines, + machinesPatchHelpers: patchHelpers, + MachinesNotUpToDate: machinesNotUptoDate, + machinesUpToDateResults: machinesUpToDateResults, + KubeadmConfigs: kubeadmConfigs, + InfraResources: infraMachines, + reconciliationTime: reconciliationTime, + managementCluster: managementCluster, }, nil } @@ -182,6 +188,21 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines return annotatedMachines } +// MachinesToCompleteTriggerInPlaceUpdate returns Machines for which we have to complete triggering +// the in-place update. This can become necessary if triggering the in-place update fails after +// we added UpdateInProgressAnnotation and before we marked the UpdateMachine hook as pending. +func (c *ControlPlane) MachinesToCompleteTriggerInPlaceUpdate() collections.Machines { + return c.Machines.Filter(func(machine *clusterv1.Machine) bool { + _, ok := machine.Annotations[clusterv1.UpdateInProgressAnnotation] + return ok && !hooks.IsPending(runtimehooksv1.UpdateMachine, machine) + }) +} + +// MachinesToCompleteInPlaceUpdate returns Machines that still have to complete their in-place update. +func (c *ControlPlane) MachinesToCompleteInPlaceUpdate() collections.Machines { + return c.Machines.Filter(inplace.IsUpdateInProgress) +} + // FailureDomainWithMostMachines returns the fd with most machines in it and at least one eligible machine in it. 
// Note: if there are eligibleMachines machines in failure domain that do not exist anymore, cleaning up those failure domains takes precedence. func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, eligibleMachines collections.Machines) string { @@ -220,25 +241,6 @@ func getGetFailureDomainIDs(failureDomains []clusterv1.FailureDomain) []string { return ids } -// InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane. -func (c *ControlPlane) InitialControlPlaneConfig() *bootstrapv1.KubeadmConfigSpec { - bootstrapSpec := c.KCP.Spec.KubeadmConfigSpec.DeepCopy() - // Note: When building a KubeadmConfig for the first CP machine empty out the unnecessary JoinConfiguration. - bootstrapSpec.JoinConfiguration = bootstrapv1.JoinConfiguration{} - return bootstrapSpec -} - -// JoinControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for joining control planes. -func (c *ControlPlane) JoinControlPlaneConfig() *bootstrapv1.KubeadmConfigSpec { - bootstrapSpec := c.KCP.Spec.KubeadmConfigSpec.DeepCopy() - // Note: When building a KubeadmConfig for a joining CP machine empty out the unnecessary InitConfiguration. - bootstrapSpec.InitConfiguration = bootstrapv1.InitConfiguration{} - // NOTE: For the joining we are preserving the ClusterConfiguration in order to determine if the - // cluster is using an external etcd in the kubeadm bootstrap provider (even if this is not required by kubeadm Join). - // TODO: Determine if this copy of cluster configuration can be used for rollouts (thus allowing to remove the annotation at machine level) - return bootstrapSpec -} - // HasDeletingMachine returns true if any machine in the control plane is in the process of being deleted. func (c *ControlPlane) HasDeletingMachine() bool { return len(c.Machines.Filter(collections.HasDeletionTimestamp)) > 0 @@ -256,35 +258,35 @@ func (c *ControlPlane) GetKubeadmConfig(machineName string) (*bootstrapv1.Kubead } // MachinesNeedingRollout return a list of machines that need to be rolled out. -func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string][]string) { +func (c *ControlPlane) MachinesNeedingRollout() (collections.Machines, map[string]UpToDateResult) { // Note: Machines already deleted are dropped because they will be replaced by new machines after deletion completes. - return c.machinesNotUptoDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesNotUptoDateLogMessages + return c.MachinesNotUpToDate.Filter(collections.Not(collections.HasDeletionTimestamp)), c.machinesUpToDateResults } // NotUpToDateMachines return a list of machines that are not up to date with the control // plane's configuration. -func (c *ControlPlane) NotUpToDateMachines() (collections.Machines, map[string][]string) { - return c.machinesNotUptoDate, c.machinesNotUptoDateConditionMessages +func (c *ControlPlane) NotUpToDateMachines() (collections.Machines, map[string]UpToDateResult) { + return c.MachinesNotUpToDate, c.machinesUpToDateResults } // UpToDateMachines returns the machines that are up to date with the control // plane's configuration. func (c *ControlPlane) UpToDateMachines() collections.Machines { - return c.Machines.Difference(c.machinesNotUptoDate) + return c.Machines.Difference(c.MachinesNotUpToDate) } -// getInfraResources fetches the external infrastructure resource for each machine in the collection and returns a map of machine.Name -> infraResource. 
-func getInfraResources(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) { +// getInfraMachines fetches the InfraMachine for each machine in the collection and returns a map of machine.Name -> InfraMachine. +func getInfraMachines(ctx context.Context, cl client.Client, machines collections.Machines) (map[string]*unstructured.Unstructured, error) { result := map[string]*unstructured.Unstructured{} for _, m := range machines { - infraObj, err := external.GetObjectFromContractVersionedRef(ctx, cl, m.Spec.InfrastructureRef, m.Namespace) + infraMachine, err := external.GetObjectFromContractVersionedRef(ctx, cl, m.Spec.InfrastructureRef, m.Namespace) if err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } - return nil, errors.Wrapf(err, "failed to retrieve infra obj for machine %q", m.Name) + return nil, errors.Wrapf(err, "failed to retrieve InfraMachine for Machine %s", m.Name) } - result[m.Name] = infraObj + result[m.Name] = infraMachine } return result, nil } @@ -297,14 +299,14 @@ func getKubeadmConfigs(ctx context.Context, cl client.Client, machines collectio if !bootstrapRef.IsDefined() { continue } - machineConfig := &bootstrapv1.KubeadmConfig{} - if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, machineConfig); err != nil { + kubeadmConfig := &bootstrapv1.KubeadmConfig{} + if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, kubeadmConfig); err != nil { if apierrors.IsNotFound(errors.Cause(err)) { continue } - return nil, errors.Wrapf(err, "failed to retrieve bootstrap config for machine %q", m.Name) + return nil, errors.Wrapf(err, "failed to retrieve KubeadmConfig for Machine %s", m.Name) } - result[m.Name] = machineConfig + result[m.Name] = kubeadmConfig } return result, nil } @@ -388,7 +390,7 @@ func (c *ControlPlane) GetWorkloadCluster(ctx context.Context) (WorkloadCluster, return c.workloadCluster, nil } - workloadCluster, err := c.managementCluster.GetWorkloadCluster(ctx, client.ObjectKeyFromObject(c.Cluster)) + workloadCluster, err := c.managementCluster.GetWorkloadCluster(ctx, c.Cluster, c.GetKeyEncryptionAlgorithm()) if err != nil { return nil, err } @@ -483,3 +485,12 @@ func (c *ControlPlane) StatusToLogKeyAndValues(newMachine, deletedMachine *clust "etcdMembers", strings.Join(etcdMembers, ", "), } } + +// GetKeyEncryptionAlgorithm returns the control plane EncryptionAlgorithm. +// If it's unset, the default encryption algorithm is returned. 
+func (c *ControlPlane) GetKeyEncryptionAlgorithm() bootstrapv1.EncryptionAlgorithmType { + if c.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.EncryptionAlgorithm == "" { + return bootstrapv1.EncryptionAlgorithmRSA2048 + } + return c.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.EncryptionAlgorithm +} diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index 121c459fb5b9..a7bd0df271e6 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -27,6 +27,7 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" ) @@ -122,17 +123,19 @@ func TestControlPlane(t *testing.T) { g.Expect(controlPlane.Machines).To(HaveLen(5)) - machinesNotUptoDate, machinesNotUptoDateConditionMessages := controlPlane.NotUpToDateMachines() + machinesNotUptoDate, machinesUpToDateResults := controlPlane.NotUpToDateMachines() g.Expect(machinesNotUptoDate.Names()).To(ConsistOf("m2", "m3")) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveLen(2)) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveKeyWithValue("m2", []string{"Version v1.29.0, v1.31.0 required"})) - g.Expect(machinesNotUptoDateConditionMessages).To(HaveKeyWithValue("m3", []string{"Version v1.29.3, v1.31.0 required"})) + // machinesUpToDateResults contains results for all Machines (including up-to-date Machines). + g.Expect(machinesUpToDateResults).To(HaveLen(5)) + g.Expect(machinesUpToDateResults["m2"].ConditionMessages).To(Equal([]string{"Version v1.29.0, v1.31.0 required"})) + g.Expect(machinesUpToDateResults["m3"].ConditionMessages).To(Equal([]string{"Version v1.29.3, v1.31.0 required"})) - machinesNeedingRollout, machinesNotUptoDateLogMessages := controlPlane.MachinesNeedingRollout() + machinesNeedingRollout, machinesUpToDateResults := controlPlane.MachinesNeedingRollout() g.Expect(machinesNeedingRollout.Names()).To(ConsistOf("m2")) - g.Expect(machinesNotUptoDateLogMessages).To(HaveLen(2)) - g.Expect(machinesNotUptoDateLogMessages).To(HaveKeyWithValue("m2", []string{"Machine version \"v1.29.0\" is not equal to KCP version \"v1.31.0\""})) - g.Expect(machinesNotUptoDateLogMessages).To(HaveKeyWithValue("m3", []string{"Machine version \"v1.29.3\" is not equal to KCP version \"v1.31.0\""})) + // machinesUpToDateResults contains results for all Machines (including up-to-date Machines). 
+ g.Expect(machinesUpToDateResults).To(HaveLen(5)) + g.Expect(machinesUpToDateResults["m2"].LogMessages).To(Equal([]string{"Machine version \"v1.29.0\" is not equal to KCP version \"v1.31.0\""})) + g.Expect(machinesUpToDateResults["m3"].LogMessages).To(Equal([]string{"Machine version \"v1.29.3\" is not equal to KCP version \"v1.31.0\""})) upToDateMachines := controlPlane.UpToDateMachines() g.Expect(upToDateMachines).To(HaveLen(3)) @@ -291,6 +294,160 @@ func TestHasHealthyMachineStillProvisioning(t *testing.T) { }) } +func TestMachinesToCompleteTriggerInPlaceUpdate(t *testing.T) { + machineWithoutAnnotations := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithoutAnnotations", + }, + } + machineWithUpdateInProgressAnnotation := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithUpdateInProgressAnnotation", + Annotations: map[string]string{ + clusterv1.UpdateInProgressAnnotation: "", + }, + }, + } + machineWithPendingHooksAnnotation := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithPendingHooksAnnotation", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "UpdateMachine", + }, + }, + } + machineWithBothAnnotations := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithBothAnnotations", + Annotations: map[string]string{ + clusterv1.UpdateInProgressAnnotation: "", + runtimev1.PendingHooksAnnotation: "UpdateMachine", + }, + }, + } + + tests := []struct { + name string + machine *clusterv1.Machine + completeTriggerInPlaceUpdate bool + }{ + { + name: "machineWithoutAnnotations => false", + machine: machineWithoutAnnotations, + completeTriggerInPlaceUpdate: false, + }, + { + name: "machineWithUpdateInProgressAnnotation => true", + machine: machineWithUpdateInProgressAnnotation, + completeTriggerInPlaceUpdate: true, + }, + { + name: "machineWithPendingHooksAnnotation => false", + machine: machineWithPendingHooksAnnotation, + completeTriggerInPlaceUpdate: false, + }, + { + name: "machineWithBothAnnotations => false", + machine: machineWithBothAnnotations, + completeTriggerInPlaceUpdate: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + c := ControlPlane{ + Machines: collections.FromMachines(tt.machine), + } + + if tt.completeTriggerInPlaceUpdate { + g.Expect(c.MachinesToCompleteTriggerInPlaceUpdate().Len()).To(Equal(1)) + g.Expect(c.MachinesToCompleteTriggerInPlaceUpdate().Has(tt.machine)).To(BeTrue()) + } else { + g.Expect(c.MachinesToCompleteTriggerInPlaceUpdate().Len()).To(Equal(0)) + } + }) + } +} + +func TestMachinesToCompleteInPlaceUpdate(t *testing.T) { + machineWithoutAnnotations := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithoutAnnotations", + }, + } + machineWithUpdateInProgressAnnotation := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithUpdateInProgressAnnotation", + Annotations: map[string]string{ + clusterv1.UpdateInProgressAnnotation: "", + }, + }, + } + machineWithPendingHooksAnnotation := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithPendingHooksAnnotation", + Annotations: map[string]string{ + runtimev1.PendingHooksAnnotation: "UpdateMachine", + }, + }, + } + machineWithBothAnnotations := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineWithBothAnnotations", + Annotations: map[string]string{ + clusterv1.UpdateInProgressAnnotation: "", + runtimev1.PendingHooksAnnotation: "UpdateMachine", + }, + }, + } + + tests := 
[]struct { + name string + machine *clusterv1.Machine + completeInPlaceUpdate bool + }{ + { + name: "machineWithoutAnnotations => false", + machine: machineWithoutAnnotations, + completeInPlaceUpdate: false, + }, + { + name: "machineWithUpdateInProgressAnnotation => true", + machine: machineWithUpdateInProgressAnnotation, + completeInPlaceUpdate: true, + }, + { + name: "machineWithPendingHooksAnnotation => true", + machine: machineWithPendingHooksAnnotation, + completeInPlaceUpdate: true, + }, + { + name: "machineWithBothAnnotations => true", + machine: machineWithBothAnnotations, + completeInPlaceUpdate: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + c := ControlPlane{ + Machines: collections.FromMachines(tt.machine), + } + + if tt.completeInPlaceUpdate { + g.Expect(c.MachinesToCompleteInPlaceUpdate().Len()).To(Equal(1)) + g.Expect(c.MachinesToCompleteInPlaceUpdate().Has(tt.machine)).To(BeTrue()) + } else { + g.Expect(c.MachinesToCompleteInPlaceUpdate().Len()).To(Equal(0)) + } + }) + } +} + func TestStatusToLogKeyAndValues(t *testing.T) { healthyMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "healthy"}, @@ -339,7 +496,7 @@ func TestStatusToLogKeyAndValues(t *testing.T) { c := &ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines(healthyMachine, machineWithoutNode, machineJustDeleted, machineNotUpToDate, machineMarkedForRemediation), - machinesNotUptoDate: collections.FromMachines(machineNotUpToDate), + MachinesNotUpToDate: collections.FromMachines(machineNotUpToDate), EtcdMembers: []*etcd.Member{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}, } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index ec7502a700c9..57ede403acd6 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -19,6 +19,7 @@ package controllers import ( "context" "fmt" + "slices" "sort" "strings" "time" @@ -35,7 +36,6 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -46,10 +46,13 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/contract" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" + "sigs.k8s.io/cluster-api/internal/util/inplace" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" @@ -64,11 +67,12 @@ import ( const ( kcpManagerName = "capi-kubeadmcontrolplane" + kcpMetadataManagerName = "capi-kubeadmcontrolplane-metadata" kubeadmControlPlaneKind = "KubeadmControlPlane" ) // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch -// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch +// 
+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete @@ -78,8 +82,10 @@ const ( // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object. type KubeadmControlPlaneReconciler struct { Client client.Client + APIReader client.Reader SecretCachingClient client.Client - controller controller.Controller + RuntimeClient runtimeclient.Client + controller capicontrollerutil.Controller recorder record.EventRecorder ClusterCache clustercache.ClusterCache @@ -95,6 +101,18 @@ type KubeadmControlPlaneReconciler struct { managementCluster internal.ManagementCluster managementClusterUncached internal.ManagementCluster ssaCache ssa.Cache + + // Only used for testing. + overrideTryInPlaceUpdateFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToInPlaceUpdate *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, ctrl.Result, error) + overrideScaleUpControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) + overrideScaleDownControlPlaneFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToDelete *clusterv1.Machine) (ctrl.Result, error) + overridePreflightChecksFunc func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result + overrideCanUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) + overrideCanExtensionsUpdateMachine func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) + overrideTriggerInPlaceUpdate func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) error + // Note: This field is only used for unit tests that use the fake client, because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch, and ssa.RemoveManagedFieldsForLabelsAndAnnotations would then fail. + disableRemoveManagedFieldsForLabelsAndAnnotations bool } func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { @@ -109,25 +127,23 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg "EtcdDialTimeout and EtcdCallTimeout must not be 0 and " + "RemoteConditionsGracePeriod must not be < 2m") } + if feature.Gates.Enabled(feature.InPlaceUpdates) && r.RuntimeClient == nil { + return errors.New("RuntimeClient must not be nil when InPlaceUpdates feature gate is enabled") + } predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "kubeadmcontrolplane") - c, err := ctrl.NewControllerManagedBy(mgr). + c, err := capicontrollerutil.NewControllerManagedBy(mgr, predicateLog). For(&controlplanev1.KubeadmControlPlane{}). - Owns(&clusterv1.Machine{}, builder.WithPredicates(predicates.ResourceIsChanged(mgr.GetScheme(), predicateLog))). + Owns(&clusterv1.Machine{}). WithOptions(options). 
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)). Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane), - builder.WithPredicates( - predicates.All(mgr.GetScheme(), predicateLog, - predicates.ResourceIsChanged(mgr.GetScheme(), predicateLog), - predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue), - predicates.Any(mgr.GetScheme(), predicateLog, - predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), - predicates.ClusterTopologyVersionChanged(mgr.GetScheme(), predicateLog), - ), - ), + predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue), + predicates.Any(mgr.GetScheme(), predicateLog, + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), + predicates.ClusterTopologyVersionChanged(mgr.GetScheme(), predicateLog), ), ). WatchesRawSource(r.ClusterCache.GetClusterSource("kubeadmcontrolplane", r.ClusterToKubeadmControlPlane, @@ -149,6 +165,7 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg EtcdDialTimeout: r.EtcdDialTimeout, EtcdCallTimeout: r.EtcdCallTimeout, EtcdLogger: r.EtcdLogger, + ClientCertCache: cache.New[internal.ClientCertEntry](24 * time.Hour), } } @@ -261,12 +278,6 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. res = ctrl.Result{RequeueAfter: 20 * time.Second} } } - - // Note: controller-runtime logs a warning that non-empty result is ignored - // if error is not nil, so setting result here to empty to avoid noisy warnings. - if reterr != nil { - res = ctrl.Result{} - } }() if !kcp.DeletionTimestamp.IsZero() { @@ -308,7 +319,7 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex return nil, true, r.adoptMachines(ctx, kcp, adoptableMachines, cluster) } - ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp)) + ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind())) if kcp.DeletionTimestamp.IsZero() && len(ownedMachines) != len(controlPlaneMachines) { err := errors.New("not all control plane machines are owned by this KubeadmControlPlane, refusing to operate in mixed management mode") log.Error(err, "KCP cannot reconcile") @@ -427,7 +438,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl } // Generate Cluster Kubeconfig if needed - if result, err := r.reconcileKubeconfig(ctx, controlPlane); !result.IsZero() || err != nil { + if result, err := r.reconcileKubeconfig(ctx, controlPlane); err != nil || !result.IsZero() { if err != nil { log.Error(err, "Failed to reconcile Kubeconfig") } @@ -435,6 +446,11 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl } if err := r.syncMachines(ctx, controlPlane); err != nil { + // Note: If any of the calls got a NotFound error, it means that at least one Machine got deleted. + // Let's return here so that the next Reconcile will get the updated list of Machines. + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil // Note: Requeue is not needed, changes to Machines trigger another reconcile. 
+ } return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines") } @@ -460,23 +476,48 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl return result, err } + // Complete triggering in-place update if necessary, for reentrancy if triggerInPlaceUpdate failed + // when triggering the in-place update initially. + if machines := controlPlane.MachinesToCompleteTriggerInPlaceUpdate(); len(machines) > 0 { + _, machinesUpToDateResults := controlPlane.NotUpToDateMachines() + for _, m := range machines { + if err := r.triggerInPlaceUpdate(ctx, m, machinesUpToDateResults[m.Name]); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil // Note: Changes to Machines trigger another reconcile. + } + // Reconcile unhealthy machines by triggering deletion and requeue if it is considered safe to remediate, // otherwise continue with the other KCP operations. if result, err := r.reconcileUnhealthyMachines(ctx, controlPlane); err != nil || !result.IsZero() { return result, err } + // Wait for in-place update to complete. + // Note: If a Machine becomes unhealthy during in-place update reconcileUnhealthyMachines above remediates it. + // Note: We have to wait here even if there are no more Machines that need rollout (in-place update in + // progress is not counted as needs rollout). + if machines := controlPlane.MachinesToCompleteInPlaceUpdate(); machines.Len() > 0 { + for _, machine := range machines { + log.Info(fmt.Sprintf("Waiting for in-place update of Machine %s to complete", machine.Name), "Machine", klog.KObj(machine)) + } + return ctrl.Result{}, nil // Note: Changes to Machines trigger another reconcile. + } + // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. - machinesNeedingRollout, machinesNeedingRolloutLogMessages := controlPlane.MachinesNeedingRollout() + machinesNeedingRollout, machinesUpToDateResults := controlPlane.MachinesNeedingRollout() switch { case len(machinesNeedingRollout) > 0: var allMessages []string - for machine, messages := range machinesNeedingRolloutLogMessages { - allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", machine, strings.Join(messages, ","))) + machinesNeedingRolloutNames := machinesNeedingRollout.Names() + slices.Sort(machinesNeedingRolloutNames) + for _, name := range machinesNeedingRolloutNames { + allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", name, strings.Join(machinesUpToDateResults[name].LogMessages, ", "))) } - log.Info(fmt.Sprintf("Rolling out Control Plane machines: %s", strings.Join(allMessages, ",")), "machinesNeedingRollout", machinesNeedingRollout.Names()) + log.Info(fmt.Sprintf("Machines need rollout: %s", strings.Join(machinesNeedingRolloutNames, ",")), "reason", strings.Join(allMessages, ", ")) v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateV1Beta1Condition, controlplanev1.RollingUpdateInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout)) - return r.upgradeControlPlane(ctx, controlPlane, machinesNeedingRollout) + return r.updateControlPlane(ctx, controlPlane, machinesNeedingRollout, machinesUpToDateResults) default: // make sure last upgrade operation is marked as completed. 
// NOTE: we are checking the condition already exists in order to avoid to set this condition at the first @@ -506,7 +547,12 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl case numMachines > desiredReplicas: log.Info("Scaling down control plane", "desired", desiredReplicas, "existing", numMachines) // The last parameter (i.e. machines needing to be rolled out) should always be empty here. - return r.scaleDownControlPlane(ctx, controlPlane, collections.Machines{}) + // Pick the Machine that we should scale down. + machineToDelete, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, controlPlane, collections.Machines{}) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") + } + return r.scaleDownControlPlane(ctx, controlPlane, machineToDelete) } // Get the workload cluster client. @@ -760,12 +806,6 @@ func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.C } // syncMachines updates Machines, InfrastructureMachines and KubeadmConfigs to propagate in-place mutable fields from KCP. -// Note: It also cleans up managed fields of all Machines so that Machines that were -// created/patched before (< v1.4.0) the controller adopted Server-Side-Apply (SSA) can also work with SSA. -// Note: For InfrastructureMachines and KubeadmConfigs it also drops ownership of "metadata.labels" and -// "metadata.annotations" from "manager" so that "capi-kubeadmcontrolplane" can own these fields and can work with SSA. -// Otherwise, fields would be co-owned by our "old" "manager" and "capi-kubeadmcontrolplane" and then we would not be -// able to e.g. drop labels and annotations. func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane *internal.ControlPlane) error { patchHelpers := map[string]*patch.Helper{} for machineName := range controlPlane.Machines { @@ -797,19 +837,17 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro continue } - // Cleanup managed fields of all Machines. - // We do this so that Machines that were created/patched before the controller adopted Server-Side-Apply (SSA) - // (< v1.4.0) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and - // "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, r.Client, m, kcpManagerName); err != nil { - return errors.Wrapf(err, "failed to update Machine: failed to adjust the managedFields of the Machine %s", klog.KObj(m)) - } // Update Machine to propagate in-place mutable fields from KCP. updatedMachine, err := r.updateMachine(ctx, m, controlPlane.KCP, controlPlane.Cluster) if err != nil { return errors.Wrapf(err, "failed to update Machine: %s", klog.KObj(m)) } + // Note: Ensure ControlPlane has the latest version of the Machine. This is required because + // e.g. the in-place update code that is called later has to use the latest version of the Machine. controlPlane.Machines[machineName] = updatedMachine + if _, ok := controlPlane.MachinesNotUpToDate[machineName]; ok { + controlPlane.MachinesNotUpToDate[machineName] = updatedMachine + } // Since the machine is updated, re-create the patch helper so that any subsequent // Patch calls use the correct base machine object to calculate the diffs. 
// Example: reconcileControlPlaneAndMachinesConditions patches the machine objects in a subsequent call @@ -824,23 +862,21 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro } patchHelpers[machineName] = patchHelper - labelsAndAnnotationsManagedFieldPaths := []contract.Path{ - {"f:metadata", "f:annotations"}, - {"f:metadata", "f:labels"}, - } infraMachine, infraMachineFound := controlPlane.InfraResources[machineName] // Only update the InfraMachine if it is already found, otherwise just skip it. // This could happen e.g. if the cache is not up-to-date yet. if infraMachineFound { - // Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations - // from "manager". We do this so that InfrastructureMachines that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11. + // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own + // labels and annotations and capi-kubeadmcontrolplane everything else. + // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, infraMachine, kcpManagerName, kcpMetadataManagerName); err != nil { return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine)) } // Update in-place mutating fields on InfrastructureMachine. - if err := r.updateExternalObject(ctx, infraMachine, controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine)) } } @@ -849,17 +885,17 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro // Only update the KubeadmConfig if it is already found, otherwise just skip it. // This could happen e.g. if the cache is not up-to-date yet. if kubeadmConfigFound { - // Note: Set the GroupVersionKind because updateExternalObject depends on it. - kubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) - // Cleanup managed fields of all KubeadmConfigs to drop ownership of labels and annotations - // from "manager". We do this so that KubeadmConfigs that are created using the Create method - // can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager" - // and "capi-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations. - if err := ssa.DropManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil { + // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11. 
+ // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own + // labels and annotations and capi-kubeadmcontrolplane everything else. + // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once. + // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can + // be removed with CAPI v1.15. + if err := ssa.MigrateManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, kcpMetadataManagerName); err != nil { return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig)) } // Update in-place mutating fields on BootstrapConfig. - if err := r.updateExternalObject(ctx, kubeadmConfig, controlPlane.KCP, controlPlane.Cluster); err != nil { + if err := r.updateLabelsAndAnnotations(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil { return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig)) } } @@ -980,16 +1016,17 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneAndMachinesConditio } func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal.ControlPlane) { - machinesNotUptoDate, machinesNotUptoDateConditionMessages := controlPlane.NotUpToDateMachines() + machinesNotUptoDate, machinesUpToDateResults := controlPlane.NotUpToDateMachines() machinesNotUptoDateNames := sets.New(machinesNotUptoDate.Names()...) for _, machine := range controlPlane.Machines { if machinesNotUptoDateNames.Has(machine.Name) { // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. message := "" - if reasons, ok := machinesNotUptoDateConditionMessages[machine.Name]; ok { - for i := range reasons { - reasons[i] = fmt.Sprintf("* %s", reasons[i]) + if machineUpToDateResult, ok := machinesUpToDateResults[machine.Name]; ok && len(machineUpToDateResult.ConditionMessages) > 0 { + var reasons []string + for _, conditionMessage := range machineUpToDateResult.ConditionMessages { + reasons = append(reasons, fmt.Sprintf("* %s", conditionMessage)) } message = strings.Join(reasons, "\n") } @@ -1000,9 +1037,23 @@ func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal Reason: clusterv1.MachineNotUpToDateReason, Message: message, }) + continue + } + if inplace.IsUpdateInProgress(machine) { + msg := "* In-place update in progress" + if c := conditions.Get(machine, clusterv1.MachineUpdatingCondition); c != nil && c.Status == metav1.ConditionTrue && c.Message != "" { + msg = fmt.Sprintf("* %s", c.Message) + } + conditions.Set(machine, metav1.Condition{ + Type: clusterv1.MachineUpToDateCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineUpToDateUpdatingReason, + Message: msg, + }) continue } + conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, @@ -1387,7 +1438,7 @@ func (r *KubeadmControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, k for i := range secrets.Items { s := secrets.Items[i] - if !util.IsOwnedByObject(&s, currentOwner) { + if !util.IsOwnedByObject(&s, currentOwner, bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind()) { continue } // avoid taking ownership of the bootstrap data secret diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 
4f2f82002283..7ee1bc1bcde8 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -33,8 +33,10 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/clientcmd" @@ -52,9 +54,9 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" @@ -68,6 +70,10 @@ import ( "sigs.k8s.io/cluster-api/util/test/builder" ) +const ( + timeout = time.Second * 30 +) + func TestClusterToKubeadmControlPlane(t *testing.T) { g := NewWithT(t) fakeClient := newFakeClient() @@ -520,7 +526,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -588,7 +594,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -602,10 +608,6 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { }, } cfg := &bootstrapv1.KubeadmConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: bootstrapv1.GroupVersion.String(), - Kind: "KubeadmConfig", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, @@ -703,7 +705,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -761,7 +763,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "test0", - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -983,6 +985,9 @@ func TestReconcileCertificateExpiries(t *testing.T) { cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault}) kcp := &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.30.0", + }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialization: controlplanev1.KubeadmControlPlaneInitializationStatus{ ControlPlaneInitialized: 
ptr.To(true), @@ -1524,7 +1529,7 @@ func TestReconcileInitializeControlPlane_withUserCA(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{}, }, } - g.Expect(env.Create(ctx, kcp)).To(Succeed()) + g.Expect(env.CreateAndWait(ctx, kcp)).To(Succeed()) corednsCM := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -1701,7 +1706,6 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { namespace, testCluster := setup(t, g) defer teardown(t, g, namespace, testCluster) - classicManager := "manager" duration5s := ptr.To(int32(5)) duration10s := ptr.To(int32(10)) @@ -1716,12 +1720,12 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { "metadata": map[string]interface{}{ "name": "existing-inframachine", "namespace": testCluster.Namespace, - "labels": map[string]string{ + "labels": map[string]interface{}{ "preserved-label": "preserved-value", "dropped-label": "dropped-value", "modified-label": "modified-value", }, - "annotations": map[string]string{ + "annotations": map[string]interface{}{ "preserved-annotation": "preserved-value", "dropped-annotation": "dropped-value", "modified-annotation": "modified-value", @@ -1735,18 +1739,12 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "existing-inframachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, } - // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. - g.Expect(env.Create(ctx, existingInfraMachine, client.FieldOwner("manager"))).To(Succeed()) // Existing KubeadmConfig bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ Users: []bootstrapv1.User{{Name: "test-user"}}, } existingKubeadmConfig := &bootstrapv1.KubeadmConfig{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "existing-kubeadmconfig", Namespace: namespace.Name, @@ -1768,16 +1766,10 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "existing-kubeadmconfig", APIGroup: bootstrapv1.GroupVersion.Group, } - // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. - g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed()) // Existing Machine to validate in-place mutation fd := "fd1" inPlaceMutatingMachine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "existing-machine", Namespace: namespace.Name, @@ -1800,7 +1792,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { InfrastructureRef: *infraMachineRef, Version: "v1.25.3", FailureDomain: fd, - ProviderID: "provider-id", + // ProviderID is intentionally not set here, this field is set by the Machine controller. Deletion: clusterv1.MachineDeletionSpec{ NodeDrainTimeoutSeconds: duration5s, NodeVolumeDetachTimeoutSeconds: duration5s, @@ -1808,15 +1800,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { }, }, } - // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. 
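Note: the classic create calls removed here (for example g.Expect(env.Create(ctx, existingInfraMachine, client.FieldOwner("manager"))).To(Succeed())) are replaced by the SSA-based "Create objects" block added further down in this test, which applies the objects with ssa.Patch(ctx, env.Client, kcpManagerName, obj) and, for the InfraMachine and KubeadmConfig, drops label/annotation ownership again via ssa.RemoveManagedFieldsForLabelsAndAnnotations. The managedFields assertions below rely on this: they expect Apply entries owned by capi-kubeadmcontrolplane / capi-kubeadmcontrolplane-metadata rather than Update entries owned by the old "manager" field owner.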
- g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner("manager"))).To(Succeed()) // Existing machine that is in deleting state deletingMachine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, ObjectMeta: metav1.ObjectMeta{ Name: "deleting-machine", Namespace: namespace.Name, @@ -1832,33 +1818,23 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "inframachine", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To("machine-bootstrap-secret"), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "non-existing-kubeadmconfig", + APIGroup: bootstrapv1.GroupVersion.Group, + }, }, Deletion: clusterv1.MachineDeletionSpec{ NodeDrainTimeoutSeconds: duration5s, NodeVolumeDetachTimeoutSeconds: duration5s, NodeDeletionTimeoutSeconds: duration5s, }, - ReadinessGates: mandatoryMachineReadinessGates, + ReadinessGates: desiredstate.MandatoryMachineReadinessGates, }, } - g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed()) - // Delete the machine to put it in the deleting state - g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed()) - // Wait till the machine is marked for deletion - g.Eventually(func() bool { - if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil { - return false - } - return !deletingMachine.DeletionTimestamp.IsZero() - }, 30*time.Second).Should(BeTrue()) // Existing machine that has a InfrastructureRef which does not exist. nilInfraMachineMachine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, ObjectMeta: metav1.ObjectMeta{ Name: "nil-infra-machine-machine", Namespace: namespace.Name, @@ -1874,18 +1850,16 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "inframachine", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To("machine-bootstrap-secret"), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfig", + Name: "non-existing-kubeadmconfig", + APIGroup: bootstrapv1.GroupVersion.Group, + }, }, }, } - g.Expect(env.Create(ctx, nilInfraMachineMachine, client.FieldOwner(classicManager))).To(Succeed()) - // Delete the machine to put it in the deleting state kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ UID: types.UID("abc-123-control-plane"), Name: "existing-kcp", @@ -1923,6 +1897,32 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { }, } + // + // Create objects + // + + // Create InfraMachine (same as in createInfraMachine) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, existingInfraMachine)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), existingInfraMachine, kcpManagerName)).To(Succeed()) + + // Create KubeadmConfig (same as in createKubeadmConfig) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, existingKubeadmConfig)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), existingKubeadmConfig, kcpManagerName)).To(Succeed()) + + // Create Machines (same as in createMachine) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, inPlaceMutatingMachine)).To(Succeed()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, 
deletingMachine)).To(Succeed()) + // Delete the machine to put it in the deleting state + g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed()) + // Wait till the machine is marked for deletion + g.Eventually(func() bool { + if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil { + return false + } + return !deletingMachine.DeletionTimestamp.IsZero() + }, timeout).Should(BeTrue()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, nilInfraMachineMachine)).To(Succeed()) + controlPlane := &internal.ControlPlane{ KCP: kcp, Cluster: testCluster, @@ -1948,66 +1948,67 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Run syncMachines to clean up managed fields and have proper field ownership // for Machines, InfrastructureMachines and KubeadmConfigs. reconciler := &KubeadmControlPlaneReconciler{ - Client: env, + // Note: Ensure the fieldManager defaults to manager like in prod. + // Otherwise it defaults to the binary name which is not manager in tests. + Client: client.WithFieldOwner(env.Client, "manager"), SecretCachingClient: secretCachingClient, ssaCache: ssa.NewCache("test-controller"), } g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) - // The inPlaceMutatingMachine should have cleaned up managed fields. - updatedInplaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) - // Verify ManagedFields - g.Expect(updatedInplaceMutatingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)), - "in-place mutable machine should contain an entry for SSA manager", - ) - g.Expect(updatedInplaceMutatingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)), - "in-place mutable machine should not contain an entry for old manager", - ) + updatedInPlaceMutatingMachine := inPlaceMutatingMachine.DeepCopy() + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedInPlaceMutatingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane owns almost everything. 
+ Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:pre-terminate.delete.hook.machine.cluster.x-k8s.io/kcp-cleanup\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}},\"f:ownerReferences\":{\"k:{\\\"uid\\\":\\\"abc-123-control-plane\\\"}\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:configRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}},\"f:clusterName\":{},\"f:deletion\":{\"f:nodeDeletionTimeoutSeconds\":{},\"f:nodeDrainTimeoutSeconds\":{},\"f:nodeVolumeDetachTimeoutSeconds\":{}},\"f:failureDomain\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}},\"f:readinessGates\":{\"k:{\\\"conditionType\\\":\\\"APIServerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"ControllerManagerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"EtcdMemberHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"EtcdPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"SchedulerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}}},\"f:version\":{}}}", + }}))) + }, timeout).Should(Succeed()) - // The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to - // "capi-kubeadmcontrolplane" manager. updatedInfraMachine := existingInfraMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedInfraMachine.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations. + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-kubeadmcontrolplane owns almost everything. 
+ Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: updatedInfraMachine.GetAPIVersion(), + FieldsV1: "{\"f:spec\":{\"f:infra-field\":{}}}", + }}))) + }, timeout).Should(Succeed()) - // Verify ManagedFields - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - // Verify ownership of other fields is not changed. - g.Expect(updatedInfraMachine.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) - - // The KubeadmConfig should have ownership of "labels" and "annotations" transferred to - // "capi-kubeadmcontrolplane" manager. updatedKubeadmConfig := existingKubeadmConfig.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) - - // Verify ManagedFields - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"})) - g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"})) - // Verify ownership of other fields is not changed. - g.Expect(updatedKubeadmConfig.GetManagedFields()).Should( - ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"})) + g.Eventually(func(g Gomega) { + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(updatedKubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations. + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: bootstrapv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:annotations\":{\"f:dropped-annotation\":{},\"f:modified-annotation\":{},\"f:preserved-annotation\":{}},\"f:labels\":{\"f:cluster.x-k8s.io/cluster-name\":{},\"f:cluster.x-k8s.io/control-plane\":{},\"f:cluster.x-k8s.io/control-plane-name\":{},\"f:dropped-label\":{},\"f:modified-label\":{},\"f:preserved-label\":{}}}}", + }, { + // capi-kubeadmcontrolplane owns almost everything. 
+ Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: bootstrapv1.GroupVersion.String(), + FieldsV1: "{\"f:spec\":{\"f:users\":{}}}", + }}))) + }, timeout).Should(Succeed()) // // Verify In-place mutating fields // - // Update KCP and verify the in-place mutating fields are propagated. + // Update the KCP and verify the in-place mutating fields are propagated. kcp.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ "preserved-label": "preserved-value", // Keep the label and value as is "modified-label": "modified-value-2", // Modify the value of the label @@ -2028,50 +2029,34 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds = duration10s kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds = duration10s kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration10s - - // Use the updated KCP. controlPlane.KCP = kcp g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed()) // Verify in-place mutable fields are updated on the Machine. - updatedInplaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed()) - // Verify Labels - g.Expect(updatedInplaceMutatingMachine.Labels).Should(Equal(expectedLabels)) - // Verify Annotations + updatedInPlaceMutatingMachine = inPlaceMutatingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInPlaceMutatingMachine), updatedInPlaceMutatingMachine)).To(Succeed()) + g.Expect(updatedInPlaceMutatingMachine.Labels).Should(Equal(expectedLabels)) expectedAnnotations := map[string]string{} for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { expectedAnnotations[k] = v } // The pre-terminate annotation should always be added expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(updatedInplaceMutatingMachine.Annotations).Should(Equal(expectedAnnotations)) - // Verify Node timeout values - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)), - )) - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)), - )) - g.Expect(updatedInplaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(And( - Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)), - )) + g.Expect(updatedInPlaceMutatingMachine.Annotations).Should(Equal(expectedAnnotations)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) // Verify that the non in-place mutating fields remain the same. 
- g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) - g.Expect(updatedInplaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID)) - g.Expect(updatedInplaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version)) - g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef)) - g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap)) + g.Expect(updatedInPlaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) + g.Expect(updatedInPlaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version)) + g.Expect(updatedInPlaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef)) + g.Expect(updatedInPlaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap)) // Verify in-place mutable fields are updated on InfrastructureMachine updatedInfraMachine = existingInfraMachine.DeepCopy() g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed()) - // Verify Labels g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations)) // Verify spec remains the same g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec)) @@ -2079,31 +2064,35 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are updated on the KubeadmConfig. updatedKubeadmConfig = existingKubeadmConfig.DeepCopy() g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed()) - // Verify Labels g.Expect(updatedKubeadmConfig.GetLabels()).Should(Equal(expectedLabels)) - // Verify Annotations g.Expect(updatedKubeadmConfig.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations)) // Verify spec remains the same g.Expect(updatedKubeadmConfig.Spec).Should(BeComparableTo(existingKubeadmConfig.Spec)) - // The deleting machine should not change. 
- updatedDeletingMachine := deletingMachine.DeepCopy() - g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) - // Verify ManagedFields - g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot( - ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)), - "deleting machine should not contain an entry for SSA manager", - ) - g.Expect(updatedDeletingMachine.ManagedFields).Should( - ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)), - "in-place mutable machine should still contain an entry for old manager", - ) + g.Eventually(func(g Gomega) { + updatedDeletingMachine := deletingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) + g.Expect(cleanupTime(updatedDeletingMachine.ManagedFields)).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane owns almost everything. + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:metadata\":{\"f:finalizers\":{\"v:\\\"testing-finalizer\\\"\":{}}},\"f:spec\":{\"f:bootstrap\":{\"f:configRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}}},\"f:clusterName\":{},\"f:infrastructureRef\":{\"f:apiGroup\":{},\"f:kind\":{},\"f:name\":{}},\"f:readinessGates\":{\"k:{\\\"conditionType\\\":\\\"APIServerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"ControllerManagerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}},\"k:{\\\"conditionType\\\":\\\"SchedulerPodHealthy\\\"}\":{\".\":{},\"f:conditionType\":{}}}}}", + }, { + // capi-kubeadmcontrolplane owns the fields that are propagated in-place for deleting Machines in syncMachines via patchHelper. + Manager: "manager", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: clusterv1.GroupVersion.String(), + FieldsV1: "{\"f:spec\":{\"f:deletion\":{\"f:nodeDeletionTimeoutSeconds\":{},\"f:nodeDrainTimeoutSeconds\":{},\"f:nodeVolumeDetachTimeoutSeconds\":{}}}}", + }}))) + }, timeout).Should(Succeed()) - // Verify the machine labels and annotations are unchanged. - g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) - g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) - // Verify Node timeout values + // Verify in-place mutable fields are updated on the deleting Machine. 
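For context on the expectation above: for Machines that are already being deleted, syncMachines does not go through SSA; it only propagates the node drain/deletion/volume-detach timeouts through a patch helper, which is why the "manager" field owner shows up with an Update operation for exactly those paths. The following is only a sketch of what such propagation could look like, assuming the public sigs.k8s.io/cluster-api/util/patch helper; the function name and signature are made up for illustration and are not part of this PR.

// Illustrative sketch only; not part of this PR.
package sketch

import (
	"context"

	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// propagateTimeoutsToDeletingMachine updates only the in-place mutable deletion timeouts
// via a patch helper, leaving labels and annotations on the deleting Machine untouched.
func propagateTimeoutsToDeletingMachine(ctx context.Context, c client.Client, kcp *controlplanev1.KubeadmControlPlane, m *clusterv1.Machine) error {
	patchHelper, err := patch.NewHelper(m, c)
	if err != nil {
		return err
	}
	m.Spec.Deletion.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds
	m.Spec.Deletion.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds
	m.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds
	return patchHelper.Patch(ctx, m)
}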
+ updatedDeletingMachine := deletingMachine.DeepCopy() + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) + g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) // Not propagated to a deleting Machine + g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) // Not propagated to a deleting Machine g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDrainTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds)) g.Expect(updatedDeletingMachine.Spec.Deletion.NodeDeletionTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds)) g.Expect(updatedDeletingMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds)) @@ -2333,6 +2322,91 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition }, }, }, + { + name: "Machines in place updating, machine not up-to-date date", + controlPlane: func() *internal.ControlPlane { + controlPlane, err := internal.NewControlPlane(ctx, nil, env.GetClient(), defaultCluster, defaultKCP.DeepCopy(), collections.FromMachines( + func() *clusterv1.Machine { + m := defaultMachine1.DeepCopy() + m.Annotations = map[string]string{ + clusterv1.UpdateInProgressAnnotation: "", + } + return m + }(), + )) + if err != nil { + panic(err) + } + return controlPlane + }(), + managementCluster: &fakeManagementCluster{ + Workload: &fakeWorkloadCluster{ + Workload: &internal.Workload{ + Client: fake.NewClientBuilder().Build(), + }, + }, + }, + lastProbeSuccessTime: now.Add(-3 * time.Minute), + expectKCPConditions: []metav1.Condition{ + { + Type: controlplanev1.KubeadmControlPlaneInitializedCondition, + Status: metav1.ConditionTrue, + Reason: controlplanev1.KubeadmControlPlaneInitializedReason, + }, + { + Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownReason, + Message: "* Machine machine1-test:\n" + + " * EtcdMemberHealthy: Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownReason, + Message: "* Machine machine1-test:\n" + + " * Control plane components: Waiting for a Node with spec.providerID foo to exist", + }, + }, + expectMachineConditions: []metav1.Condition{ + { + Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, + Message: "Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, + Message: "Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, + Message: "Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, + Status: 
metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, + Message: "Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedReason, + Message: "Waiting for a Node with spec.providerID foo to exist", + }, + { + Type: clusterv1.MachineUpToDateCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineUpToDateUpdatingReason, + Message: "* In-place update in progress", + }, + }, + }, { name: "Machines not up to date", controlPlane: func() *internal.ControlPlane { @@ -3919,17 +3993,25 @@ func TestObjectsPendingDelete(t *testing.T) { // test utils. -func newFakeClient(initObjs ...client.Object) client.Client { +func newFakeClient(initObjs ...client.Object) client.WithWatch { + // Use a new scheme to avoid side effects if multiple tests are sharing the same global scheme. + scheme := runtime.NewScheme() + _ = appsv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) return &fakeClient{ startTime: time.Now(), - Client: fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), + WithWatch: fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(), } } type fakeClient struct { startTime time.Time mux sync.Mutex - client.Client + client.WithWatch } type fakeClientI interface { @@ -3945,7 +4027,7 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie f.SetCreationTimestamp(metav1.NewTime(c.startTime)) c.mux.Unlock() } - return c.Client.Create(ctx, obj, opts...) + return c.WithWatch.Create(ctx, obj, opts...) } func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, *unstructured.Unstructured) { @@ -3961,10 +4043,6 @@ func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *contr } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: "KubeadmControlPlane", - }, ObjectMeta: metav1.ObjectMeta{ Name: kcpName, Namespace: namespace, @@ -4029,14 +4107,10 @@ func setKCPHealthy(kcp *controlplanev1.KubeadmControlPlane) { func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ready bool) (*clusterv1.Machine, *corev1.Node) { machine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), Annotations: map[string]string{}, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), @@ -4094,10 +4168,6 @@ func setMachineHealthy(m *clusterv1.Machine) { // newCluster return a CAPI cluster object. 
func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster { return &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: namespacedName.Namespace, Name: namespacedName.Name, diff --git a/controlplane/kubeadm/internal/controllers/fakes_test.go b/controlplane/kubeadm/internal/controllers/fakes_test.go index 8d819b52d6ad..77bc307a0d2d 100644 --- a/controlplane/kubeadm/internal/controllers/fakes_test.go +++ b/controlplane/kubeadm/internal/controllers/fakes_test.go @@ -49,7 +49,7 @@ func (f *fakeManagementCluster) List(ctx context.Context, list client.ObjectList return f.Reader.List(ctx, list, opts...) } -func (f *fakeManagementCluster) GetWorkloadCluster(_ context.Context, _ client.ObjectKey) (internal.WorkloadCluster, error) { +func (f *fakeManagementCluster) GetWorkloadCluster(_ context.Context, _ *clusterv1.Cluster, _ bootstrapv1.EncryptionAlgorithmType) (internal.WorkloadCluster, error) { return f.Workload, f.WorkloadErr } diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index bcc3c4518375..bff3a4d39047 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -25,9 +25,8 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime/schema" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,8 +35,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/internal/contract" - topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" @@ -47,19 +45,6 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) -// mandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns. -var mandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ - {ConditionType: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition}, -} - -// etcdMandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns if etcd is managed. 
-var etcdMandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ - {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition}, - {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition}, -} - func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) @@ -67,7 +52,6 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, if endpoint.IsZero() { return ctrl.Result{}, nil } - controllerOwnerRef := *metav1.NewControllerRef(controlPlane.KCP, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)) clusterName := util.ObjectKey(controlPlane.Cluster) configSecret, err := secret.GetFromNamespacedName(ctx, r.SecretCachingClient, clusterName, secret.Kubeconfig) @@ -79,6 +63,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, clusterName, endpoint.String(), controllerOwnerRef, + kubeconfig.KeyEncryptionAlgorithm(controlPlane.GetKeyEncryptionAlgorithm()), ) if errors.Is(createErr, kubeconfig.ErrDependentCertificateNotFound) { return ctrl.Result{RequeueAfter: dependentCertRequeueAfter}, nil @@ -94,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, } // only do rotation on owned secrets - if !util.IsControlledBy(configSecret, controlPlane.KCP) { + if !util.IsControlledBy(configSecret, controlPlane.KCP, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind()) { return ctrl.Result{}, nil } @@ -105,7 +90,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, if needsRotation { log.Info("Rotating kubeconfig secret") - if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret); err != nil { + if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret, kubeconfig.KeyEncryptionAlgorithm(controlPlane.GetKeyEncryptionAlgorithm())); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to regenerate kubeconfig") } } @@ -181,61 +166,32 @@ func (r *KubeadmControlPlaneReconciler) reconcileExternalReference(ctx context.C return patchHelper.Patch(ctx, obj) } -func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, bootstrapSpec *bootstrapv1.KubeadmConfigSpec, failureDomain string) (*clusterv1.Machine, error) { +func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, isJoin bool, failureDomain string) (*clusterv1.Machine, error) { var errs []error - // Compute desired Machine - machine, err := r.computeDesiredMachine(kcp, cluster, failureDomain, nil) + machine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, failureDomain, nil) if err != nil { - return nil, errors.Wrap(err, "failed to create Machine: failed to compute desired Machine") - } - - // Since the cloned resource should eventually have a controller ref for the Machine, we create an - // OwnerReference here without the Controller field set - infraCloneOwner := &metav1.OwnerReference{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: kubeadmControlPlaneKind, - Name: kcp.Name, - UID: kcp.UID, + return nil, errors.Wrap(err, "failed to create Machine") } - // Clone the infrastructure template - apiVersion, err := contract.GetAPIVersion(ctx, r.Client, 
kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind()) - if err != nil { - return nil, errors.Wrap(err, "failed to clone infrastructure template") - } - infraMachine, infraRef, err := external.CreateFromTemplate(ctx, &external.CreateFromTemplateInput{ - Client: r.Client, - TemplateRef: &corev1.ObjectReference{ - APIVersion: apiVersion, - Kind: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind, - Namespace: kcp.Namespace, - Name: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name, - }, - Namespace: kcp.Namespace, - Name: machine.Name, - OwnerRef: infraCloneOwner, - ClusterName: cluster.Name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), - Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, - }) + infraMachine, infraRef, err := r.createInfraMachine(ctx, kcp, cluster, machine.Name) if err != nil { // Safe to return early here since no resources have been created yet. v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedV1Beta1Condition, controlplanev1.InfrastructureTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error()) - return nil, errors.Wrap(err, "failed to clone infrastructure template") + return nil, errors.Wrap(err, "failed to create Machine") } machine.Spec.InfrastructureRef = infraRef // Clone the bootstrap configuration - bootstrapConfig, bootstrapRef, err := r.generateKubeadmConfig(ctx, kcp, cluster, bootstrapSpec, machine.Name) + bootstrapConfig, bootstrapRef, err := r.createKubeadmConfig(ctx, kcp, cluster, isJoin, machine.Name) if err != nil { v1beta1conditions.MarkFalse(kcp, controlplanev1.MachinesCreatedV1Beta1Condition, controlplanev1.BootstrapTemplateCloningFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "%s", err.Error()) - errs = append(errs, errors.Wrap(err, "failed to generate bootstrap config")) + errs = append(errs, errors.Wrap(err, "failed to create Machine")) } - // Only proceed to generating the Machine if we haven't encountered an error + // Only proceed to creating the Machine if we haven't encountered an error if len(errs) == 0 { machine.Spec.Bootstrap.ConfigRef = bootstrapRef @@ -249,7 +205,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte // If we encountered any errors, attempt to clean up any dangling resources if len(errs) > 0 { if err := r.cleanupFromGeneration(ctx, infraMachine, bootstrapConfig); err != nil { - errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources")) + errs = append(errs, errors.Wrap(err, "failed to cleanup created objects")) } return nil, kerrors.NewAggregate(errs) } @@ -272,61 +228,85 @@ func (r *KubeadmControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex return kerrors.NewAggregate(errs) } -func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KubeadmConfigSpec, name string) (*bootstrapv1.KubeadmConfig, clusterv1.ContractVersionedObjectReference, error) { - // Create an owner reference without a controller reference because the owning controller is the machine controller - owner := metav1.OwnerReference{ - APIVersion: controlplanev1.GroupVersion.String(), - Kind: kubeadmControlPlaneKind, - Name: kcp.Name, - UID: kcp.UID, +func (r *KubeadmControlPlaneReconciler) createInfraMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, name string) (*unstructured.Unstructured, clusterv1.ContractVersionedObjectReference, 
error) { + infraMachine, err := desiredstate.ComputeDesiredInfraMachine(ctx, r.Client, kcp, cluster, name, nil) + if err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") + } + + // Create the full object with capi-kubeadmcontrolplane. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-kubeadmcontrolplane-metadata can take ownership for them. + // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. + if err := ssa.Patch(ctx, r.Client, kcpManagerName, infraMachine); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") + } + + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on KubeadmConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, infraMachine, kcpManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create InfraMachine") + } + } + + return infraMachine, clusterv1.ContractVersionedObjectReference{ + APIGroup: infraMachine.GroupVersionKind().Group, + Kind: infraMachine.GetKind(), + Name: infraMachine.GetName(), + }, nil +} + +func (r *KubeadmControlPlaneReconciler) createKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, isJoin bool, name string) (*bootstrapv1.KubeadmConfig, clusterv1.ContractVersionedObjectReference, error) { + kubeadmConfig, err := desiredstate.ComputeDesiredKubeadmConfig(kcp, cluster, isJoin, name, nil) + if err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") } - bootstrapConfig := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: kcp.Namespace, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), - Annotations: kcp.Spec.MachineTemplate.ObjectMeta.Annotations, - OwnerReferences: []metav1.OwnerReference{owner}, - }, - Spec: *spec, + // Create the full object with capi-kubeadmcontrolplane. + // Below ssa.RemoveManagedFieldsForLabelsAndAnnotations will drop ownership for labels and annotations + // so that in a subsequent syncMachines call capi-kubeadmcontrolplane-metadata can take ownership for them. + // Note: This is done in way that it does not rely on managedFields being stored in the cache, so we can optimize + // memory usage by dropping managedFields before storing objects in the cache. 
+ if err := ssa.Patch(ctx, r.Client, kcpManagerName, kubeadmConfig); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") } - if err := r.Client.Create(ctx, bootstrapConfig); err != nil { - return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrap(err, "failed to create bootstrap configuration") + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on KubeadmConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + if !r.disableRemoveManagedFieldsForLabelsAndAnnotations { + if err := ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, r.Client, r.APIReader, kubeadmConfig, kcpManagerName); err != nil { + return nil, clusterv1.ContractVersionedObjectReference{}, errors.Wrapf(err, "failed to create KubeadmConfig") + } } - return bootstrapConfig, clusterv1.ContractVersionedObjectReference{ + return kubeadmConfig, clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfig", - Name: bootstrapConfig.GetName(), + Name: kubeadmConfig.GetName(), }, nil } -// updateExternalObject updates the external object with the labels and annotations from KCP. -func (r *KubeadmControlPlaneReconciler) updateExternalObject(ctx context.Context, obj client.Object, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { +// updateLabelsAndAnnotations updates the external object with the labels and annotations from KCP. +func (r *KubeadmControlPlaneReconciler) updateLabelsAndAnnotations(ctx context.Context, obj client.Object, objGVK schema.GroupVersionKind, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { updatedObject := &unstructured.Unstructured{} - updatedObject.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + updatedObject.SetGroupVersionKind(objGVK) updatedObject.SetNamespace(obj.GetNamespace()) updatedObject.SetName(obj.GetName()) // Set the UID to ensure that Server-Side-Apply only performs an update // and does not perform an accidental create. updatedObject.SetUID(obj.GetUID()) - // Update labels - updatedObject.SetLabels(internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name)) - // Update annotations - updatedObject.SetAnnotations(kcp.Spec.MachineTemplate.ObjectMeta.Annotations) + updatedObject.SetLabels(desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name)) + updatedObject.SetAnnotations(desiredstate.ControlPlaneMachineAnnotations(kcp)) - if err := ssa.Patch(ctx, r.Client, kcpManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}); err != nil { - return errors.Wrapf(err, "failed to update %s", obj.GetObjectKind().GroupVersionKind().Kind) - } - return nil + return ssa.Patch(ctx, r.Client, kcpMetadataManagerName, updatedObject, ssa.WithCachingProxy{Cache: r.ssaCache, Original: obj}) } func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) error { if err := ssa.Patch(ctx, r.Client, kcpManagerName, machine); err != nil { - return errors.Wrap(err, "failed to create Machine") + return err } // Remove the annotation tracking that a remediation is in progress (the remediation completed when // the replacement machine has been created above). 
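To summarize the ownership model that createInfraMachine, createKubeadmConfig and updateLabelsAndAnnotations establish: the full object is applied under capi-kubeadmcontrolplane, ownership of labels and annotations is then dropped from that manager, and a later metadata-only apply under capi-kubeadmcontrolplane-metadata takes them over. Below is a minimal sketch of the resulting end state expressed with plain controller-runtime Server-Side-Apply; the PR itself goes through the internal ssa helpers shown above, and the function name and object content here are made up for illustration.

// Illustrative sketch only; not part of this PR.
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyWithSplitOwnership applies the full object as "capi-kubeadmcontrolplane" and then
// (re)applies only labels/annotations as "capi-kubeadmcontrolplane-metadata", so the two
// field managers end up owning disjoint parts of the object.
func applyWithSplitOwnership(ctx context.Context, c client.Client, obj *unstructured.Unstructured) error {
	// Full object apply; this manager owns spec, ownerReferences, etc.
	if err := c.Patch(ctx, obj, client.Apply, client.FieldOwner("capi-kubeadmcontrolplane"), client.ForceOwnership); err != nil {
		return err
	}

	// Metadata-only apply, similar to what updateLabelsAndAnnotations does via ssa.Patch.
	meta := &unstructured.Unstructured{}
	meta.SetGroupVersionKind(obj.GroupVersionKind())
	meta.SetNamespace(obj.GetNamespace())
	meta.SetName(obj.GetName())
	// Setting the UID ensures Server-Side-Apply only updates and never accidentally creates.
	meta.SetUID(obj.GetUID())
	meta.SetLabels(obj.GetLabels())
	meta.SetAnnotations(obj.GetAnnotations())
	return c.Patch(ctx, meta, client.Apply, client.FieldOwner("capi-kubeadmcontrolplane-metadata"), client.ForceOwnership)
}

In the PR, the first manager's managedFields entry is additionally stripped of labels and annotations (ssa.RemoveManagedFieldsForLabelsAndAnnotations), so ownership of those keys does not remain shared between the two managers.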
@@ -335,167 +315,14 @@ func (r *KubeadmControlPlaneReconciler) createMachine(ctx context.Context, kcp * } func (r *KubeadmControlPlaneReconciler) updateMachine(ctx context.Context, machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) (*clusterv1.Machine, error) { - updatedMachine, err := r.computeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) + updatedMachine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) if err != nil { - return nil, errors.Wrap(err, "failed to update Machine: failed to compute desired Machine") + return nil, errors.Wrap(err, "failed to apply Machine") } err = ssa.Patch(ctx, r.Client, kcpManagerName, updatedMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: machine}) if err != nil { - return nil, errors.Wrap(err, "failed to update Machine") + return nil, err } return updatedMachine, nil } - -// computeDesiredMachine computes the desired Machine. -// This Machine will be used during reconciliation to: -// * create a new Machine -// * update an existing Machine -// Because we are using Server-Side-Apply we always have to calculate the full object. -// There are small differences in how we calculate the Machine depending on if it -// is a create or update. Example: for a new Machine we have to calculate a new name, -// while for an existing Machine we have to use the name of the existing Machine. -func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { - var machineName string - var machineUID types.UID - var version string - annotations := map[string]string{} - if existingMachine == nil { - // Creating a new machine - nameTemplate := "{{ .kubeadmControlPlane.name }}-{{ .random }}" - if kcp.Spec.MachineNaming.Template != "" { - nameTemplate = kcp.Spec.MachineNaming.Template - if !strings.Contains(nameTemplate, "{{ .random }}") { - return nil, errors.New("cannot generate Machine name: {{ .random }} is missing in machineNaming.template") - } - } - generatedMachineName, err := topologynames.KCPMachineNameGenerator(nameTemplate, cluster.Name, kcp.Name).GenerateName() - if err != nil { - return nil, errors.Wrap(err, "failed to generate Machine name") - } - machineName = generatedMachineName - version = kcp.Spec.Version - - // Machine's bootstrap config may be missing ClusterConfiguration if it is not the first machine in the control plane. - // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. - // Nb. This annotation is read when comparing the KubeadmConfig to check if a machine needs to be rolled out. - clusterConfigurationAnnotation, err := internal.ClusterConfigurationToMachineAnnotationValue(&kcp.Spec.KubeadmConfigSpec.ClusterConfiguration) - if err != nil { - return nil, err - } - annotations[controlplanev1.KubeadmClusterConfigurationAnnotation] = clusterConfigurationAnnotation - - // In case this machine is being created as a consequence of a remediation, then add an annotation - // tracking remediating data. - // NOTE: This is required in order to track remediation retries. 
- if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } else { - // Updating an existing machine - machineName = existingMachine.Name - machineUID = existingMachine.UID - version = existingMachine.Spec.Version - - // For existing machine only set the ClusterConfiguration annotation if the machine already has it. - // We should not add the annotation if it was missing in the first place because we do not have enough - // information. - if clusterConfigurationAnnotation, ok := existingMachine.Annotations[controlplanev1.KubeadmClusterConfigurationAnnotation]; ok { - // In case the annotation is outdated, update it. - if internal.ClusterConfigurationAnnotationFromMachineIsOutdated(clusterConfigurationAnnotation) { - clusterConfiguration, err := internal.ClusterConfigurationFromMachine(existingMachine) - if err != nil { - return nil, err - } - - clusterConfigurationAnnotation, err = internal.ClusterConfigurationToMachineAnnotationValue(clusterConfiguration) - if err != nil { - return nil, err - } - } - annotations[controlplanev1.KubeadmClusterConfigurationAnnotation] = clusterConfigurationAnnotation - } - - // If the machine already has remediation data then preserve it. - // NOTE: This is required in order to track remediation retries. - if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } - // Setting pre-terminate hook so we can later remove the etcd member right before Machine termination - // (i.e. before InfraMachine deletion). - annotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - - // Construct the basic Machine. - desiredMachine := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Machine", - }, - ObjectMeta: metav1.ObjectMeta{ - UID: machineUID, - Name: machineName, - Namespace: kcp.Namespace, - // Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine. - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)), - }, - Labels: map[string]string{}, - Annotations: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: version, - FailureDomain: failureDomain, - }, - } - - // Set the in-place mutable fields. - // When we create a new Machine we will just create the Machine with those fields. - // When we update an existing Machine will we update the fields on the existing Machine (in-place mutate). - - // Set labels - desiredMachine.Labels = internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name) - - // Set annotations - // Add the annotations from the MachineTemplate. - // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. 
- for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { - desiredMachine.Annotations[k] = v - } - for k, v := range annotations { - desiredMachine.Annotations[k] = v - } - - // Set other in-place mutable fields - desiredMachine.Spec.Deletion.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds - desiredMachine.Spec.Deletion.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds - desiredMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds - - // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. - if existingMachine != nil { - desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef - desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef - } - - // Set machines readiness gates - allReadinessGates := []clusterv1.MachineReadinessGate{} - allReadinessGates = append(allReadinessGates, mandatoryMachineReadinessGates...) - isEtcdManaged := !kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() - if isEtcdManaged { - allReadinessGates = append(allReadinessGates, etcdMandatoryMachineReadinessGates...) - } - allReadinessGates = append(allReadinessGates, kcp.Spec.MachineTemplate.Spec.ReadinessGates...) - - desiredMachine.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{} - knownGates := sets.Set[string]{} - for _, gate := range allReadinessGates { - if knownGates.Has(gate.ConditionType) { - continue - } - desiredMachine.Spec.ReadinessGates = append(desiredMachine.Spec.ReadinessGates, gate) - knownGates.Insert(gate.ConditionType) - } - - return desiredMachine, nil -} diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 4aecf1b5175e..5ec5a38eb98f 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -17,25 +17,31 @@ limitations under the License. package controllers import ( - "fmt" + "context" + "strings" "testing" . 
"github.com/onsi/gomega" - gomegatypes "github.com/onsi/gomega/types" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + clientutil "sigs.k8s.io/cluster-api/internal/util/client" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/util/collections" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" @@ -46,10 +52,6 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -60,10 +62,6 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -102,10 +100,6 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -116,10 +110,6 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -157,10 +147,6 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -171,10 +157,6 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -232,10 +214,6 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -246,10 +224,6 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: 
metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: metav1.NamespaceDefault, @@ -294,7 +268,7 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { g.Expect(kubeconfigSecret.Labels).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, cluster.Name)) } -func TestCloneConfigsAndGenerateMachine(t *testing.T) { +func TestCloneConfigsAndGenerateMachineAndSyncMachines(t *testing.T) { setup := func(t *testing.T, g *WithT) *corev1.Namespace { t.Helper() @@ -350,6 +324,18 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { UID: "abc-123-kcp-control-plane", }, Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + }, + }, MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ @@ -369,39 +355,183 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { r := &KubeadmControlPlaneReconciler{ Client: env, SecretCachingClient: secretCachingClient, + ssaCache: ssa.NewCache("test-controller"), recorder: record.NewFakeRecorder(32), } - bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{} - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") g.Expect(err).To(Succeed()) machineList := &clusterv1.MachineList{} g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(machineList.Items).To(HaveLen(1)) - for i := range machineList.Items { - m := machineList.Items[i] - g.Expect(m.Namespace).To(Equal(cluster.Namespace)) - g.Expect(m.Name).NotTo(BeEmpty()) - g.Expect(m.Name).To(HavePrefix(kcp.Name + namingTemplateKey)) + m := machineList.Items[0] + g.Expect(m.Namespace).To(Equal(cluster.Namespace)) + g.Expect(m.Name).NotTo(BeEmpty()) + g.Expect(m.Name).To(HavePrefix(kcp.Name + namingTemplateKey)) + g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) + g.Expect(m.Spec.InfrastructureRef.APIGroup).To(Equal(genericInfrastructureMachineTemplate.GroupVersionKind().Group)) + g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) - infraObj, err := external.GetObjectFromContractVersionedRef(ctx, r.Client, m.Spec.InfrastructureRef, m.Namespace) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) - g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) + g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) + g.Expect(m.Spec.Bootstrap.ConfigRef.APIGroup).To(Equal(bootstrapv1.GroupVersion.Group)) + g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) + + infraObj, err := external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(infraObj.GetOwnerReferences()).To(HaveLen(1)) + 
g.Expect(infraObj.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName())) + g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String())) + // Note: capi-kubeadmcontrolplane should own ownerReferences and spec, labels and annotations should be orphaned. + // Labels and annotations will be owned by capi-kubeadmcontrolplane-metadata after the next update + // of labels and annotations. + g.Expect(cleanupTime(infraObj.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:hello":{} +}}`, + }}))) + + kubeadmConfig := &bootstrapv1.KubeadmConfig{} + err = env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(kubeadmConfig.OwnerReferences).To(HaveLen(1)) + g.Expect(kubeadmConfig.OwnerReferences).To(ContainElement(metav1.OwnerReference{ + Kind: "KubeadmControlPlane", + APIVersion: controlplanev1.GroupVersion.String(), + Name: kcp.Name, + UID: kcp.UID, + })) + g.Expect(kubeadmConfig.Spec.InitConfiguration).To(BeComparableTo(bootstrapv1.InitConfiguration{})) + expectedJoinConfiguration := kcp.Spec.KubeadmConfigSpec.JoinConfiguration.DeepCopy() + expectedJoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{} + g.Expect(kubeadmConfig.Spec.JoinConfiguration).To(BeComparableTo(*expectedJoinConfiguration)) + // Note: capi-kubeadmcontrolplane should own ownerReferences and spec, labels and annotations should be orphaned. + // Labels and annotations will be owned by capi-kubeadmcontrolplane-metadata after the next update + // of labels and annotations. + g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:joinConfiguration":{ + "f:controlPlane":{}, + "f:nodeRegistration":{ + "f:kubeletExtraArgs":{ + "k:{\"name\":\"v\",\"value\":\"8\"}":{ + ".":{},"f:name":{},"f:value":{}} + } + } + } +}}`, + }}))) - g.Expect(m.Spec.InfrastructureRef.Name).To(Equal(m.Name)) - g.Expect(m.Spec.InfrastructureRef.APIGroup).To(Equal(genericInfrastructureMachineTemplate.GroupVersionKind().Group)) - g.Expect(m.Spec.InfrastructureRef.Kind).To(Equal("GenericInfrastructureMachine")) + // Sync Machines - g.Expect(m.Spec.Bootstrap.ConfigRef.Name).To(Equal(m.Name)) - g.Expect(m.Spec.Bootstrap.ConfigRef.APIGroup).To(Equal(bootstrapv1.GroupVersion.Group)) - g.Expect(m.Spec.Bootstrap.ConfigRef.Kind).To(Equal("KubeadmConfig")) - } + // Note: Ensure the client observed the latest objects so syncMachines below is not failing with conflict errors. 
+ // Note: Not adding a WaitForCacheToBeUpToDate for infraObj for now as we didn't have test flakes because of it and + // WaitForCacheToBeUpToDate does not support Unstructured as of now. + g.Expect(clientutil.WaitForCacheToBeUpToDate(ctx, r.Client, "cloneConfigsAndGenerateMachine", &m)).To(Succeed()) + g.Expect(clientutil.WaitForCacheToBeUpToDate(ctx, r.Client, "cloneConfigsAndGenerateMachine", kubeadmConfig)).To(Succeed()) + + controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, collections.FromMachines(&m)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(r.syncMachines(ctx, controlPlane)).To(Succeed()) + + // Verify managedFields again. + infraObj, err = external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cleanupTime(infraObj.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{}, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/control-plane":{}, + "f:cluster.x-k8s.io/control-plane-name":{} + } +}}`, + }, { + // capi-kubeadmcontrolplane owns ownerReferences and spec + APIVersion: infraObj.GetAPIVersion(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:hello":{} +}}`, + }}))) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig)).To(Succeed()) + g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{ + // capi-kubeadmcontrolplane-metadata owns labels and annotations + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpMetadataManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:annotations":{}, + "f:labels":{ + "f:cluster.x-k8s.io/cluster-name":{}, + "f:cluster.x-k8s.io/control-plane":{}, + "f:cluster.x-k8s.io/control-plane-name":{} + } +}}`, + }, { + // capi-kubeadmcontrolplane owns ownerReferences and spec + APIVersion: bootstrapv1.GroupVersion.String(), + Manager: kcpManagerName, + Operation: metav1.ManagedFieldsOperationApply, + FieldsV1: `{ +"f:metadata":{ + "f:ownerReferences":{ + "k:{\"uid\":\"abc-123-kcp-control-plane\"}":{} + } +}, +"f:spec":{ + "f:joinConfiguration":{ + "f:controlPlane":{}, + "f:nodeRegistration":{ + "f:kubeletExtraArgs":{ + "k:{\"name\":\"v\",\"value\":\"8\"}":{ + ".":{},"f:name":{},"f:value":{}} + } + } + } +}}`, + }}))) } -func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { +func TestCloneConfigsAndGenerateMachineFailInfraMachineCreation(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ @@ -456,587 +586,215 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { recorder: record.NewFakeRecorder(32), } - bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - } - - // Try to break Infra Cloning + // Break InfraMachine cloning kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name = "something_invalid" - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") + _, err := 
r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") g.Expect(err).To(HaveOccurred()) g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ Type: controlplanev1.MachinesCreatedV1Beta1Condition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityError, Reason: controlplanev1.InfrastructureTemplateCloningFailedV1Beta1Reason, - Message: "failed to retrieve GenericInfrastructureMachineTemplate default/something_invalid: genericinfrastructuremachinetemplates.infrastructure.cluster.x-k8s.io \"something_invalid\" not found", + Message: "failed to create InfraMachine: failed to compute desired InfraMachine: failed to retrieve GenericInfrastructureMachineTemplate default/something_invalid: genericinfrastructuremachinetemplates.infrastructure.cluster.x-k8s.io \"something_invalid\" not found", })) + // No objects should exist. + machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind + "List", + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } -func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { - namingTemplateKey := "-kcp" - kcpName := "testControlPlane" - clusterName := "testCluster" +func TestCloneConfigsAndGenerateMachineFailKubeadmConfigCreation(t *testing.T) { + g := NewWithT(t) cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, + Name: "foo", Namespace: metav1.NamespaceDefault, }, } - duration5s := ptr.To(int32(5)) - duration10s := ptr.To(int32(10)) - kcpMachineTemplateObjectMeta := clusterv1.ObjectMeta{ - Labels: map[string]string{ - "machineTemplateLabel": "machineTemplateLabelValue", - }, - Annotations: map[string]string{ - "machineTemplateAnnotation": "machineTemplateAnnotationValue", - }, - } - kcpMachineTemplateObjectMetaCopy := kcpMachineTemplateObjectMeta.DeepCopy() - infraRef := &clusterv1.ContractVersionedObjectReference{ - Kind: "InfraKind", - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Name: "infra", - } - bootstrapRef := clusterv1.ContractVersionedObjectReference{ - Kind: "BootstrapKind", - APIGroup: clusterv1.GroupVersionBootstrap.Group, - Name: "bootstrap", - } - - tests := []struct { - name string - kcp *controlplanev1.KubeadmControlPlane - isUpdatingExistingMachine bool - existingClusterConfigurationAnnotation string - want []gomegatypes.GomegaMatcher - wantClusterConfigurationAnnotation string - wantErr bool - }{ - { - name: "should return the correct Machine object when creating a new Machine", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: 
controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", - }, - }, - }, - isUpdatingExistingMachine: false, - want: []gomegatypes.GomegaMatcher{ - HavePrefix(kcpName + namingTemplateKey), - Not(HaveSuffix("00000")), - }, - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"certificatesDir\":\"foo\"}", - wantErr: false, - }, - { - name: "should return error when creating a new Machine when '.random' is not added in template", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey, - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: true, - }, - { - name: "should not return error when creating a new Machine when the generated name exceeds 63", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .random }}" + fmt.Sprintf("%059d", 0), - }, - }, - }, - isUpdatingExistingMachine: false, - want: []gomegatypes.GomegaMatcher{ - ContainSubstring(fmt.Sprintf("%053d", 0)), - Not(HaveSuffix("00000")), - }, - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"certificatesDir\":\"foo\"}", - wantErr: false, - }, - { - name: "should return error when creating a new Machine with invalid template", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: 
controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "some-hardcoded-name-{{ .doesnotexistindata }}-{{ .random }}", // invalid template - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: true, - }, - { - name: "should return the correct Machine object when creating a new Machine with default templated name", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - }, - }, - isUpdatingExistingMachine: false, - wantErr: false, - want: []gomegatypes.GomegaMatcher{ - HavePrefix(kcpName), - Not(HaveSuffix("00000")), - }, - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"certificatesDir\":\"foo\"}", - }, - { - name: "should return the correct Machine object when creating a new Machine with additional kcp readinessGates", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Bar", - }, - }, - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - }, + genericMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-foo", + "namespace": cluster.Namespace, }, - isUpdatingExistingMachine: false, - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"certificatesDir\":\"foo\"}", - wantErr: false, - }, - { - name: "should return the correct Machine object when updating an existing Machine (empty ClusterConfiguration annotation)", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: 
controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", }, }, }, - isUpdatingExistingMachine: true, - existingClusterConfigurationAnnotation: "", - wantClusterConfigurationAnnotation: "", - wantErr: false, }, - { - name: "should return the correct Machine object when updating an existing Machine (outdated ClusterConfiguration annotation)", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", - }, - }, - }, - isUpdatingExistingMachine: true, + } - existingClusterConfigurationAnnotation: "{\"etcd\":{},\"apiServer\":{\"extraArgs\":{\"foo\":\"bar\"}},\"certificatesDir\":\"foo\"}", - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"certificatesDir\":\"foo\"}", - wantErr: false, + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, }, - { - name: "should return the correct Machine object when updating an existing Machine (up to date ClusterConfiguration annotation)", - kcp: &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: kcpName, - Namespace: cluster.Namespace, - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.16.6", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: duration5s, - NodeDeletionTimeoutSeconds: duration5s, - NodeVolumeDetachTimeoutSeconds: duration5s, - }, - ReadinessGates: []clusterv1.MachineReadinessGate{ - { - ConditionType: "Foo", - }, - }, - }, - }, - 
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - MachineNaming: controlplanev1.MachineNamingSpec{ - Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: genericMachineTemplate.GetKind(), + APIGroup: genericMachineTemplate.GroupVersionKind().Group, + Name: genericMachineTemplate.GetName(), }, }, }, - isUpdatingExistingMachine: true, - existingClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"etcd\":{},\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{},\"scheduler\":{},\"dns\":{},\"certificatesDir\":\"foo\"}", - wantClusterConfigurationAnnotation: "{\"marshalVersion\":\"v1beta2\",\"etcd\":{},\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{},\"scheduler\":{},\"dns\":{},\"certificatesDir\":\"foo\"}", - wantErr: false, + Version: "v1.16.6", }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var desiredMachine *clusterv1.Machine - failureDomain := "fd-1" - var expectedMachineSpec clusterv1.MachineSpec - var err error - - if tt.isUpdatingExistingMachine { - machineName := "existing-machine" - machineUID := types.UID("abc-123-existing-machine") - // Use different ClusterConfiguration string than the information present in KCP - // to verify that for an existing machine we do not override this information. - remediationData := "remediation-data" - machineVersion := "v1.25.3" - existingMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: machineName, - UID: machineUID, - Annotations: map[string]string{ - controlplanev1.RemediationForAnnotation: remediationData, - }, - }, - Spec: clusterv1.MachineSpec{ - Version: machineVersion, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: duration10s, - NodeDeletionTimeoutSeconds: duration10s, - NodeVolumeDetachTimeoutSeconds: duration10s, - }, - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - InfrastructureRef: *infraRef, - ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Foo"}}, - }, - } - if tt.existingClusterConfigurationAnnotation != "" { - existingMachine.Annotations[controlplanev1.KubeadmClusterConfigurationAnnotation] = tt.existingClusterConfigurationAnnotation - } - - desiredMachine, err = (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( - tt.kcp, cluster, - existingMachine.Spec.FailureDomain, existingMachine, - ) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - expectedMachineSpec = clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: machineVersion, // Should use the Machine version and not the version from KCP. 
- Bootstrap: clusterv1.Bootstrap{ - ConfigRef: bootstrapRef, - }, - InfrastructureRef: *infraRef, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, - NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, - NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, - }, - ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), - } - - // Verify the Name and UID of the Machine remain unchanged - g.Expect(desiredMachine.Name).To(Equal(machineName)) - g.Expect(desiredMachine.UID).To(Equal(machineUID)) - // Verify annotations. - expectedAnnotations := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Annotations { - expectedAnnotations[k] = v - } - if tt.wantClusterConfigurationAnnotation != "" { - expectedAnnotations[controlplanev1.KubeadmClusterConfigurationAnnotation] = tt.wantClusterConfigurationAnnotation - } - expectedAnnotations[controlplanev1.RemediationForAnnotation] = remediationData - // The pre-terminate annotation should always be added - expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) - } else { - desiredMachine, err = (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( - tt.kcp, cluster, - failureDomain, nil, - ) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - - expectedMachineSpec = clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: tt.kcp.Spec.Version, - FailureDomain: failureDomain, - Deletion: clusterv1.MachineDeletionSpec{ - NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, - NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, - NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, - }, - ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), - } - // Verify Name. - for _, matcher := range tt.want { - g.Expect(desiredMachine.Name).To(matcher) - } - // Verify annotations. - expectedAnnotations := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Annotations { - expectedAnnotations[k] = v - } - expectedAnnotations[controlplanev1.KubeadmClusterConfigurationAnnotation] = tt.wantClusterConfigurationAnnotation - // The pre-terminate annotation should always be added - expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" - g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) - } - - g.Expect(desiredMachine.Namespace).To(Equal(tt.kcp.Namespace)) - g.Expect(desiredMachine.OwnerReferences).To(HaveLen(1)) - g.Expect(desiredMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(tt.kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) - g.Expect(desiredMachine.Spec).To(BeComparableTo(expectedMachineSpec)) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD) - // Verify that the machineTemplate.ObjectMeta has been propagated to the Machine. - // Verify labels. 
- expectedLabels := map[string]string{} - for k, v := range kcpMachineTemplateObjectMeta.Labels { - expectedLabels[k] = v - } - expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name - expectedLabels[clusterv1.MachineControlPlaneLabel] = "" - expectedLabels[clusterv1.MachineControlPlaneNameLabel] = tt.kcp.Name - g.Expect(desiredMachine.Labels).To(Equal(expectedLabels)) - - // Verify that machineTemplate.ObjectMeta in KCP has not been modified. - g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Labels).To(Equal(kcpMachineTemplateObjectMetaCopy.Labels)) - g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Annotations).To(Equal(kcpMachineTemplateObjectMetaCopy.Annotations)) - }) + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + recorder: record.NewFakeRecorder(32), + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations: true, } + + // Break KubeadmConfig computation + kcp.Spec.Version = "something_invalid" + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") + g.Expect(err).To(HaveOccurred()) + g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ + Type: controlplanev1.MachinesCreatedV1Beta1Condition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityError, + Reason: controlplanev1.BootstrapTemplateCloningFailedV1Beta1Reason, + Message: "failed to create KubeadmConfig: failed to compute desired KubeadmConfig: failed to parse Kubernetes version \"something_invalid\": Invalid character(s) found in major number \"0something_invalid\"", + })) + // No objects should exist. 
+ machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind + "List", + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } -func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { +func TestCloneConfigsAndGenerateMachineFailMachineCreation(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient() cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ - Name: "testCluster", + Name: "foo", Namespace: metav1.NamespaceDefault, }, } + genericMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-foo", + "namespace": cluster.Namespace, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + }, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ - Name: "testControlPlane", + Name: "kcp-foo", Namespace: cluster.Namespace, }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: genericMachineTemplate.GetKind(), + APIGroup: genericMachineTemplate.GroupVersionKind().Group, + Name: genericMachineTemplate.GetName(), + }, + }, + }, + Version: "v1.16.6", + }, } - spec := bootstrapv1.KubeadmConfigSpec{} - expectedReferenceKind := "KubeadmConfig" - expectedReferenceAPIGroup := bootstrapv1.GroupVersion.Group - expectedOwner := metav1.OwnerReference{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - Name: kcp.Name, - } + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD) + // Break Machine creation by injecting an error into the Machine apply call. + fakeClient = interceptor.NewClient(fakeClient, interceptor.Funcs{ + Apply: func(ctx context.Context, c client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + clientObject, ok := obj.(client.Object) + if !ok { + return errors.Errorf("error during Machine creation: unexpected ApplyConfiguration") + } + if clientObject.GetObjectKind().GroupVersionKind().Kind == "Machine" { + return errors.Errorf("fake error during Machine creation") + } + return c.Apply(ctx, obj, opts...) 
+ }, + }) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, SecretCachingClient: fakeClient, recorder: record.NewFakeRecorder(32), + // Note: This field is only used for unit tests that use fake client because the fake client does not properly set resourceVersion + // on BootstrapConfig/InfraMachine after ssa.Patch and then ssa.RemoveManagedFieldsForLabelsAndAnnotations would fail. + disableRemoveManagedFieldsForLabelsAndAnnotations: true, } - _, got, err := r.generateKubeadmConfig(ctx, kcp, cluster, spec.DeepCopy(), "kubeadmconfig-name") - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).NotTo(BeNil()) - g.Expect(got.Name).To(Equal("kubeadmconfig-name")) - g.Expect(got.Kind).To(Equal(expectedReferenceKind)) - g.Expect(got.APIGroup).To(Equal(expectedReferenceAPIGroup)) - - bootstrapConfig := &bootstrapv1.KubeadmConfig{} - key := client.ObjectKey{Name: got.Name, Namespace: kcp.Namespace} - g.Expect(fakeClient.Get(ctx, key, bootstrapConfig)).To(Succeed()) - g.Expect(bootstrapConfig.OwnerReferences).To(HaveLen(1)) - g.Expect(bootstrapConfig.OwnerReferences).To(ContainElement(expectedOwner)) - g.Expect(bootstrapConfig.Spec).To(BeComparableTo(spec)) + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, true, "") + g.Expect(err).To(HaveOccurred()) + g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ + Type: controlplanev1.MachinesCreatedV1Beta1Condition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityError, + Reason: controlplanev1.MachineGenerationFailedV1Beta1Reason, + Message: "failed to apply Machine: fake error during Machine creation", + })) + // No objects should exist. + machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(BeEmpty()) + infraMachineList := &unstructured.UnstructuredList{} + infraMachineList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: builder.InfrastructureGroupVersion.Group, + Version: builder.InfrastructureGroupVersion.Version, + Kind: builder.GenericInfrastructureMachineKind + "List", + }) + g.Expect(fakeClient.List(ctx, infraMachineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(infraMachineList.Items).To(BeEmpty()) + kubeadmConfigList := &bootstrapv1.KubeadmConfigList{} + g.Expect(fakeClient.List(ctx, kubeadmConfigList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(kubeadmConfigList.Items).To(BeEmpty()) } func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { @@ -1073,10 +831,6 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { userProvidedKubeadmConfigSecretOtherOwner.OwnerReferences = []metav1.OwnerReference{otherOwner} kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Name: "testControlPlane", Namespace: metav1.NamespaceDefault, @@ -1093,8 +847,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { expectedOwnerRef: metav1.OwnerReference{ Name: kcp.Name, UID: kcp.UID, - Kind: kcp.Kind, - APIVersion: kcp.APIVersion, + Kind: "KubeadmControlPlane", + APIVersion: controlplanev1.GroupVersion.String(), Controller: ptr.To(true), BlockOwnerDeletion: ptr.To(true), }, @@ -1105,8 +859,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { expectedOwnerRef: 
metav1.OwnerReference{ Name: kcp.Name, UID: kcp.UID, - Kind: kcp.Kind, - APIVersion: kcp.APIVersion, + Kind: "KubeadmControlPlane", + APIVersion: controlplanev1.GroupVersion.String(), Controller: ptr.To(true), BlockOwnerDeletion: ptr.To(true), }, @@ -1136,3 +890,39 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { }) } } + +func cleanupTime(fields []metav1.ManagedFieldsEntry) []metav1.ManagedFieldsEntry { + for i := range fields { + fields[i].Time = nil + } + return fields +} + +type managedFieldEntry struct { + Manager string + Operation metav1.ManagedFieldsOperationType + APIVersion string + FieldsV1 string + Subresource string +} + +func toManagedFields(managedFields []managedFieldEntry) []metav1.ManagedFieldsEntry { + res := []metav1.ManagedFieldsEntry{} + for _, f := range managedFields { + res = append(res, metav1.ManagedFieldsEntry{ + Manager: f.Manager, + Operation: f.Operation, + APIVersion: f.APIVersion, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(trimSpaces(f.FieldsV1))}, + Subresource: f.Subresource, + }) + } + return res +} + +func trimSpaces(s string) string { + s = strings.ReplaceAll(s, "\n", "") + s = strings.ReplaceAll(s, "\t", "") + return s +} diff --git a/controlplane/kubeadm/internal/controllers/inplace.go b/controlplane/kubeadm/internal/controllers/inplace.go new file mode 100644 index 000000000000..d4b659d250b6 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace.go @@ -0,0 +1,67 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" +) + +func (r *KubeadmControlPlaneReconciler) tryInPlaceUpdate( + ctx context.Context, + controlPlane *internal.ControlPlane, + machineToInPlaceUpdate *clusterv1.Machine, + machineUpToDateResult internal.UpToDateResult, +) (fallbackToScaleDown bool, _ ctrl.Result, _ error) { + if r.overrideTryInPlaceUpdateFunc != nil { + return r.overrideTryInPlaceUpdateFunc(ctx, controlPlane, machineToInPlaceUpdate, machineUpToDateResult) + } + + // Run preflight checks to ensure that the control plane is stable before proceeding with in-place update operation. + if resultForAllMachines := r.preflightChecks(ctx, controlPlane); !resultForAllMachines.IsZero() { + // If the control plane is not stable, check if the issues are only for machineToInPlaceUpdate. + if result := r.preflightChecks(ctx, controlPlane, machineToInPlaceUpdate); result.IsZero() { + // The issues are only for machineToInPlaceUpdate, fallback to scale down. + // Note: The consequence of this is that a Machine with issues is scaled down and not in-place updated. + return true, ctrl.Result{}, nil + } + + return false, resultForAllMachines, nil + } + + // Note: Usually canUpdateMachine is only called once for a single Machine rollout. 
+ // If it returns true, the code below will mark the in-place update as in progress via + // UpdateInProgressAnnotation. From this point forward we are not going to call canUpdateMachine again. + // If it returns false, we are going to fall back to scale down which will delete the Machine. + // We only have to repeat the canUpdateMachine call if the write call to set UpdateInProgressAnnotation + // fails or if we fail to delete the Machine. + canUpdate, err := r.canUpdateMachine(ctx, machineToInPlaceUpdate, machineUpToDateResult) + if err != nil { + return false, ctrl.Result{}, errors.Wrapf(err, "failed to determine if Machine %s can be updated in-place", machineToInPlaceUpdate.Name) + } + + if !canUpdate { + return true, ctrl.Result{}, nil + } + + return false, ctrl.Result{}, r.triggerInPlaceUpdate(ctx, machineToInPlaceUpdate, machineUpToDateResult) +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go new file mode 100644 index 000000000000..503919c75059 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go @@ -0,0 +1,343 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "strings" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/util/compare" + "sigs.k8s.io/cluster-api/internal/util/inplace" + "sigs.k8s.io/cluster-api/internal/util/patch" + "sigs.k8s.io/cluster-api/internal/util/ssa" +) + +func (r *KubeadmControlPlaneReconciler) canUpdateMachine(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) { + if r.overrideCanUpdateMachineFunc != nil { + return r.overrideCanUpdateMachineFunc(ctx, machine, machineUpToDateResult) + } + + log := ctrl.LoggerFrom(ctx).WithValues("Machine", klog.KObj(machine)) + + // Machine cannot be updated in-place if the feature gate is not enabled. + if !feature.Gates.Enabled(feature.InPlaceUpdates) { + return false, nil + } + + // Machine cannot be updated in-place if the UpToDate func was not able to provide all objects, + // e.g. if the InfraMachine or KubeadmConfig was deleted. 
+ if machineUpToDateResult.DesiredMachine == nil || + machineUpToDateResult.CurrentInfraMachine == nil || + machineUpToDateResult.DesiredInfraMachine == nil || + machineUpToDateResult.CurrentKubeadmConfig == nil || + machineUpToDateResult.DesiredKubeadmConfig == nil { + return false, nil + } + + extensionHandlers, err := r.RuntimeClient.GetAllExtensions(ctx, runtimehooksv1.CanUpdateMachine, machine) + if err != nil { + return false, err + } + // Machine cannot be updated in-place if no CanUpdateMachine extensions are registered. + if len(extensionHandlers) == 0 { + return false, nil + } + if len(extensionHandlers) > 1 { + return false, errors.Errorf("found multiple CanUpdateMachine hooks (%s): only one hook is supported", strings.Join(extensionHandlers, ",")) + } + + canUpdateMachine, reasons, err := r.canExtensionsUpdateMachine(ctx, machine, machineUpToDateResult, extensionHandlers) + if err != nil { + return false, err + } + if !canUpdateMachine { + log.Info(fmt.Sprintf("Machine %s cannot be updated in-place by extensions", machine.Name), "reason", strings.Join(reasons, ",")) + return false, nil + } + return true, nil +} + +// canExtensionsUpdateMachine calls CanUpdateMachine extensions to decide if a Machine can be updated in-place. +// Note: This is following the same general structure that is used in the Apply func in +// internal/controllers/topology/cluster/patches/engine.go. +func (r *KubeadmControlPlaneReconciler) canExtensionsUpdateMachine(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if r.overrideCanExtensionsUpdateMachine != nil { + return r.overrideCanExtensionsUpdateMachine(ctx, machine, machineUpToDateResult, extensionHandlers) + } + + log := ctrl.LoggerFrom(ctx) + + // Create the CanUpdateMachine request. + req, err := createRequest(ctx, r.Client, machine, machineUpToDateResult) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to generate CanUpdateMachine request") + } + + var reasons []string + for _, extensionHandler := range extensionHandlers { + // Call CanUpdateMachine extension. + resp := &runtimehooksv1.CanUpdateMachineResponse{} + if err := r.RuntimeClient.CallExtension(ctx, runtimehooksv1.CanUpdateMachine, machine, extensionHandler, req, resp); err != nil { + return false, nil, err + } + + // Apply patches from the CanUpdateMachine response to the request. + if err := applyPatchesToRequest(ctx, req, resp); err != nil { + return false, nil, errors.Wrapf(err, "failed to apply patches from extension %s to the CanUpdateMachine request", extensionHandler) + } + + // Check if current and desired objects are now matching. + var matches bool + matches, reasons, err = matchesMachine(req) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to compare current and desired objects after calling extension %s", extensionHandler) + } + if matches { + return true, nil, nil + } + log.V(5).Info(fmt.Sprintf("Machine cannot be updated in-place yet after calling extension %s: %s", extensionHandler, strings.Join(reasons, ",")), "Machine", klog.KObj(&req.Current.Machine)) + } + + return false, reasons, nil +} + +func createRequest(ctx context.Context, c client.Client, currentMachine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (*runtimehooksv1.CanUpdateMachineRequest, error) { + // DeepCopy objects to avoid mutations. 
+ currentMachineForDiff := currentMachine.DeepCopy() + currentKubeadmConfigForDiff := machineUpToDateResult.CurrentKubeadmConfig.DeepCopy() + currentInfraMachineForDiff := machineUpToDateResult.CurrentInfraMachine.DeepCopy() + + desiredMachineForDiff := machineUpToDateResult.DesiredMachine.DeepCopy() + desiredKubeadmConfigForDiff := machineUpToDateResult.DesiredKubeadmConfig.DeepCopy() + desiredInfraMachineForDiff := machineUpToDateResult.DesiredInfraMachine.DeepCopy() + + // Sync in-place mutable changes from desired to current KubeadmConfig / InfraMachine. + // Note: Writing these fields is handled by syncMachines and not the responsibility of in-place updates. + // Note: Desired KubeadmConfig / InfraMachine already contain the latest labels & annotations. + currentKubeadmConfigForDiff.SetLabels(desiredKubeadmConfigForDiff.GetLabels()) + currentKubeadmConfigForDiff.SetAnnotations(desiredKubeadmConfigForDiff.GetAnnotations()) + currentInfraMachineForDiff.SetLabels(desiredInfraMachineForDiff.GetLabels()) + currentInfraMachineForDiff.SetAnnotations(desiredInfraMachineForDiff.GetAnnotations()) + + // Apply defaulting to current / desired Machine / KubeadmConfig / InfraMachine. + // Machine + // Note: currentMachineForDiff doesn't need a dry-run as it was just written in syncMachines and then + // update in controlPlane to ensure the Machine we get here is the latest version of the Machine. + // Note: desiredMachineForDiff needs a dry-run because otherwise we have unintended diffs, e.g. dataSecretName, + // providerID and nodeDeletionTimeout don't exist on the newly computed desired Machine. + if err := ssa.Patch(ctx, c, kcpManagerName, desiredMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for desired Machine") + } + // InfraMachine + // Note: Both currentInfraMachineForDiff and desiredInfraMachineForDiff need a dry-run to ensure changes + // in defaulting logic and fields added by other controllers don't lead to an unintended diff. + if err := ssa.Patch(ctx, c, kcpManagerName, currentInfraMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for current InfraMachine") + } + if err := ssa.Patch(ctx, c, kcpManagerName, desiredInfraMachineForDiff, ssa.WithDryRun{}); err != nil { + return nil, errors.Wrap(err, "server side apply dry-run failed for desired InfraMachine") + } + // KubeadmConfig + // Note: Both currentKubeadmConfigForDiff and desiredKubeadmConfigForDiff don't need a dry-run as + // PrepareKubeadmConfigsForDiff already has to perfectly handle differences between current / desired + // KubeadmConfig. Otherwise the regular rollout logic would not detect correctly if a Machine needs a rollout. + // Note: KubeadmConfig doesn't have a defaulting webhook and no API defaulting anymore. + desiredKubeadmConfigForDiff, currentKubeadmConfigForDiff = internal.PrepareKubeadmConfigsForDiff(desiredKubeadmConfigForDiff, currentKubeadmConfigForDiff, true) + + // Cleanup objects and create request. 
+ req := &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *cleanupMachine(currentMachineForDiff), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *cleanupMachine(desiredMachineForDiff), + }, + } + var err error + req.Current.BootstrapConfig, err = patch.ConvertToRawExtension(cleanupKubeadmConfig(currentKubeadmConfigForDiff)) + if err != nil { + return nil, err + } + req.Desired.BootstrapConfig, err = patch.ConvertToRawExtension(cleanupKubeadmConfig(desiredKubeadmConfigForDiff)) + if err != nil { + return nil, err + } + req.Current.InfrastructureMachine, err = patch.ConvertToRawExtension(cleanupUnstructured(currentInfraMachineForDiff)) + if err != nil { + return nil, err + } + req.Desired.InfrastructureMachine, err = patch.ConvertToRawExtension(cleanupUnstructured(desiredInfraMachineForDiff)) + if err != nil { + return nil, err + } + + return req, nil +} + +func cleanupMachine(machine *clusterv1.Machine) *clusterv1.Machine { + return &clusterv1.Machine{ + // Set GVK because object is later marshalled with json.Marshal. + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: machine.Name, + Namespace: machine.Namespace, + Labels: machine.Labels, + Annotations: machine.Annotations, + }, + Spec: *machine.Spec.DeepCopy(), + } +} + +func cleanupKubeadmConfig(kubeadmConfig *bootstrapv1.KubeadmConfig) *bootstrapv1.KubeadmConfig { + return &bootstrapv1.KubeadmConfig{ + // Set GVK because object is later marshalled with json.Marshal. + TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmConfig.Name, + Namespace: kubeadmConfig.Namespace, + Labels: kubeadmConfig.Labels, + Annotations: kubeadmConfig.Annotations, + }, + Spec: *kubeadmConfig.Spec.DeepCopy(), + } +} + +func cleanupUnstructured(u *unstructured.Unstructured) *unstructured.Unstructured { + cleanedUpU := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": u.GetAPIVersion(), + "kind": u.GetKind(), + "spec": u.Object["spec"], + }, + } + cleanedUpU.SetName(u.GetName()) + cleanedUpU.SetNamespace(u.GetNamespace()) + cleanedUpU.SetLabels(u.GetLabels()) + cleanedUpU.SetAnnotations(u.GetAnnotations()) + return cleanedUpU +} + +func applyPatchesToRequest(ctx context.Context, req *runtimehooksv1.CanUpdateMachineRequest, resp *runtimehooksv1.CanUpdateMachineResponse) error { + if resp.MachinePatch.IsDefined() { + if err := patch.ApplyPatchToTypedObject(ctx, &req.Current.Machine, resp.MachinePatch, "spec"); err != nil { + return err + } + } + + if resp.BootstrapConfigPatch.IsDefined() { + if _, err := patch.ApplyPatchToObject(ctx, &req.Current.BootstrapConfig, resp.BootstrapConfigPatch, "spec"); err != nil { + return err + } + } + + if resp.InfrastructureMachinePatch.IsDefined() { + if _, err := patch.ApplyPatchToObject(ctx, &req.Current.InfrastructureMachine, resp.InfrastructureMachinePatch, "spec"); err != nil { + return err + } + } + + return nil +} + +func matchesMachine(req *runtimehooksv1.CanUpdateMachineRequest) (bool, []string, error) { + var reasons []string + match, diff, err := matchesMachineSpec(&req.Current.Machine, &req.Desired.Machine) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match Machine") + } + if !match { + reasons = append(reasons, fmt.Sprintf("Machine cannot be updated in-place: %s", diff)) + } + match, diff, err 
= matchesUnstructuredSpec(req.Current.BootstrapConfig, req.Desired.BootstrapConfig) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match KubeadmConfig") + } + if !match { + reasons = append(reasons, fmt.Sprintf("KubeadmConfig cannot be updated in-place: %s", diff)) + } + match, diff, err = matchesUnstructuredSpec(req.Current.InfrastructureMachine, req.Desired.InfrastructureMachine) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to match %s", req.Current.InfrastructureMachine.Object.GetObjectKind().GroupVersionKind().Kind) + } + if !match { + reasons = append(reasons, fmt.Sprintf("%s cannot be updated in-place: %s", req.Current.InfrastructureMachine.Object.GetObjectKind().GroupVersionKind().Kind, diff)) + } + + if len(reasons) > 0 { + return false, reasons, nil + } + + return true, nil, nil +} + +func matchesMachineSpec(patched, desired *clusterv1.Machine) (equal bool, diff string, matchErr error) { + // Note: Wrapping Machine specs in a Machine for proper formatting of the diff. + return compare.Diff( + &clusterv1.Machine{ + Spec: *inplace.CleanupMachineSpecForDiff(&patched.Spec), + }, + &clusterv1.Machine{ + Spec: *inplace.CleanupMachineSpecForDiff(&desired.Spec), + }, + ) +} + +func matchesUnstructuredSpec(patched, desired runtime.RawExtension) (equal bool, diff string, matchErr error) { + // Note: Both patched and desired objects are always Unstructured as createRequest and + // applyPatchToObject are always setting objects as Unstructured. + patchedUnstructured, ok := patched.Object.(*unstructured.Unstructured) + if !ok { + return false, "", errors.Errorf("patched object is not an Unstructured") + } + desiredUnstructured, ok := desired.Object.(*unstructured.Unstructured) + if !ok { + return false, "", errors.Errorf("desired object is not an Unstructured") + } + // Note: Wrapping Unstructured specs in an Unstructured for proper formatting of the diff. + return compare.Diff( + &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": patchedUnstructured.Object["spec"], + }, + }, + &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": desiredUnstructured.Object["spec"], + }, + }, + ) +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go new file mode 100644 index 000000000000..94c4a9c68de8 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_canupdatemachine_test.go @@ -0,0 +1,1187 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + "sigs.k8s.io/cluster-api/bootstrap/kubeadm/defaulting" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + "sigs.k8s.io/cluster-api/feature" + fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" + "sigs.k8s.io/cluster-api/internal/util/compare" + "sigs.k8s.io/cluster-api/internal/util/patch" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func Test_canUpdateMachine(t *testing.T) { + machineToInPlaceUpdate := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + }, + } + nonEmptyMachineUpToDateResult := internal.UpToDateResult{ + // No real content needed for this, fields should just not be nil, + EligibleForInPlaceUpdate: true, + DesiredMachine: &clusterv1.Machine{}, + CurrentInfraMachine: &unstructured.Unstructured{}, + DesiredInfraMachine: &unstructured.Unstructured{}, + CurrentKubeadmConfig: &bootstrapv1.KubeadmConfig{}, + DesiredKubeadmConfig: &bootstrapv1.KubeadmConfig{}, + } + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + canUpdateMachineGVH, err := catalog.GroupVersionHook(runtimehooksv1.CanUpdateMachine) + if err != nil { + panic("unable to compute GVH") + } + + tests := []struct { + name string + machineUpToDateResult internal.UpToDateResult + enableInPlaceUpdatesFeatureGate bool + canExtensionsUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) + getAllExtensionsResponses map[runtimecatalog.GroupVersionHook][]string + wantCanExtensionsUpdateMachineCalled bool + wantCanUpdateMachine bool + wantError bool + wantErrorMessage string + }{ + { + name: "Return false if feature gate is not enabled", + enableInPlaceUpdatesFeatureGate: false, + wantCanUpdateMachine: false, + }, + { + name: "Return false if objects in machineUpToDateResult are nil", + enableInPlaceUpdatesFeatureGate: true, + wantCanUpdateMachine: false, + }, + { + name: "Return false if no CanUpdateMachine extensions registered", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{}, + wantCanUpdateMachine: false, + }, + { + name: "Return error if more than one CanUpdateMachine extensions registered", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension-1", "test-update-extension-2"}, + }, + wantError: true, + wantErrorMessage: "found multiple CanUpdateMachine hooks (test-update-extension-1,test-update-extension-2): only one hook is supported", + }, + { + name: "Return false if canExtensionsUpdateMachine returns false", + enableInPlaceUpdatesFeatureGate: true, + 
machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension"}, + }, + canExtensionsUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if len(extensionHandlers) != 1 || extensionHandlers[0] != "test-update-extension" { + return false, nil, errors.Errorf("unexpected error") + } + return false, []string{"can not update"}, nil + }, + wantCanExtensionsUpdateMachineCalled: true, + wantCanUpdateMachine: false, + }, + { + name: "Return true if canExtensionsUpdateMachine returns true", + enableInPlaceUpdatesFeatureGate: true, + machineUpToDateResult: nonEmptyMachineUpToDateResult, + getAllExtensionsResponses: map[runtimecatalog.GroupVersionHook][]string{ + canUpdateMachineGVH: {"test-update-extension"}, + }, + canExtensionsUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + if len(extensionHandlers) != 1 || extensionHandlers[0] != "test-update-extension" { + return false, nil, errors.Errorf("unexpected error") + } + return true, nil, nil + }, + wantCanExtensionsUpdateMachineCalled: true, + wantCanUpdateMachine: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + if tt.enableInPlaceUpdatesFeatureGate { + utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.InPlaceUpdates, true) + } + + runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCatalog(catalog). + WithGetAllExtensionResponses(tt.getAllExtensionsResponses). + Build() + + var canExtensionsUpdateMachineCalled bool + r := &KubeadmControlPlaneReconciler{ + RuntimeClient: runtimeClient, + overrideCanExtensionsUpdateMachine: func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, extensionHandlers []string) (bool, []string, error) { + canExtensionsUpdateMachineCalled = true + return tt.canExtensionsUpdateMachineFunc(ctx, machine, machineUpToDateResult, extensionHandlers) + }, + } + + canUpdateMachine, err := r.canUpdateMachine(ctx, machineToInPlaceUpdate, tt.machineUpToDateResult) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(canUpdateMachine).To(Equal(tt.wantCanUpdateMachine)) + + g.Expect(canExtensionsUpdateMachineCalled).To(Equal(tt.wantCanExtensionsUpdateMachineCalled), "canExtensionsUpdateMachineCalled: actual: %t expected: %t", canExtensionsUpdateMachineCalled, tt.wantCanExtensionsUpdateMachineCalled) + }) + } +} + +func Test_canExtensionsUpdateMachine(t *testing.T) { + currentMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.MachineSpec{ + Version: "v1.30.0", + }, + } + desiredMachine := currentMachine.DeepCopy() + desiredMachine.Spec.Version = "v1.31.0" + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: 
&bootstrapv1.JoinControlPlane{}, + }, + }, + } + desiredKubeadmConfig := currentKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": metav1.NamespaceDefault, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + desiredInfraMachine := currentInfraMachine.DeepCopy() + _ = unstructured.SetNestedField(desiredInfraMachine.Object, "in-place updated world", "spec", "hello") + + responseWithEmptyPatches := &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte("[]"), + }, + InfrastructureMachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + }, + BootstrapConfigPatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte("{}"), + }, + } + patchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/version","value":"v1.31.0"}]`), + } + patchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/clusterConfiguration/etcd/local/imageTag","value":"3.6.4-0"}]`), + } + patchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/hello","value":"in-place updated world"}]`), + } + emptyPatch := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + } + + tests := []struct { + name string + machineUpToDateResult internal.UpToDateResult + extensionHandlers []string + callExtensionResponses map[string]runtimehooksv1.ResponseObject + callExtensionExpectedChanges map[string]func(runtime.Object) + wantCanUpdateMachine bool + wantReasons []string + wantError bool + wantErrorMessage string + }{ + { + name: "Return true if current and desired objects are equal and no patches are returned", + // Note: canExtensionsUpdateMachine should never be called if the objects are equal, but this is a simple first test case. 
+ machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: currentMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: currentInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: currentKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": responseWithEmptyPatches, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return false if current and desired objects are not equal and no patches are returned", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": responseWithEmptyPatches, + }, + wantCanUpdateMachine: false, + wantReasons: []string{ + `Machine cannot be updated in-place: &v1beta2.Machine{ + TypeMeta: {}, + ObjectMeta: {}, + Spec: v1beta2.MachineSpec{ + ClusterName: "", + Bootstrap: {}, + InfrastructureRef: {}, +- Version: "v1.30.0", ++ Version: "v1.31.0", + ProviderID: "", + FailureDomain: "", + ... // 4 identical fields + }, + Status: {}, + }`, + `KubeadmConfig cannot be updated in-place: &unstructured.Unstructured{ + Object: map[string]any{ + "spec": map[string]any{ +- "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.5.0-0")}}}, ++ "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.6.4-0")}}}, + "format": string("cloud-config"), + "initConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + "joinConfiguration": map[string]any{"controlPlane": map[string]any{}, "nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + }, + }, + }`, + `TestInfrastructureMachine cannot be updated in-place: &unstructured.Unstructured{ +- Object: map[string]any{"spec": map[string]any{"hello": string("world")}}, ++ Object: map[string]any{"spec": map[string]any{"hello": string("in-place updated world")}}, + }`, + }, + }, + { + name: "Return true if current and desired objects are not equal and patches are returned that account for all diffs", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: patchToUpdateInfraMachine, + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return true if current and desired objects are not equal and patches are returned that account for all diffs (multiple extensions)", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: 
desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension-1", "test-update-extension-2", "test-update-extension-3"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension-1": &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: patchToUpdateMachine, + }, + "test-update-extension-2": &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + InfrastructureMachinePatch: patchToUpdateInfraMachine, + }, + "test-update-extension-3": &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + }, + callExtensionExpectedChanges: map[string]func(runtime.Object){ + "test-update-extension-2": func(object runtime.Object) { + if machine, ok := object.(*clusterv1.Machine); ok { + // After the call to test-update-extension-1 we expect that patchToUpdateMachine is already applied. + machine.Spec.Version = "v1.31.0" + } + }, + "test-update-extension-3": func(object runtime.Object) { + if machine, ok := object.(*clusterv1.Machine); ok { + // After the call to test-update-extension-1 we expect that patchToUpdateMachine is already applied. + machine.Spec.Version = "v1.31.0" + } + if infraMachine, ok := object.(*unstructured.Unstructured); ok { + // After the call to test-update-extension-2 we expect that patchToUpdateInfraMachine is already applied. + _ = unstructured.SetNestedField(infraMachine.Object, "in-place updated world", "spec", "hello") + } + }, + }, + wantCanUpdateMachine: true, + }, + { + name: "Return false if current and desired objects are not equal and patches are returned that only account for some diffs", + machineUpToDateResult: internal.UpToDateResult{ + DesiredMachine: desiredMachine, + CurrentInfraMachine: currentInfraMachine, + DesiredInfraMachine: desiredInfraMachine, + CurrentKubeadmConfig: currentKubeadmConfig, + DesiredKubeadmConfig: desiredKubeadmConfig, + }, + extensionHandlers: []string{"test-update-extension"}, + callExtensionResponses: map[string]runtimehooksv1.ResponseObject{ + "test-update-extension": &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: emptyPatch, + BootstrapConfigPatch: emptyPatch, + }, + }, + wantCanUpdateMachine: false, + wantReasons: []string{ + `KubeadmConfig cannot be updated in-place: &unstructured.Unstructured{ + Object: map[string]any{ + "spec": map[string]any{ +- "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.5.0-0")}}}, ++ "clusterConfiguration": map[string]any{"etcd": map[string]any{"local": map[string]any{"imageTag": string("3.6.4-0")}}}, + "format": string("cloud-config"), + "initConfiguration": map[string]any{"nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + "joinConfiguration": map[string]any{"controlPlane": map[string]any{}, "nodeRegistration": map[string]any{"imagePullPolicy": string("IfNotPresent")}}, + }, + }, + }`, + `TestInfrastructureMachine cannot be updated in-place: &unstructured.Unstructured{ +- Object: map[string]any{"spec": 
map[string]any{"hello": string("world")}}, ++ Object: map[string]any{"spec": map[string]any{"hello": string("in-place updated world")}}, + }`, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + fakeClient := fake.NewClientBuilder(). + WithObjects(currentMachine, currentInfraMachine, currentKubeadmConfig). + Build() + + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCatalog(catalog). + WithCallExtensionValidations(validateCanUpdateMachineRequests(currentMachine, tt.machineUpToDateResult, tt.callExtensionExpectedChanges)). + WithCallExtensionResponses(tt.callExtensionResponses). + Build() + + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + RuntimeClient: runtimeClient, + } + + canUpdateMachine, reasons, err := r.canExtensionsUpdateMachine(ctx, currentMachine, tt.machineUpToDateResult, tt.extensionHandlers) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(canUpdateMachine).To(Equal(tt.wantCanUpdateMachine)) + g.Expect(reasons).To(BeComparableTo(tt.wantReasons)) + }) + } +} + +func validateCanUpdateMachineRequests(currentMachine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult, callExtensionExpectedChanges map[string]func(runtime.Object)) func(name string, object runtimehooksv1.RequestObject) error { + return func(name string, req runtimehooksv1.RequestObject) error { + switch req := req.(type) { + case *runtimehooksv1.CanUpdateMachineRequest: + // Compare Machine + currentMachine := currentMachine.DeepCopy() + currentMachine.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) + currentMachine.ResourceVersion = "" // cleanupMachine drops ResourceVersion. + if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentMachine) + } + if d := diff(req.Current.Machine, *currentMachine); d != "" { + return fmt.Errorf("expected currentMachine to be equal, got diff: %s", d) + } + desiredMachine := machineUpToDateResult.DesiredMachine.DeepCopy() + desiredMachine.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) + desiredMachine.ResourceVersion = "" // cleanupMachine drops ResourceVersion. + if d := diff(req.Desired.Machine, *desiredMachine); d != "" { + return fmt.Errorf("expected desiredMachine to be equal, got diff: %s", d) + } + + // Compare KubeadmConfig + currentKubeadmConfig := machineUpToDateResult.CurrentKubeadmConfig.DeepCopy() + currentKubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + currentKubeadmConfig.ResourceVersion = "" // cleanupKubeadmConfig drops ResourceVersion. + defaulting.ApplyPreviousKubeadmConfigDefaults(¤tKubeadmConfig.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentKubeadmConfig) + } + currentKubeadmConfigBytes, _ := json.Marshal(currentKubeadmConfig) + if d := diff(req.Current.BootstrapConfig.Raw, currentKubeadmConfigBytes); d != "" { + return fmt.Errorf("expected currentKubeadmConfig to be equal, got diff: %s", d) + } + desiredKubeadmConfig := machineUpToDateResult.DesiredKubeadmConfig.DeepCopy() + desiredKubeadmConfig.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + desiredKubeadmConfig.ResourceVersion = "" // cleanupKubeadmConfig drops ResourceVersion. 
+ defaulting.ApplyPreviousKubeadmConfigDefaults(&desiredKubeadmConfig.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + desiredKubeadmConfigBytes, _ := json.Marshal(desiredKubeadmConfig) + if d := diff(req.Desired.BootstrapConfig.Raw, desiredKubeadmConfigBytes); d != "" { + return fmt.Errorf("expected desiredKubeadmConfig to be equal, got diff: %s", d) + } + + // Compare InfraMachine + currentInfraMachine := machineUpToDateResult.CurrentInfraMachine.DeepCopy() + currentInfraMachine.SetResourceVersion("") // cleanupUnstructured drops ResourceVersion. + if mutator, ok := callExtensionExpectedChanges[name]; ok { + mutator(currentInfraMachine) + } + currentInfraMachineBytes, _ := json.Marshal(currentInfraMachine) + reqCurrentInfraMachineBytes := bytes.TrimSuffix(req.Current.InfrastructureMachine.Raw, []byte("\n")) // Note: Somehow Patch introduces a trailing \n. + if d := diff(reqCurrentInfraMachineBytes, currentInfraMachineBytes); d != "" { + return fmt.Errorf("expected currentInfraMachine to be equal, got diff: %s", d) + } + desiredInfraMachine := machineUpToDateResult.DesiredInfraMachine.DeepCopy() + desiredInfraMachine.SetResourceVersion("") // cleanupUnstructured drops ResourceVersion. + desiredInfraMachineBytes, _ := json.Marshal(desiredInfraMachine) + if d := diff(req.Desired.InfrastructureMachine.Raw, desiredInfraMachineBytes); d != "" { + return fmt.Errorf("expected desiredInfraMachine to be equal, got diff: %s", d) + } + + return nil + default: + return fmt.Errorf("unhandled request type %T", req) + } + } +} + +func Test_createRequest(t *testing.T) { + g := NewWithT(t) + + ns, err := env.CreateNamespace(ctx, "in-place-create-request") + g.Expect(err).ToNot(HaveOccurred()) + + currentMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "cluster-1", + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "cluster-1", + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "machine-to-in-place-update", + }, + }, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineKind, + Name: "machine-to-in-place-update", + }, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDeletionTimeoutSeconds: ptr.To[int32](10), + }, + Version: "v1.30.0", + }, + Status: clusterv1.MachineStatus{ + NodeRef: clusterv1.MachineNodeReference{ + Name: "machine-to-in-place-update", + }, + }, + } + currentMachineCleanedUp := currentMachine.DeepCopy() + currentMachineCleanedUp.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // cleanupMachine adds GVK. + currentMachineCleanedUp.Status = clusterv1.MachineStatus{} // cleanupMachine drops status. 
+ currentMachineWithFieldsSetByMachineController := currentMachine.DeepCopy() + currentMachineWithFieldsSetByMachineController.Spec.ProviderID = "test://provider-id" + currentMachineWithFieldsSetByMachineController.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + currentMachineWithFieldsSetByMachineControllerCleanedUp := currentMachineCleanedUp.DeepCopy() + currentMachineWithFieldsSetByMachineControllerCleanedUp.Spec.ProviderID = "test://provider-id" + currentMachineWithFieldsSetByMachineControllerCleanedUp.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + + desiredMachine := currentMachine.DeepCopy() + desiredMachine.Spec.Version = "v1.31.0" + desiredMachineCleanedUp := desiredMachine.DeepCopy() + desiredMachineCleanedUp.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine")) // cleanupMachine adds GVK. + desiredMachineCleanedUp.Status = clusterv1.MachineStatus{} // cleanupMachine drops status. + desiredMachineWithFieldsSetByMachineControllerCleanedUp := desiredMachineCleanedUp.DeepCopy() + desiredMachineWithFieldsSetByMachineControllerCleanedUp.Spec.ProviderID = "test://provider-id" + desiredMachineWithFieldsSetByMachineControllerCleanedUp.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{{ + Name: "v", + Value: ptr.To("8"), + }}, + }, + }, + }, + Status: bootstrapv1.KubeadmConfigStatus{ + ObservedGeneration: 5, + }, + } + currentKubeadmConfigCleanedUp := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigCleanedUp.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // cleanupKubeadmConfig adds GVK. + currentKubeadmConfigCleanedUp.Status = bootstrapv1.KubeadmConfigStatus{} // cleanupKubeadmConfig drops status. + defaulting.ApplyPreviousKubeadmConfigDefaults(&currentKubeadmConfigCleanedUp.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + currentKubeadmConfigWithOutdatedLabelsAndAnnotations := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigWithOutdatedLabelsAndAnnotations.Labels["outdated-label-1"] = "outdated-label-value-1" + currentKubeadmConfigWithOutdatedLabelsAndAnnotations.Annotations["outdated-annotation-1"] = "outdated-annotation-value-1" + currentKubeadmConfigWithInitConfiguration := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigWithInitConfiguration.Spec.InitConfiguration.NodeRegistration = currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration.NodeRegistration + currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + + desiredKubeadmConfig := currentKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + desiredKubeadmConfigCleanedUp := desiredKubeadmConfig.DeepCopy() + desiredKubeadmConfigCleanedUp.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) // cleanupKubeadmConfig adds GVK.
+ desiredKubeadmConfigCleanedUp.Status = bootstrapv1.KubeadmConfigStatus{} // cleanupKubeadmConfig drops status. + defaulting.ApplyPreviousKubeadmConfigDefaults(&desiredKubeadmConfigCleanedUp.Spec) // PrepareKubeadmConfigsForDiff applies defaults. + + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": ns.Name, + "labels": map[string]interface{}{ + "label-1": "label-value-1", + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "foo": "hello world", + }, + "status": map[string]interface{}{ + "foo": "hello world", + }, + }, + } + currentInfraMachineCleanedUp := currentInfraMachine.DeepCopy() + unstructured.RemoveNestedField(currentInfraMachineCleanedUp.Object, "status") // cleanupUnstructured drops status. + currentInfraMachineWithOutdatedLabelsAndAnnotations := currentInfraMachine.DeepCopy() + currentInfraMachineWithOutdatedLabelsAndAnnotations.SetLabels(map[string]string{"outdated-label-1": "outdated-label-value-1"}) + currentInfraMachineWithOutdatedLabelsAndAnnotations.SetAnnotations(map[string]string{"outdated-annotation-1": "outdated-annotation-value-1"}) + currentInfraMachineWithFieldsSetByMachineController := currentInfraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(currentInfraMachineWithFieldsSetByMachineController.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + currentInfraMachineWithFieldsSetByMachineControllerCleanedUp := currentInfraMachineCleanedUp.DeepCopy() + g.Expect(unstructured.SetNestedField(currentInfraMachineWithFieldsSetByMachineControllerCleanedUp.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + + desiredInfraMachine := currentInfraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(desiredInfraMachine.Object, "hello in-place updated world", "spec", "foo")).To(Succeed()) + desiredInfraMachineCleanedUp := desiredInfraMachine.DeepCopy() + unstructured.RemoveNestedField(desiredInfraMachineCleanedUp.Object, "status") // cleanupUnstructured drops status. 
+ desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp := desiredInfraMachineCleanedUp.DeepCopy() + g.Expect(unstructured.SetNestedField(desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + + tests := []struct { + name string + currentMachine *clusterv1.Machine + currentInfraMachine *unstructured.Unstructured + currentKubeadmConfig *bootstrapv1.KubeadmConfig + desiredMachine *clusterv1.Machine + desiredInfraMachine *unstructured.Unstructured + desiredKubeadmConfig *bootstrapv1.KubeadmConfig + modifyMachineAfterCreate func(ctx context.Context, c client.Client, machine *clusterv1.Machine) error + modifyInfraMachineAfterCreate func(ctx context.Context, c client.Client, infraMachine *unstructured.Unstructured) error + modifyUpToDateResult func(result *internal.UpToDateResult) + wantReq *runtimehooksv1.CanUpdateMachineRequest + wantError bool + wantErrorMessage string + }{ + { + name: "Should prepare all objects for diff", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + // Objects have been cleaned up for the diff. + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: syncs BootstrapConfig/InfraMachine labels and annotations", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyUpToDateResult: func(result *internal.UpToDateResult) { + // Modify the UpToDateResult before it is passed into createRequest. + // This covers the scenario where the "local" current objects are outdated. + result.CurrentInfraMachine = currentInfraMachineWithOutdatedLabelsAndAnnotations + result.CurrentKubeadmConfig = currentKubeadmConfigWithOutdatedLabelsAndAnnotations + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + // Current / desired BootstrapConfig / InfraMachine all contain the latest labels / annotations. 
+ Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: desiredMachine picks up changes from currentMachine via SSA dry-run", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyMachineAfterCreate: func(ctx context.Context, c client.Client, machine *clusterv1.Machine) error { + // Write additional fields like the Machine controller would do. + machineOrig := machine.DeepCopy() + machine.Spec.ProviderID = "test://provider-id" + machine.Spec.Bootstrap.DataSecretName = ptr.To("data-secret") + return c.Patch(ctx, machine, client.MergeFrom(machineOrig)) + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + // currentMachine always contained the fields written by the Machine controller + // as we pass the Machine object after modifyMachineAfterCreate into createRequest. + Machine: *currentMachineWithFieldsSetByMachineControllerCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + // desiredMachine picked up the fields written by the Machine controller via SSA dry-run. + Machine: *desiredMachineWithFieldsSetByMachineControllerCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: desiredInfraMachine picks up changes from currentInfraMachine via SSA dry-run", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + modifyInfraMachineAfterCreate: func(ctx context.Context, c client.Client, infraMachine *unstructured.Unstructured) error { + // Write additional fields like the Infra Machine controller would do. + infraMachineOrig := infraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(infraMachine.Object, "hello world from the infra machine controller", "spec", "bar")).To(Succeed()) + return c.Patch(ctx, infraMachine, client.MergeFrom(infraMachineOrig)) + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + // currentInfraMachine always contained the fields written by the InfraMachine controller + // as we pass the InfraMachine object after modifyInfraMachineAfterCreate into createRequest. 
+ InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineWithFieldsSetByMachineControllerCleanedUp), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + // desiredInfraMachine picked up the fields written by the InfraMachine controller via SSA dry-run. + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineWithFieldsSetByMachineControllerCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + { + name: "Should prepare all objects for diff: currentKubeadmConfig & desiredKubeadmConfig are prepared for diff", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfigWithInitConfiguration, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachineCleanedUp), + // currentKubeadmConfig was converted from InitConfiguration to JoinConfiguration via PrepareKubeadmConfigsForDiff. + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfigCleanedUp), + }, + Desired: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *desiredMachineCleanedUp, + InfrastructureMachine: mustConvertToRawExtension(desiredInfraMachineCleanedUp), + BootstrapConfig: mustConvertToRawExtension(desiredKubeadmConfigCleanedUp), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + g := NewWithT(t) + + // Create Machine (same as in createMachine) + currentMachineForPatch := tt.currentMachine.DeepCopy() + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentMachineForPatch)).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentMachine)).To(Succeed()) + }) + + // Create InfraMachine (same as in createInfraMachine) + currentInfraMachineForPatch := tt.currentInfraMachine.DeepCopy() + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentInfraMachineForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentInfraMachineForPatch, kcpManagerName)).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentInfraMachine)).To(Succeed()) + }) + + // Create KubeadmConfig (same as in createKubeadmConfig) + currentKubeadmConfigForPatch := tt.currentKubeadmConfig.DeepCopy() + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentKubeadmConfigForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentKubeadmConfigForPatch, kcpManagerName)).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentKubeadmConfig)).To(Succeed()) + }) + + if tt.modifyMachineAfterCreate != nil { + g.Expect(tt.modifyMachineAfterCreate(ctx, env.Client, currentMachineForPatch)).To(Succeed()) + } + if tt.modifyInfraMachineAfterCreate != nil { + g.Expect(tt.modifyInfraMachineAfterCreate(ctx, env.Client, currentInfraMachineForPatch)).To(Succeed()) + } + + upToDateResult := internal.UpToDateResult{ + CurrentInfraMachine: currentInfraMachineForPatch, + CurrentKubeadmConfig: 
currentKubeadmConfigForPatch, + DesiredMachine: tt.desiredMachine, + DesiredInfraMachine: tt.desiredInfraMachine, + DesiredKubeadmConfig: tt.desiredKubeadmConfig, + } + if tt.modifyUpToDateResult != nil { + tt.modifyUpToDateResult(&upToDateResult) + } + + req, err := createRequest(ctx, env.Client, currentMachineForPatch, upToDateResult) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(req).To(BeComparableTo(tt.wantReq)) + }) + } +} + +func Test_applyPatchesToRequest(t *testing.T) { + currentMachine := &clusterv1.Machine{ + // Set GVK because this is required by convertToRawExtension. + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.MachineSpec{ + Version: "v1.30.0", + }, + } + patchedMachine := currentMachine.DeepCopy() + patchedMachine.Spec.Version = "v1.31.0" + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + // Set GVK because this is required by convertToRawExtension. + TypeMeta: metav1.TypeMeta{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: "KubeadmConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: metav1.NamespaceDefault, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + }, + } + patchedKubeadmConfig := currentKubeadmConfig.DeepCopy() + patchedKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": metav1.NamespaceDefault, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + patchedInfraMachine := currentInfraMachine.DeepCopy() + _ = unstructured.SetNestedField(patchedInfraMachine.Object, "in-place updated world", "spec", "hello") + + responseWithEmptyPatches := &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte("[]"), + }, + InfrastructureMachinePatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte{}, + }, + BootstrapConfigPatch: runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte("{}"), + }, + } + patchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/version","value":"v1.31.0"}]`), + } + patchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/clusterConfiguration/etcd/local/imageTag","value":"3.6.4-0"}]`), + } + patchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: 
runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"replace","path":"/spec/hello","value":"in-place updated world"}]`), + } + jsonMergePatchToUpdateMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"version":"v1.31.0"}}`), + } + jsonMergePatchToUpdateKubeadmConfig := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"clusterConfiguration":{"etcd":{"local":{"imageTag":"3.6.4-0"}}}}}`), + } + jsonMergePatchToUpdateInfraMachine := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONMergePatchType, + Patch: []byte(`{"spec":{"hello":"in-place updated world"}}`), + } + patchToUpdateMachineStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + patchToUpdateKubeadmConfigStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + patchToUpdateInfraMachineStatus := runtimehooksv1.Patch{ + PatchType: runtimehooksv1.JSONPatchType, + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + } + + tests := []struct { + name string + req *runtimehooksv1.CanUpdateMachineRequest + resp *runtimehooksv1.CanUpdateMachineResponse + wantReq *runtimehooksv1.CanUpdateMachineRequest + wantError bool + wantErrorMessage string + }{ + { + name: "No changes with no patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: responseWithEmptyPatches, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + }, + { + name: "Changes with patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: patchToUpdateMachine, + InfrastructureMachinePatch: patchToUpdateInfraMachine, + BootstrapConfigPatch: patchToUpdateKubeadmConfig, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *patchedMachine, + InfrastructureMachine: mustConvertToRawExtension(patchedInfraMachine), + BootstrapConfig: mustConvertToRawExtension(patchedKubeadmConfig), + }, + }, + }, + { + name: "Changes with JSON merge patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: 
jsonMergePatchToUpdateMachine, + InfrastructureMachinePatch: jsonMergePatchToUpdateInfraMachine, + BootstrapConfigPatch: jsonMergePatchToUpdateKubeadmConfig, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *patchedMachine, + InfrastructureMachine: mustConvertToRawExtension(patchedInfraMachine), + BootstrapConfig: mustConvertToRawExtension(patchedKubeadmConfig), + }, + }, + }, + { + name: "No changes with status patches", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: patchToUpdateMachineStatus, + InfrastructureMachinePatch: patchToUpdateInfraMachineStatus, + BootstrapConfigPatch: patchToUpdateKubeadmConfigStatus, + }, + wantReq: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + }, + { + name: "Error if PatchType is not set but Patch is", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: runtimehooksv1.Patch{ + // PatchType is missing + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + }, + }, + wantError: true, + wantErrorMessage: "failed to apply patch: patchType is not set", + }, + { + name: "Error if PatchType is set to an unknown value", + req: &runtimehooksv1.CanUpdateMachineRequest{ + Current: runtimehooksv1.CanUpdateMachineRequestObjects{ + Machine: *currentMachine, + InfrastructureMachine: mustConvertToRawExtension(currentInfraMachine), + BootstrapConfig: mustConvertToRawExtension(currentKubeadmConfig), + }, + }, + resp: &runtimehooksv1.CanUpdateMachineResponse{ + CommonResponse: runtimehooksv1.CommonResponse{Status: runtimehooksv1.ResponseStatusSuccess}, + MachinePatch: runtimehooksv1.Patch{ + PatchType: "UnknownType", + Patch: []byte(`[{"op":"add","path":"/status","value":{"observedGeneration": 10}}]`), + }, + }, + wantError: true, + wantErrorMessage: "failed to apply patch: unknown patchType UnknownType", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := applyPatchesToRequest(ctx, tt.req, tt.resp) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + // Compare only the objects and avoid comparing runtime.RawExtension.Raw because + // Raw is slightly non-deterministic because it doesn't guarantee order of map keys. 
+ g.Expect(tt.req.Current.Machine).To(BeComparableTo(tt.wantReq.Current.Machine)) + g.Expect(tt.req.Current.InfrastructureMachine.Object).To(BeComparableTo(tt.wantReq.Current.InfrastructureMachine.Object)) + g.Expect(tt.req.Current.BootstrapConfig.Object).To(BeComparableTo(tt.wantReq.Current.BootstrapConfig.Object)) + }) + } +} + +func diff(a, b any) string { + _, d, err := compare.Diff(a, b) + if err != nil { + return fmt.Sprintf("error during diff: %v", err) + } + return d +} + +func mustConvertToRawExtension(object runtime.Object) runtime.RawExtension { + raw, err := patch.ConvertToRawExtension(object) + if err != nil { + panic(err) + } + return raw +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_test.go b/controlplane/kubeadm/internal/controllers/inplace_test.go new file mode 100644 index 000000000000..64a69a99a90b --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_test.go @@ -0,0 +1,137 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" +) + +func Test_tryInPlaceUpdate(t *testing.T) { + machineToInPlaceUpdate := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + }, + } + + tests := []struct { + name string + preflightChecksFunc func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result + canUpdateMachineFunc func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) + wantCanUpdateMachineCalled bool + wantTriggerInPlaceUpdateCalled bool + wantFallbackToScaleDown bool + wantError bool + wantErrorMessage string + wantRes ctrl.Result + }{ + { + name: "Requeue if preflight checks for all Machines failed", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} + }, + wantRes: ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, + }, + { + name: "Fallback to scale down if checks for all Machines failed, but checks succeed when excluding machineToInPlaceUpdate", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + if len(excludeFor) == 1 && excludeFor[0] == machineToInPlaceUpdate { + return ctrl.Result{} // If machineToInPlaceUpdate is excluded preflight checks succeed => scale down + } + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} + }, + wantFallbackToScaleDown: true, + }, + { + name: "Return error if canUpdateMachine returns an error", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return 
ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return false, errors.New("canUpdateMachine error") + }, + wantCanUpdateMachineCalled: true, + wantError: true, + wantErrorMessage: "failed to determine if Machine machine-to-in-place-update can be updated in-place: canUpdateMachine error", + }, + { + name: "Fallback to scale down if canUpdateMachine returns false", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return false, nil + }, + wantCanUpdateMachineCalled: true, + wantFallbackToScaleDown: true, + }, + { + name: "Trigger in-place update if canUpdateMachine returns true", + preflightChecksFunc: func(_ context.Context, _ *internal.ControlPlane, _ ...*clusterv1.Machine) ctrl.Result { + return ctrl.Result{} + }, + canUpdateMachineFunc: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, error) { + return true, nil + }, + wantCanUpdateMachineCalled: true, + wantTriggerInPlaceUpdateCalled: true, + wantFallbackToScaleDown: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var canUpdateMachineCalled bool + var triggerInPlaceUpdateCalled bool + r := &KubeadmControlPlaneReconciler{ + overridePreflightChecksFunc: func(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + return tt.preflightChecksFunc(ctx, controlPlane, excludeFor...) + }, + overrideCanUpdateMachineFunc: func(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, error) { + canUpdateMachineCalled = true + return tt.canUpdateMachineFunc(ctx, machine, machineUpToDateResult) + }, + overrideTriggerInPlaceUpdate: func(_ context.Context, _ *clusterv1.Machine, _ internal.UpToDateResult) error { + triggerInPlaceUpdateCalled = true + return nil + }, + } + + fallbackToScaleDown, res, err := r.tryInPlaceUpdate(ctx, nil, machineToInPlaceUpdate, internal.UpToDateResult{}) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(res).To(Equal(tt.wantRes)) + g.Expect(fallbackToScaleDown).To(Equal(tt.wantFallbackToScaleDown)) + + g.Expect(canUpdateMachineCalled).To(Equal(tt.wantCanUpdateMachineCalled), "canUpdateMachineCalled: actual: %t expected: %t", canUpdateMachineCalled, tt.wantCanUpdateMachineCalled) + g.Expect(triggerInPlaceUpdateCalled).To(Equal(tt.wantTriggerInPlaceUpdateCalled), "triggerInPlaceUpdateCalled: actual: %t expected: %t", triggerInPlaceUpdateCalled, tt.wantTriggerInPlaceUpdateCalled) + }) + } +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_trigger.go b/controlplane/kubeadm/internal/controllers/inplace_trigger.go new file mode 100644 index 000000000000..580c1b97ba8f --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_trigger.go @@ -0,0 +1,170 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/internal/hooks" + clientutil "sigs.k8s.io/cluster-api/internal/util/client" + "sigs.k8s.io/cluster-api/internal/util/ssa" +) + +func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context, machine *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) error { + if r.overrideTriggerInPlaceUpdate != nil { + return r.overrideTriggerInPlaceUpdate(ctx, machine, machineUpToDateResult) + } + + log := ctrl.LoggerFrom(ctx).WithValues("Machine", klog.KObj(machine)) + log.Info(fmt.Sprintf("Triggering in-place update for Machine %s", machine.Name)) + + // Mark Machine for in-place update. + // Note: Once we write UpdateInProgressAnnotation we will always continue with the in-place update. + // Note: Intentionally using client.Patch instead of SSA. Otherwise we would have to ensure we preserve + // UpdateInProgressAnnotation on existing Machines in KCP and that would lead to race conditions when + // the Machine controller tries to remove the annotation and then KCP adds it back. + if _, ok := machine.Annotations[clusterv1.UpdateInProgressAnnotation]; !ok { + orig := machine.DeepCopy() + if machine.Annotations == nil { + machine.Annotations = map[string]string{} + } + machine.Annotations[clusterv1.UpdateInProgressAnnotation] = "" + if err := r.Client.Patch(ctx, machine, client.MergeFrom(orig)); err != nil { + return errors.Wrapf(err, "failed to trigger in-place update for Machine %s by setting the %s annotation", klog.KObj(machine), clusterv1.UpdateInProgressAnnotation) + } + + // Wait until the cache observed the Machine with UpdateInProgressAnnotation to ensure subsequent reconciles + // will observe it as well and accordingly don't trigger another in-place update concurrently. + if err := clientutil.WaitForCacheToBeUpToDate(ctx, r.Client, fmt.Sprintf("setting the %s annotation", clusterv1.UpdateInProgressAnnotation), machine); err != nil { + return err + } + } + + // TODO: If this func fails below we are going to reconcile again and call triggerInPlaceUpdate again. If KCP + // spec changed in the meantime desired objects might change and then we would use different desired objects + // for UpdateMachine compared to what we used in CanUpdateMachine. + // If we want to account for that we could consider writing desired InfraMachine/KubeadmConfig/Machine with + // the in-progress annotation on the Machine and use it if necessary (and clean it up when we set the pending + // annotation). This might lead to issues with the maximum object size supported by etcd though (so we might + // have to write the objects somewhere else). 
+ + desiredMachine := machineUpToDateResult.DesiredMachine + desiredInfraMachine := machineUpToDateResult.DesiredInfraMachine + desiredKubeadmConfig := machineUpToDateResult.DesiredKubeadmConfig + + // Machine cannot be updated in-place if the UpToDate func was not able to provide all objects, + // e.g. if the InfraMachine or KubeadmConfig was deleted. + // Note: As canUpdateMachine also checks these fields for nil this can only happen if the initial + // triggerInPlaceUpdate call failed after setting UpdateInProgressAnnotation. + if desiredInfraMachine == nil { + return errors.Errorf("failed to complete triggering in-place update for Machine %s, could not compute desired InfraMachine", klog.KObj(machine)) + } + if desiredKubeadmConfig == nil { + return errors.Errorf("failed to complete triggering in-place update for Machine %s, could not compute desired KubeadmConfig", klog.KObj(machine)) + } + + // Write InfraMachine without the labels & annotations that are written continuously by updateLabelsAndAnnotations. + // Note: Let's update InfraMachine first because that is the call that is most likely to fail. + desiredInfraMachine.SetLabels(nil) + desiredInfraMachine.SetAnnotations(map[string]string{ + // ClonedFrom annotations are initially written by createInfraMachine and then managedField ownership is + // removed via ssa.RemoveManagedFieldsForLabelsAndAnnotations. + // updateLabelsAndAnnotations is intentionally not updating them as they should be only updated as part + // of an in-place update here, e.g. for the case where the InfraMachineTemplate was rotated. + clusterv1.TemplateClonedFromNameAnnotation: desiredInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation], + clusterv1.TemplateClonedFromGroupKindAnnotation: desiredInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation], + // Machine controller waits for this annotation to exist on Machine and related objects before starting the in-place update. + clusterv1.UpdateInProgressAnnotation: "", + }) + if err := ssa.Patch(ctx, r.Client, kcpManagerName, desiredInfraMachine); err != nil { + return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine)) + } + + // Write KubeadmConfig without the labels & annotations that are written continuously by updateLabelsAndAnnotations. + desiredKubeadmConfig.Labels = nil + desiredKubeadmConfig.Annotations = map[string]string{ + // Machine controller waits for this annotation to exist on Machine and related objects before starting the in-place update. + clusterv1.UpdateInProgressAnnotation: "", + } + if err := ssa.Patch(ctx, r.Client, kcpManagerName, desiredKubeadmConfig); err != nil { + return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine)) + } + if desiredKubeadmConfig.Spec.InitConfiguration.IsDefined() { + if err := r.removeInitConfiguration(ctx, desiredKubeadmConfig); err != nil { + return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine)) + } + } + + // Write Machine. + if err := ssa.Patch(ctx, r.Client, kcpManagerName, desiredMachine); err != nil { + return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine)) + } + + // Note: Once we write PendingHooksAnnotation the Machine controller will start with the in-place update. + // Note: Intentionally using client.Patch (via hooks.MarkAsPending + patchHelper) instead of SSA. 
Otherwise we would + // have to ensure we preserve PendingHooksAnnotation on existing Machines in KCP and that would lead to race + // conditions when the Machine controller tries to remove the annotation and KCP adds it back. + // Note: This call will update the resourceVersion on desiredMachine, so that WaitForCacheToBeUpToDate also considers this change. + if err := hooks.MarkAsPending(ctx, r.Client, desiredMachine, true, runtimehooksv1.UpdateMachine); err != nil { + return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine)) + } + + log.Info(fmt.Sprintf("Completed triggering in-place update for Machine %s", machine.Name)) + r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update") + + // Wait until the cache observed the Machine with PendingHooksAnnotation to ensure subsequent reconciles + // will observe it as well and won't repeatedly call triggerInPlaceUpdate. + return clientutil.WaitForCacheToBeUpToDate(ctx, r.Client, "marking the UpdateMachine hook as pending", desiredMachine) +} + +func (r *KubeadmControlPlaneReconciler) removeInitConfiguration(ctx context.Context, desiredKubeadmConfig *bootstrapv1.KubeadmConfig) error { + // Remove initConfiguration with Patch if necessary. + // This is only necessary if ssa.Patch above cannot remove the initConfiguration field because + // capi-kubeadmcontrolplane does not own it. + // Note: desiredKubeadmConfig here will always contain a joinConfiguration instead of an initConfiguration. + // + // This happens only on KubeadmConfigs (for kubeadm init) created with CAPI <= v1.11, because the initConfiguration + // field is not owned by anyone there (i.e. orphaned) after we called ssa.MigrateManagedFields in syncMachines. + // + // In KubeadmConfigs created with CAPI >= v1.12 capi-kubeadmcontrolplane owns the initConfiguration field + // and accordingly the ssa.Patch above is able to remove it. + // + // There are two ways this can be resolved: + // - Machine goes through an in-place rollout and this code removes the initConfiguration. + // - Machine is rolled out (re-created) which will use the new managedField structure. + // + // As CAPI v1.11 supported up to Kubernetes v1.34. We assume the Machine has to be either rolled out + // or in-place updated before CAPI drops support for Kubernetes v1.34. So this code can be removed + // once CAPI doesn't support Kubernetes v1.34 anymore. + origKubeadmConfig := desiredKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.InitConfiguration = bootstrapv1.InitConfiguration{} + if err := r.Client.Patch(ctx, desiredKubeadmConfig, client.MergeFrom(origKubeadmConfig)); err != nil { + return errors.Wrap(err, "failed to patch KubeadmConfig: failed to remove initConfiguration") + } + return nil +} diff --git a/controlplane/kubeadm/internal/controllers/inplace_trigger_test.go b/controlplane/kubeadm/internal/controllers/inplace_trigger_test.go new file mode 100644 index 000000000000..b6e0da22522c --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/inplace_trigger_test.go @@ -0,0 +1,323 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "testing" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func Test_triggerInPlaceUpdate(t *testing.T) { + g := NewWithT(t) + + ns, err := env.CreateNamespace(ctx, "in-place-trigger") + g.Expect(err).ToNot(HaveOccurred()) + + currentMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "cluster-1", + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "cluster-1", + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "machine-to-in-place-update", + }, + }, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineKind, + Name: "machine-to-in-place-update", + }, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDeletionTimeoutSeconds: ptr.To[int32](10), + }, + Version: "v1.30.0", + }, + Status: clusterv1.MachineStatus{ + NodeRef: clusterv1.MachineNodeReference{ + Name: "machine-to-in-place-update", + }, + }, + } + desiredMachine := currentMachine.DeepCopy() + desiredMachine.Spec.Version = "v1.31.0" + + currentKubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-to-in-place-update", + Namespace: ns.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "cluster-1", + "label-1": "label-value-1", + }, + Annotations: map[string]string{ + "annotation-1": "annotation-value-1", + }, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageTag: "3.5.0-0", + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{{ + Name: "v", + Value: ptr.To("8"), + }}, + }, + }, + }, + Status: bootstrapv1.KubeadmConfigStatus{ + ObservedGeneration: 5, + }, + } + currentKubeadmConfigWithInitConfiguration := currentKubeadmConfig.DeepCopy() + currentKubeadmConfigWithInitConfiguration.Spec.InitConfiguration.NodeRegistration = currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration.NodeRegistration + currentKubeadmConfigWithInitConfiguration.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + desiredKubeadmConfig := 
currentKubeadmConfig.DeepCopy() + desiredKubeadmConfig.Spec.ClusterConfiguration.Etcd.Local.ImageTag = "3.6.4-0" + + currentInfraMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, + "metadata": map[string]interface{}{ + "name": "machine-to-in-place-update", + "namespace": ns.Name, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: "cluster-1", + "label-1": "label-value-1", + }, + "annotations": map[string]interface{}{ + "annotation-1": "annotation-value-1", + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "foo": "hello world", + }, + "status": map[string]interface{}{ + "foo": "hello world", + }, + }, + } + desiredInfraMachine := currentInfraMachine.DeepCopy() + g.Expect(unstructured.SetNestedField(desiredInfraMachine.Object, "hello in-place updated world", "spec", "foo")).To(Succeed()) + desiredInfraMachine.SetAnnotations(map[string]string{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-2", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate2.infrastructure.cluster.x-k8s.io", + }) + + tests := []struct { + name string + currentMachine *clusterv1.Machine + createMachineWithUpdateInProgressAnnotation bool + currentInfraMachine *unstructured.Unstructured + currentKubeadmConfig *bootstrapv1.KubeadmConfig + createKubeadmConfigLikeWithCAPI1_11 bool + desiredMachine *clusterv1.Machine + desiredInfraMachine *unstructured.Unstructured + desiredKubeadmConfig *bootstrapv1.KubeadmConfig + wantError bool + wantErrorMessage string + }{ + { + name: "Return error if desiredInfraMachine is nil", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: nil, + desiredKubeadmConfig: desiredKubeadmConfig, + wantError: true, + wantErrorMessage: fmt.Sprintf("failed to complete triggering in-place update for Machine %s/machine-to-in-place-update, could not compute desired InfraMachine", ns.Name), + }, + { + name: "Return error if desiredKubeadmConfig is nil", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: nil, + wantError: true, + wantErrorMessage: fmt.Sprintf("failed to complete triggering in-place update for Machine %s/machine-to-in-place-update, could not compute desired KubeadmConfig", ns.Name), + }, + { + name: "Trigger in-place update", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + }, + { + name: "Trigger in-place update (Machine already has UpdateInProgressAnnotation)", + currentMachine: currentMachine, + createMachineWithUpdateInProgressAnnotation: true, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfig, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + }, + { + name: "Trigger in-place update: KubeadmConfig 
v1.11 => remove initConfiguration", + currentMachine: currentMachine, + currentInfraMachine: currentInfraMachine, + currentKubeadmConfig: currentKubeadmConfigWithInitConfiguration, + createKubeadmConfigLikeWithCAPI1_11: true, + desiredMachine: desiredMachine, + desiredInfraMachine: desiredInfraMachine, + desiredKubeadmConfig: desiredKubeadmConfig, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + g := NewWithT(t) + + // Create Machine (same as in createMachine) + currentMachineForPatch := tt.currentMachine.DeepCopy() + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentMachineForPatch)).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentMachine)).To(Succeed()) + }) + if tt.createMachineWithUpdateInProgressAnnotation { + orig := currentMachineForPatch.DeepCopy() + currentMachineForPatch.Annotations[clusterv1.UpdateInProgressAnnotation] = "" + g.Expect(env.Client.Patch(ctx, currentMachineForPatch, client.MergeFrom(orig), client.FieldOwner("manager"))).To(Succeed()) + } + + // Create InfraMachine (same as in createInfraMachine) + currentInfraMachineForPatch := tt.currentInfraMachine.DeepCopy() + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentInfraMachineForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentInfraMachineForPatch, kcpManagerName)).To(Succeed()) + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentInfraMachine)).To(Succeed()) + }) + + // Create KubeadmConfig (same as in createKubeadmConfig) + currentKubeadmConfigForPatch := tt.currentKubeadmConfig.DeepCopy() + if tt.createKubeadmConfigLikeWithCAPI1_11 { + // Note: Create the object like it was created it in CAPI <= v1.11 (with manager). + g.Expect(env.Client.Create(ctx, currentKubeadmConfigForPatch, client.FieldOwner("manager"))).To(Succeed()) + // Note: Update labels and annotations like in CAPI <= v1.11 (with the "capi-kubeadmcontrolplane" fieldManager). + updatedObject := &unstructured.Unstructured{} + updatedObject.SetGroupVersionKind(bootstrapv1.GroupVersion.WithKind("KubeadmConfig")) + updatedObject.SetNamespace(currentKubeadmConfigForPatch.GetNamespace()) + updatedObject.SetName(currentKubeadmConfigForPatch.GetName()) + updatedObject.SetUID(currentKubeadmConfigForPatch.GetUID()) + updatedObject.SetLabels(currentMachineForPatch.GetLabels()) + updatedObject.SetAnnotations(currentMachineForPatch.GetAnnotations()) + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, updatedObject)).To(Succeed()) + // Now migrate the managedFields like CAPI >= v1.12 does. + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(currentKubeadmConfigForPatch), currentKubeadmConfigForPatch)).Should(Succeed()) + g.Expect(ssa.MigrateManagedFields(ctx, env.Client, currentKubeadmConfigForPatch, kcpManagerName, kcpMetadataManagerName)).To(Succeed()) + // Note: At this point spec is not owned by anyone (orphaned). This requires the code path to remove initConfiguration for CAPI v1.11 objects. 
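+				// Because of this, the ssa.Patch in triggerInPlaceUpdate cannot drop spec.initConfiguration
+				// and this test case exercises the removeInitConfiguration fallback instead.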
+ } else { + g.Expect(ssa.Patch(ctx, env.Client, kcpManagerName, currentKubeadmConfigForPatch)).To(Succeed()) + g.Expect(ssa.RemoveManagedFieldsForLabelsAndAnnotations(ctx, env.Client, env.GetAPIReader(), currentKubeadmConfigForPatch, kcpManagerName)).To(Succeed()) + } + t.Cleanup(func() { + g.Expect(env.CleanupAndWait(context.Background(), tt.currentKubeadmConfig)).To(Succeed()) + }) + + upToDateResult := internal.UpToDateResult{ + CurrentInfraMachine: currentInfraMachineForPatch, + CurrentKubeadmConfig: currentKubeadmConfigForPatch, + DesiredMachine: tt.desiredMachine.DeepCopy(), + DesiredInfraMachine: tt.desiredInfraMachine.DeepCopy(), + DesiredKubeadmConfig: tt.desiredKubeadmConfig.DeepCopy(), + } + + r := KubeadmControlPlaneReconciler{ + Client: env.Client, + recorder: record.NewFakeRecorder(32), + } + + err := r.triggerInPlaceUpdate(ctx, currentMachineForPatch, upToDateResult) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + gotMachine := &clusterv1.Machine{} + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.desiredMachine), gotMachine)).To(Succeed()) + g.Expect(gotMachine.Annotations).To(Equal(map[string]string{ + "annotation-1": "annotation-value-1", + clusterv1.UpdateInProgressAnnotation: "", + runtimev1.PendingHooksAnnotation: "UpdateMachine", + })) + g.Expect(gotMachine.Spec).To(BeComparableTo(tt.desiredMachine.Spec)) + + gotInfraMachine := &unstructured.Unstructured{} + gotInfraMachine.SetGroupVersionKind(tt.desiredInfraMachine.GroupVersionKind()) + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.desiredInfraMachine), gotInfraMachine)).To(Succeed()) + g.Expect(gotInfraMachine.GetAnnotations()).To(Equal(map[string]string{ + "annotation-1": "annotation-value-1", + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-2", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate2.infrastructure.cluster.x-k8s.io", + clusterv1.UpdateInProgressAnnotation: "", + })) + g.Expect(gotInfraMachine.Object["spec"]).To(BeComparableTo(tt.desiredInfraMachine.Object["spec"])) + + gotKubeadmConfig := &bootstrapv1.KubeadmConfig{} + g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.desiredKubeadmConfig), gotKubeadmConfig)).To(Succeed()) + g.Expect(gotKubeadmConfig.Annotations).To(Equal(map[string]string{ + "annotation-1": "annotation-value-1", + clusterv1.UpdateInProgressAnnotation: "", + })) + g.Expect(gotKubeadmConfig.Spec).To(BeComparableTo(tt.desiredKubeadmConfig.Spec)) + }) + } +} diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index b58b1f1a5ffb..e9301b199527 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -20,8 +20,8 @@ import ( "context" "fmt" "strings" + "time" - "github.com/blang/semver/v4" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -39,30 +39,22 @@ import ( ) func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) - - bootstrapSpec := controlPlane.InitialControlPlaneConfig() - - parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", 
controlPlane.KCP.Spec.Version) - } - internal.DefaultFeatureGates(bootstrapSpec, parsedVersion) + log := ctrl.LoggerFrom(ctx) fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) if err != nil { return ctrl.Result{}, err } - newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) + newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, false, fd) if err != nil { - logger.Error(err, "Failed to create initial control plane Machine") + log.Error(err, "Failed to create initial control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } - logger.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). - Info("Machine created (scale up)", + log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). + Info(fmt.Sprintf("Machine %s created (init)", newMachine.Name), "Machine", klog.KObj(newMachine), newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name), newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name)) @@ -72,36 +64,31 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte } func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) - - // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. - if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() { - return result, err + if r.overrideScaleUpControlPlaneFunc != nil { + return r.overrideScaleUpControlPlaneFunc(ctx, controlPlane) } - // Create the bootstrap configuration - bootstrapSpec := controlPlane.JoinControlPlaneConfig() + log := ctrl.LoggerFrom(ctx) - parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) + // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. + if result := r.preflightChecks(ctx, controlPlane); !result.IsZero() { + return result, nil } - internal.DefaultFeatureGates(bootstrapSpec, parsedVersion) fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) if err != nil { return ctrl.Result{}, err } - newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd) + newMachine, err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, true, fd) if err != nil { - logger.Error(err, "Failed to create additional control plane Machine") + log.Error(err, "Failed to create additional control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } - logger.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). - Info("Machine created (scale up)", + log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...). 
+ Info(fmt.Sprintf("Machine %s created (scale up)", newMachine.Name), "Machine", klog.KObj(newMachine), newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name), newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name)) @@ -113,30 +100,28 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( ctx context.Context, controlPlane *internal.ControlPlane, - outdatedMachines collections.Machines, + machineToDelete *clusterv1.Machine, ) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) - - // Pick the Machine that we should scale down. - machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines) - if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") + if r.overrideScaleDownControlPlaneFunc != nil { + return r.overrideScaleDownControlPlaneFunc(ctx, controlPlane, machineToDelete) } + log := ctrl.LoggerFrom(ctx) + // Run preflight checks ensuring the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. // Given that we're scaling down, we can exclude the machineToDelete from the preflight checks. - if result, err := r.preflightChecks(ctx, controlPlane, machineToDelete); err != nil || !result.IsZero() { - return result, err + if result := r.preflightChecks(ctx, controlPlane, machineToDelete); !result.IsZero() { + return result, nil } workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { - logger.Error(err, "Failed to create client to workload cluster") + log.Error(err, "Failed to create client to workload cluster") return ctrl.Result{}, errors.Wrapf(err, "failed to create client to workload cluster") } if machineToDelete == nil { - logger.Info("Failed to pick control plane Machine to delete") + log.Info("Failed to pick control plane Machine to delete") return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete") } @@ -144,7 +129,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( if controlPlane.IsEtcdManaged() { etcdLeaderCandidate := controlPlane.Machines.Newest() if err := workloadCluster.ForwardEtcdLeadership(ctx, machineToDelete, etcdLeaderCandidate); err != nil { - logger.Error(err, "Failed to move leadership to candidate machine", "candidate", etcdLeaderCandidate.Name) + log.Error(err, "Failed to move leadership to candidate machine", "candidate", etcdLeaderCandidate.Name) return ctrl.Result{}, err } @@ -152,15 +137,15 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( } if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) { - logger.Error(err, "Failed to delete control plane machine") + log.Error(err, "Failed to delete control plane machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleDown", "Failed to delete control plane Machine %s for cluster %s control plane: %v", machineToDelete.Name, klog.KObj(controlPlane.Cluster), err) return ctrl.Result{}, err } // Note: We intentionally log after Delete because we want this log line to show up only after DeletionTimestamp has been set. // Also, setting DeletionTimestamp doesn't mean the Machine is actually deleted (deletion takes some time). - logger.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToDelete)...). 
- Info("Deleting Machine (scale down)", "Machine", klog.KObj(machineToDelete)) + log.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToDelete)...). + Info(fmt.Sprintf("Machine %s deleting (scale down)", machineToDelete.Name), "Machine", klog.KObj(machineToDelete)) // Requeue the control plane, in case there are additional operations to perform return ctrl.Result{Requeue: true}, nil @@ -174,29 +159,52 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( // If the control plane is not passing preflight checks, it requeue. // // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneAndMachinesConditions before this. -func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam - logger := ctrl.LoggerFrom(ctx) +func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) ctrl.Result { + if r.overridePreflightChecksFunc != nil { + return r.overridePreflightChecksFunc(ctx, controlPlane, excludeFor...) + } + + // Reset PreflightCheckResults in case this function is called multiple times (e.g. for in-place update code paths) + // Note: The PreflightCheckResults field is only written by this func, so this is safe. + controlPlane.PreflightCheckResults = internal.PreflightCheckResults{} + + log := ctrl.LoggerFrom(ctx) // If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet, // so it is considered ok to proceed. if controlPlane.Machines.Len() == 0 { - return ctrl.Result{}, nil + return ctrl.Result{} } if feature.Gates.Enabled(feature.ClusterTopology) { // Block when we expect an upgrade to be propagated for topology clusters. - if controlPlane.Cluster.Spec.Topology.IsDefined() && controlPlane.Cluster.Spec.Topology.Version != controlPlane.KCP.Spec.Version { - logger.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated from Cluster.spec.topology", controlPlane.Cluster.Spec.Topology.Version)) + // NOTE: in case the cluster is performing an upgrade, allow creation of machines for the intermediate step. + hasSameVersionOfCurrentUpgradeStep := false + if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok { + hasSameVersionOfCurrentUpgradeStep = version == controlPlane.KCP.Spec.Version + } + + if controlPlane.Cluster.Spec.Topology.IsDefined() && controlPlane.Cluster.Spec.Topology.Version != controlPlane.KCP.Spec.Version && !hasSameVersionOfCurrentUpgradeStep { + v := controlPlane.Cluster.Spec.Topology.Version + if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok { + v = version + } + log.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated", v)) controlPlane.PreflightCheckResults.TopologyVersionMismatch = true - return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil + // Slow down reconcile frequency, as deferring a version upgrade waits for slow processes, + // e.g. workers are completing a previous upgrade step. + r.controller.DeferNextReconcileForObject(controlPlane.KCP, time.Now().Add(5*time.Second)) + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} } } // If there are deleting machines, wait for the operation to complete. 
if controlPlane.HasDeletingMachine() { controlPlane.PreflightCheckResults.HasDeletingMachine = true - logger.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) - return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil + log.Info("Waiting for machines to be deleted", "machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", ")) + // Slow down reconcile frequency, deletion is a slow process. + r.controller.DeferNextReconcileForObject(controlPlane.KCP, time.Now().Add(5*time.Second)) + return ctrl.Result{RequeueAfter: deleteRequeueAfter} } // Check machine health conditions; if there are conditions with False or Unknown, then wait. @@ -251,12 +259,15 @@ loopmachines: aggregatedError := kerrors.NewAggregate(machineErrors) r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "ControlPlaneUnhealthy", "Waiting for control plane to pass preflight checks to continue reconciliation: %v", aggregatedError) - logger.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error()) - - return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil + log.Info("Waiting for control plane to pass preflight checks", "failures", aggregatedError.Error()) + // Slow down reconcile frequency, it takes some time before control plane components stabilize + // after a new Machine is created. Similarly, if there are issues on running Machines, it + // usually takes some time to get back to normal state. + r.controller.DeferNextReconcileForObject(controlPlane.KCP, time.Now().Add(5*time.Second)) + return ctrl.Result{RequeueAfter: preflightFailedRequeueAfter} } - return ctrl.Result{}, nil + return ctrl.Result{} } func preflightCheckCondition(kind string, obj *clusterv1.Machine, conditionType string) error { @@ -273,7 +284,8 @@ func preflightCheckCondition(kind string, obj *clusterv1.Machine, conditionType return nil } -// selectMachineForScaleDown select a machine candidate for scaling down. The selection is a two phase process: +// selectMachineForInPlaceUpdateOrScaleDown select a machine candidate for scaling down or for in-place update. +// The selection is a two phase process: // // In the first phase it selects a subset of machines eligible for deletion: // - if there are outdated machines with the delete machine annotation, use them as eligible subset (priority to user requests, part 1) @@ -284,18 +296,20 @@ func preflightCheckCondition(kind string, obj *clusterv1.Machine, conditionType // // Once the subset of machines eligible for deletion is identified, one machine is picked out of this subset by // selecting the machine in the failure domain with most machines (including both eligible and not eligible machines). -func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { +func selectMachineForInPlaceUpdateOrScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { // Select the subset of machines eligible for scale down. 
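+	// The eligible subset is picked with the following priority: delete-annotated outdated machines,
+	// then any delete-annotated machines, then outdated machines with unhealthy control plane components,
+	// then outdated machines, and finally all machines.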
- eligibleMachines := controlPlane.Machines + var eligibleMachines collections.Machines switch { case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0: eligibleMachines = controlPlane.MachineWithDeleteAnnotation(outdatedMachines) - case controlPlane.MachineWithDeleteAnnotation(eligibleMachines).Len() > 0: - eligibleMachines = controlPlane.MachineWithDeleteAnnotation(eligibleMachines) + case controlPlane.MachineWithDeleteAnnotation(controlPlane.Machines).Len() > 0: + eligibleMachines = controlPlane.MachineWithDeleteAnnotation(controlPlane.Machines) case controlPlane.UnhealthyMachinesWithUnhealthyControlPlaneComponents(outdatedMachines).Len() > 0: eligibleMachines = controlPlane.UnhealthyMachinesWithUnhealthyControlPlaneComponents(outdatedMachines) case outdatedMachines.Len() > 0: eligibleMachines = outdatedMachines + default: + eligibleMachines = controlPlane.Machines } // Pick an eligible machine from the failure domain with most machines in (including both eligible and not eligible machines) diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index b125a7abfdc9..ccd5bb1aa66a 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -31,12 +31,15 @@ import ( "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/feature" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" ) @@ -88,7 +91,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) g.Expect(machineList.Items).To(HaveLen(1)) - res, err := collections.GetFilteredMachinesForCluster(ctx, env.GetAPIReader(), cluster, collections.OwnedMachines(kcp)) + res, err := collections.GetFilteredMachinesForCluster(ctx, env.GetAPIReader(), cluster, collections.OwnedMachines(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind())) g.Expect(res).To(HaveLen(1)) g.Expect(err).ToNot(HaveOccurred()) @@ -106,7 +109,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { kubeadmConfig := &bootstrapv1.KubeadmConfig{} bootstrapRef := machineList.Items[0].Spec.Bootstrap.ConfigRef g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: machineList.Items[0].Namespace, Name: bootstrapRef.Name}, kubeadmConfig)).To(Succeed()) - g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{internal.ControlPlaneKubeletLocalMode: true})) + g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{desiredstate.ControlPlaneKubeletLocalMode: true})) } func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { @@ -174,7 +177,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { kubeadmConfig := &bootstrapv1.KubeadmConfig{} bootstrapRef := 
controlPlaneMachines.Items[0].Spec.Bootstrap.ConfigRef g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: controlPlaneMachines.Items[0].Namespace, Name: bootstrapRef.Name}, kubeadmConfig)).To(Succeed()) - g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{internal.ControlPlaneKubeletLocalMode: true})) + g.Expect(kubeadmConfig.Spec.ClusterConfiguration.FeatureGates).To(BeComparableTo(map[string]bool{desiredstate.ControlPlaneKubeletLocalMode: true})) }) t.Run("does not create a control plane Machine if preflight checks fail", func(t *testing.T) { setup := func(t *testing.T, g *WithT) *corev1.Namespace { @@ -217,9 +220,12 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { Workload: &fakeWorkloadCluster{}, } + fc := capicontrollerutil.NewFakeController() + r := &KubeadmControlPlaneReconciler{ Client: env, SecretCachingClient: secretCachingClient, + controller: fc, managementCluster: fmc, managementClusterUncached: fmc, recorder: record.NewFakeRecorder(32), @@ -232,6 +238,10 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { result, err := r.scaleUpControlPlane(context.Background(), controlPlane) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(fc.Deferrals).To(HaveKeyWithValue( + reconcile.Request{NamespacedName: client.ObjectKeyFromObject(kcp)}, + BeTemporally("~", time.Now().Add(5*time.Second), 1*time.Second)), + ) // scaleUpControlPlane is never called due to health check failure and new machine is not created to scale up. controlPlaneMachines := &clusterv1.MachineList{} @@ -283,7 +293,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + machineToDelete, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, controlPlane, controlPlane.Machines) + g.Expect(err).ToNot(HaveOccurred()) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, machineToDelete) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) @@ -325,7 +337,9 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + machineToDelete, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, controlPlane, controlPlane.Machines) + g.Expect(err).ToNot(HaveOccurred()) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, machineToDelete) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) @@ -345,10 +359,13 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. setMachineHealthy(machines["three"]) fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) + fc := capicontrollerutil.NewFakeController() + r := &KubeadmControlPlaneReconciler{ recorder: record.NewFakeRecorder(32), Client: fakeClient, SecretCachingClient: fakeClient, + controller: fc, managementCluster: &fakeManagementCluster{ Workload: &fakeWorkloadCluster{}, }, @@ -363,9 +380,15 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. 
} controlPlane.InjectTestManagementCluster(r.managementCluster) - result, err := r.scaleDownControlPlane(context.Background(), controlPlane, controlPlane.Machines) + machineToDelete, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, controlPlane, controlPlane.Machines) + g.Expect(err).ToNot(HaveOccurred()) + result, err := r.scaleDownControlPlane(context.Background(), controlPlane, machineToDelete) g.Expect(err).ToNot(HaveOccurred()) g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(fc.Deferrals).To(HaveKeyWithValue( + reconcile.Request{NamespacedName: client.ObjectKeyFromObject(kcp)}, + BeTemporally("~", time.Now().Add(5*time.Second), 1*time.Second)), + ) controlPlaneMachines := clusterv1.MachineList{} g.Expect(fakeClient.List(context.Background(), &controlPlaneMachines)).To(Succeed()) @@ -373,7 +396,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. }) } -func TestSelectMachineForScaleDown(t *testing.T) { +func TestSelectMachineForInPlaceUpdateOrScaleDown(t *testing.T) { kcp := controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{}, } @@ -502,7 +525,7 @@ func TestSelectMachineForScaleDown(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines) + selectedMachine, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, tc.cp, tc.outDatedMachines) if tc.expectErr { g.Expect(err).To(HaveOccurred()) @@ -518,12 +541,13 @@ func TestSelectMachineForScaleDown(t *testing.T) { func TestPreflightChecks(t *testing.T) { utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true) testCases := []struct { - name string - cluster *clusterv1.Cluster - kcp *controlplanev1.KubeadmControlPlane - machines []*clusterv1.Machine - expectResult ctrl.Result - expectPreflight internal.PreflightCheckResults + name string + cluster *clusterv1.Cluster + kcp *controlplanev1.KubeadmControlPlane + machines []*clusterv1.Machine + expectResult ctrl.Result + expectPreflight internal.PreflightCheckResults + expectDeferNextReconcile time.Duration }{ { name: "control plane without machines (not initialized) should pass", @@ -561,6 +585,39 @@ func TestPreflightChecks(t *testing.T) { EtcdClusterNotHealthy: false, TopologyVersionMismatch: true, }, + expectDeferNextReconcile: 5 * time.Second, + }, + { + name: "control plane with a pending upgrade, but not yet at the current step of the upgrade plan, should requeue", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyUpgradeStepAnnotation: "v1.32.0", + }, + }, + Spec: clusterv1.ClusterSpec{ + Topology: clusterv1.Topology{ + Version: "v1.33.0", + }, + }, + }, + kcp: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.31.0", + }, + }, + machines: []*clusterv1.Machine{ + {}, + }, + + expectResult: ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, + expectPreflight: internal.PreflightCheckResults{ + HasDeletingMachine: false, + ControlPlaneComponentsNotHealthy: false, + EtcdClusterNotHealthy: false, + TopologyVersionMismatch: true, + }, + expectDeferNextReconcile: 5 * time.Second, }, { name: "control plane with a deleting machine should requeue", @@ -579,6 +636,7 @@ func TestPreflightChecks(t *testing.T) { EtcdClusterNotHealthy: false, TopologyVersionMismatch: false, }, + expectDeferNextReconcile: 5 * time.Second, }, { name: 
"control plane without a nodeRef should requeue", @@ -598,6 +656,7 @@ func TestPreflightChecks(t *testing.T) { EtcdClusterNotHealthy: true, TopologyVersionMismatch: false, }, + expectDeferNextReconcile: 5 * time.Second, }, { name: "control plane with an unhealthy machine condition should requeue", @@ -625,6 +684,7 @@ func TestPreflightChecks(t *testing.T) { EtcdClusterNotHealthy: false, TopologyVersionMismatch: false, }, + expectDeferNextReconcile: 5 * time.Second, }, { name: "control plane with an unhealthy machine condition should requeue", @@ -652,6 +712,7 @@ func TestPreflightChecks(t *testing.T) { EtcdClusterNotHealthy: true, TopologyVersionMismatch: false, }, + expectDeferNextReconcile: 5 * time.Second, }, { name: "control plane with an healthy machine and an healthy kcp condition should pass", @@ -687,14 +748,66 @@ func TestPreflightChecks(t *testing.T) { TopologyVersionMismatch: false, }, }, + { + name: "control plane with a pending upgrade, but already at the current step of the upgrade plan, should pass", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyUpgradeStepAnnotation: "v1.32.0", + }, + }, + Spec: clusterv1.ClusterSpec{ + Topology: clusterv1.Topology{ + Version: "v1.33.0", + }, + }, + }, + kcp: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.32.0", + }, Status: controlplanev1.KubeadmControlPlaneStatus{ + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyCondition, Status: metav1.ConditionTrue}, + }, + }, + }, + machines: []*clusterv1.Machine{ + { + Status: clusterv1.MachineStatus{ + NodeRef: clusterv1.MachineNodeReference{ + Name: "node-1", + }, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + + expectResult: ctrl.Result{}, + expectPreflight: internal.PreflightCheckResults{ + HasDeletingMachine: false, + ControlPlaneComponentsNotHealthy: false, + EtcdClusterNotHealthy: false, + TopologyVersionMismatch: false, + }, + }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + fc := capicontrollerutil.NewFakeController() + r := &KubeadmControlPlaneReconciler{ - recorder: record.NewFakeRecorder(32), + controller: fc, + recorder: record.NewFakeRecorder(32), } cluster := &clusterv1.Cluster{} if tt.cluster != nil { @@ -705,10 +818,17 @@ func TestPreflightChecks(t *testing.T) { KCP: tt.kcp, Machines: collections.FromMachines(tt.machines...), } - result, err := r.preflightChecks(context.TODO(), controlPlane) - g.Expect(err).ToNot(HaveOccurred()) + result := r.preflightChecks(context.TODO(), controlPlane) g.Expect(result).To(BeComparableTo(tt.expectResult)) g.Expect(controlPlane.PreflightCheckResults).To(Equal(tt.expectPreflight)) + if tt.expectDeferNextReconcile == 0 { + g.Expect(fc.Deferrals).To(BeEmpty()) + } else { 
+ g.Expect(fc.Deferrals).To(HaveKeyWithValue( + reconcile.Request{NamespacedName: client.ObjectKeyFromObject(tt.kcp)}, + BeTemporally("~", time.Now().Add(tt.expectDeferNextReconcile), 1*time.Second)), + ) + } }) } } diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index d056e2d8a567..44b2a2041ae1 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -155,6 +155,21 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro // Note: This only gets initialized once and does not change if the kubeadm config map goes away. func setControlPlaneInitialized(ctx context.Context, controlPlane *internal.ControlPlane) error { if !ptr.Deref(controlPlane.KCP.Status.Initialization.ControlPlaneInitialized, false) { + // If the control plane has only one machine, and this machine is marked for remediation or in the process of deleting, + // do not check for control plane initialized. + // This prevents an issue that happens if kubeadm init completes in the short timeframe between when machine deletion is triggered + // to when the machine goes away; this issue, if not properly handled, will lead to an inconsistent state where + // cluster is initialized, no CP machine exists, and the replacement CP machine fails when trying to join. + if len(controlPlane.Machines) == 1 { + m := controlPlane.Machines.UnsortedList()[0] + if collections.IsUnhealthyAndOwnerRemediated(m) { + return nil + } + if !m.DeletionTimestamp.IsZero() { + return nil + } + } + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) if err != nil { return errors.Wrap(err, "failed to create remote cluster client") @@ -836,7 +851,11 @@ func minTime(t1, t2 time.Time) time.Time { func getPreflightMessages(cluster *clusterv1.Cluster, preflightChecks internal.PreflightCheckResults) []string { additionalMessages := []string{} if preflightChecks.TopologyVersionMismatch { - additionalMessages = append(additionalMessages, fmt.Sprintf("* waiting for a version upgrade to %s to be propagated from Cluster.spec.topology", cluster.Spec.Topology.Version)) + v := cluster.Spec.Topology.Version + if version, ok := cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok { + v = version + } + additionalMessages = append(additionalMessages, fmt.Sprintf("* waiting for a version upgrade to %s to be propagated", v)) } if preflightChecks.HasDeletingMachine { diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 66f983b8270e..6b1ff342ab3a 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -75,6 +75,9 @@ func TestKubeadmControlPlaneReconciler_setControlPlaneInitialized(t *testing.T) controlPlane := &internal.ControlPlane{ Cluster: &clusterv1.Cluster{}, KCP: &controlplanev1.KubeadmControlPlane{}, + Machines: collections.FromMachines( + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}}, + ), } controlPlane.InjectTestManagementCluster(&fakeManagementCluster{ Workload: &fakeWorkloadCluster{ @@ -98,6 +101,85 @@ func TestKubeadmControlPlaneReconciler_setControlPlaneInitialized(t *testing.T) Reason: controlplanev1.KubeadmControlPlaneInitializedReason, }, conditions.IgnoreLastTransitionTime(true))) }) + t.Run("kubeadm config exists is ignored if there is a single CP machine and it is marked for remediation", 
func(t *testing.T) { + g := NewWithT(t) + controlPlane := &internal.ControlPlane{ + Cluster: &clusterv1.Cluster{}, + KCP: &controlplanev1.KubeadmControlPlane{}, + Machines: collections.FromMachines( + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{Name: "m1"}, + Status: clusterv1.MachineStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineHealthCheckNodeDeletedReason, + }, + { + Type: clusterv1.MachineOwnerRemediatedCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationReason, + Message: "Waiting for remediation", + }, + }, + }, + }, + ), + } + controlPlane.InjectTestManagementCluster(&fakeManagementCluster{ + Workload: &fakeWorkloadCluster{ + Status: internal.ClusterStatus{ + HasKubeadmConfig: true, + }, + }, + }) + + err := setControlPlaneInitialized(ctx, controlPlane) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(ptr.Deref(controlPlane.KCP.Status.Initialization.ControlPlaneInitialized, false)).To(BeFalse()) + + setInitializedCondition(ctx, controlPlane.KCP) + c := conditions.Get(controlPlane.KCP, controlplanev1.KubeadmControlPlaneInitializedCondition) + g.Expect(c).ToNot(BeNil()) + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ + Type: controlplanev1.KubeadmControlPlaneInitializedCondition, + Status: metav1.ConditionFalse, + Reason: controlplanev1.KubeadmControlPlaneNotInitializedReason, + }, conditions.IgnoreLastTransitionTime(true))) + }) + t.Run("kubeadm config exists is ignored if there is a single CP machine and it is deleting", func(t *testing.T) { + g := NewWithT(t) + controlPlane := &internal.ControlPlane{ + Cluster: &clusterv1.Cluster{}, + KCP: &controlplanev1.KubeadmControlPlane{}, + Machines: collections.FromMachines( + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1", DeletionTimestamp: ptr.To(metav1.Now())}}, + ), + } + controlPlane.InjectTestManagementCluster(&fakeManagementCluster{ + Workload: &fakeWorkloadCluster{ + Status: internal.ClusterStatus{ + HasKubeadmConfig: true, + }, + }, + }) + + err := setControlPlaneInitialized(ctx, controlPlane) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(ptr.Deref(controlPlane.KCP.Status.Initialization.ControlPlaneInitialized, false)).To(BeFalse()) + + setInitializedCondition(ctx, controlPlane.KCP) + c := conditions.Get(controlPlane.KCP, controlplanev1.KubeadmControlPlaneInitializedCondition) + g.Expect(c).ToNot(BeNil()) + g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ + Type: controlplanev1.KubeadmControlPlaneInitializedCondition, + Status: metav1.ConditionFalse, + Reason: controlplanev1.KubeadmControlPlaneNotInitializedReason, + }, conditions.IgnoreLastTransitionTime(true))) + }) } func TestSetReplicas(t *testing.T) { @@ -451,7 +533,7 @@ func Test_setScalingUpCondition(t *testing.T) { Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneScalingUpReason, Message: "Scaling up from 3 to 5 replicas is blocked because:\n" + - "* waiting for a version upgrade to v1.32.0 to be propagated from Cluster.spec.topology\n" + + "* waiting for a version upgrade to v1.32.0 to be propagated\n" + "* waiting for a control plane Machine to complete deletion\n" + "* waiting for control plane components to become healthy\n" + "* waiting for etcd cluster to become healthy", @@ -645,7 +727,7 @@ After above Pods have been removed from the Node, the following Pods will be evi Status: metav1.ConditionTrue, Reason: 
controlplanev1.KubeadmControlPlaneScalingDownReason, Message: "Scaling down from 3 to 1 replicas is blocked because:\n" + - "* waiting for a version upgrade to v1.32.0 to be propagated from Cluster.spec.topology\n" + + "* waiting for a version upgrade to v1.32.0 to be propagated\n" + "* waiting for a control plane Machine to complete deletion\n" + "* waiting for control plane components to become healthy\n" + "* waiting for etcd cluster to become healthy", @@ -1971,10 +2053,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "foo", @@ -2123,10 +2201,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "foo", @@ -2195,10 +2269,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "foo", @@ -2274,10 +2344,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "foo", @@ -2352,10 +2418,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr } kcp := &controlplanev1.KubeadmControlPlane{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmControlPlane", - APIVersion: controlplanev1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: "foo", diff --git a/controlplane/kubeadm/internal/controllers/update.go b/controlplane/kubeadm/internal/controllers/update.go new file mode 100644 index 000000000000..d84fd5d9c709 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/update.go @@ -0,0 +1,159 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/pkg/errors" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util/collections" +) + +func (r *KubeadmControlPlaneReconciler) updateControlPlane( + ctx context.Context, + controlPlane *internal.ControlPlane, + machinesNeedingRollout collections.Machines, + machinesUpToDateResults map[string]internal.UpToDateResult, +) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + + // TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster + + workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) + if err != nil { + log.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) + return ctrl.Result{}, errors.Wrapf(err, "failed to update control plane") + } + + parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to update control plane: failed to parse Kubernetes version %q", controlPlane.KCP.Spec.Version) + } + + // Ensure kubeadm clusterRoleBinding for v1.29+ as per https://github.com/kubernetes/kubernetes/pull/121305 + if err := workloadCluster.AllowClusterAdminPermissions(ctx, parsedVersion); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to update control plane: failed to set cluster-admin ClusterRoleBinding for kubeadm") + } + + kubeadmCMMutators := make([]func(*bootstrapv1.ClusterConfiguration), 0) + + if controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.IsDefined() { + // Get the imageRepository or the correct value if nothing is set and a migration is necessary. + imageRepository := internal.ImageRepositoryFromClusterConfig(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration) + + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(imageRepository), + workloadCluster.UpdateFeatureGatesInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec, parsedVersion), + workloadCluster.UpdateAPIServerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer), + workloadCluster.UpdateControllerManagerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager), + workloadCluster.UpdateSchedulerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler), + workloadCluster.UpdateCertificateValidityPeriodDays(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays), + workloadCluster.UpdateEncryptionAlgorithm(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.EncryptionAlgorithm)) + + // Etcd local and external are mutually exclusive and they cannot be switched, once set. 
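Note on the pattern above: the mutators collected into kubeadmCMMutators are applied in a single UpdateClusterConfiguration call, and the branch that follows appends exactly one of the two etcd mutators because local and external etcd are mutually exclusive once set. A minimal, self-contained sketch of this collect-then-apply shape, using simplified stand-in types (clusterConfig and the with* helpers are illustrative only, not the internal Workload API):

package main

import "fmt"

type clusterConfig struct {
	ImageRepository string
	LocalEtcd       bool
}

type mutator func(*clusterConfig)

func withImageRepository(repo string) mutator {
	return func(c *clusterConfig) { c.ImageRepository = repo }
}

func withLocalEtcd() mutator {
	return func(c *clusterConfig) { c.LocalEtcd = true }
}

func main() {
	// Mutators are appended conditionally and applied in one pass, mirroring how
	// updateControlPlane builds kubeadmCMMutators before a single call to
	// UpdateClusterConfiguration.
	mutators := []mutator{withImageRepository("registry.k8s.io")}
	etcdManaged := true // assumption for the example
	if etcdManaged {
		mutators = append(mutators, withLocalEtcd())
	}

	cfg := &clusterConfig{}
	for _, m := range mutators {
		m(cfg)
	}
	fmt.Printf("%+v\n", cfg)
}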
+ if controlPlane.IsEtcdManaged() { + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateEtcdLocalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local)) + } else { + kubeadmCMMutators = append(kubeadmCMMutators, + workloadCluster.UpdateEtcdExternalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External)) + } + } + + // collectively update Kubeadm config map + if err = workloadCluster.UpdateClusterConfiguration(ctx, parsedVersion, kubeadmCMMutators...); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to update control plane") + } + + switch controlPlane.KCP.Spec.Rollout.Strategy.Type { + case controlplanev1.RollingUpdateStrategyType: + // RolloutStrategy is currently defaulted and validated to be RollingUpdate + res, err := r.rollingUpdate(ctx, controlPlane, machinesNeedingRollout, machinesUpToDateResults) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to update control plane") + } + return res, nil + default: + log.Info("RolloutStrategy type is not set to RollingUpdate, unable to determine the strategy for rolling out machines") + return ctrl.Result{}, nil + } +} + +func (r *KubeadmControlPlaneReconciler) rollingUpdate( + ctx context.Context, + controlPlane *internal.ControlPlane, + machinesNeedingRollout collections.Machines, + machinesUpToDateResults map[string]internal.UpToDateResult, +) (ctrl.Result, error) { + currentReplicas := int32(controlPlane.Machines.Len()) + currentUpToDateReplicas := int32(controlPlane.UpToDateMachines().Len()) + desiredReplicas := *controlPlane.KCP.Spec.Replicas + maxSurge := int32(controlPlane.KCP.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntValue()) + // Note: As MaxSurge is validated to be either 0 or 1, maxReplicas will be either desiredReplicas or desiredReplicas+1. + maxReplicas := desiredReplicas + maxSurge + + // If currentReplicas < maxReplicas we have to scale up + // Note: This is done to ensure we have as many Machines as allowed during rollout to maximize fault tolerance. + if currentReplicas < maxReplicas { + // Note: scaleUpControlPlane ensures that we don't continue scaling up while waiting for Machines to have NodeRefs. + return r.scaleUpControlPlane(ctx, controlPlane) + } + + // If currentReplicas >= maxReplicas we have to scale down. + // Note: If we are already at or above the maximum Machines we have to in-place update or delete a Machine + // to make progress with the update (as we cannot create additional new Machines above the maximum). + + // Pick the Machine that we should in-place update or scale down. + machineToInPlaceUpdateOrScaleDown, err := selectMachineForInPlaceUpdateOrScaleDown(ctx, controlPlane, machinesNeedingRollout) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to select next Machine for rollout") + } + machineUpToDateResult, ok := machinesUpToDateResults[machineToInPlaceUpdateOrScaleDown.Name] + if !ok { + // Note: This should never happen as we store results for all Machines in machinesUpToDateResults. + return ctrl.Result{}, errors.Errorf("failed to check if Machine %s is UpToDate", machineToInPlaceUpdateOrScaleDown.Name) + } + + // If the selected Machine is eligible for in-place update and we don't already have enough up-to-date replicas, try in-place update. + // Note: To be safe we only try an in-place update when we would otherwise delete a Machine. 
This ensures we could + // afford if the in-place update fails and the Machine becomes unavailable (and eventually MHC kicks in and the Machine is recreated). + if feature.Gates.Enabled(feature.InPlaceUpdates) && + machineUpToDateResult.EligibleForInPlaceUpdate && + currentUpToDateReplicas < desiredReplicas { + fallbackToScaleDown, res, err := r.tryInPlaceUpdate(ctx, controlPlane, machineToInPlaceUpdateOrScaleDown, machineUpToDateResult) + if err != nil { + return ctrl.Result{}, err + } + if !res.IsZero() { + return res, nil + } + if fallbackToScaleDown { + return r.scaleDownControlPlane(ctx, controlPlane, machineToInPlaceUpdateOrScaleDown) + } + // In-place update triggered + return ctrl.Result{}, nil // Note: Requeue is not needed, changes to Machines trigger another reconcile. + } + return r.scaleDownControlPlane(ctx, controlPlane, machineToInPlaceUpdateOrScaleDown) +} diff --git a/controlplane/kubeadm/internal/controllers/update_test.go b/controlplane/kubeadm/internal/controllers/update_test.go new file mode 100644 index 000000000000..96d9736cad98 --- /dev/null +++ b/controlplane/kubeadm/internal/controllers/update_test.go @@ -0,0 +1,531 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "testing" + "time" + + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" + "sigs.k8s.io/cluster-api/feature" + capicontrollerutil "sigs.k8s.io/cluster-api/internal/util/controller" + "sigs.k8s.io/cluster-api/internal/util/ssa" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +const ( + UpdatedVersion string = "v1.17.4" + Host string = "nodomain.example.com" +) + +func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { + setup := func(t *testing.T, g *WithT) *corev1.Namespace { + t.Helper() + + t.Log("Creating the namespace") + ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-rollout-scaleup") + g.Expect(err).ToNot(HaveOccurred()) + + return ns + } + + teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) { + t.Helper() + + t.Log("Deleting the namespace") + g.Expect(env.Delete(ctx, ns)).To(Succeed()) + } + + g := NewWithT(t) + namespace := setup(t, g) + defer teardown(t, g, namespace) + + timeout := 30 * time.Second + + cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name) + g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed()) + cluster.UID = types.UID(util.RandomString(10)) + cluster.Spec.ControlPlaneEndpoint.Host = Host + cluster.Spec.ControlPlaneEndpoint.Port = 6443 + cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true) + kcp.UID = types.UID(util.RandomString(10)) + kcp.Spec.Replicas = ptr.To[int32](1) + setKCPHealthy(kcp) + + fc := capicontrollerutil.NewFakeController() + + r := &KubeadmControlPlaneReconciler{ + Client: env, + SecretCachingClient: secretCachingClient, + controller: fc, + recorder: record.NewFakeRecorder(32), + managementCluster: &fakeManagementCluster{ + Management: &internal.Management{Client: env}, + Workload: &fakeWorkloadCluster{ + Status: internal.ClusterStatus{Nodes: 1}, + }, + }, + managementClusterUncached: &fakeManagementCluster{ + Management: &internal.Management{Client: env}, + Workload: &fakeWorkloadCluster{ + Status: internal.ClusterStatus{Nodes: 1}, + }, + }, + ssaCache: ssa.NewCache("test-controller"), + } + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: nil, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + result, err := r.initializeControlPlane(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) + + // initial setup + initialMachine := &clusterv1.MachineList{} + g.Eventually(func(g Gomega) { + // Nb. This Eventually block also forces the cache to update so that subsequent + // reconcile and updateControlPlane calls use the updated cache and avoids flakiness in the test. 
+ g.Expect(env.List(ctx, initialMachine, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(initialMachine.Items).To(HaveLen(1)) + }, timeout).Should(Succeed()) + for i := range initialMachine.Items { + setMachineHealthy(&initialMachine.Items[i]) + } + + // change the KCP spec so the machine becomes outdated + kcp.Spec.Version = UpdatedVersion + + // run upgrade the first time, expect we scale up + needingUpgrade := collections.FromMachineList(initialMachine) + controlPlane.Machines = needingUpgrade + machinesUpToDateResults := map[string]internal.UpToDateResult{} + for _, m := range needingUpgrade { + machinesUpToDateResults[m.Name] = internal.UpToDateResult{EligibleForInPlaceUpdate: false} + } + result, err = r.updateControlPlane(ctx, controlPlane, needingUpgrade, machinesUpToDateResults) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) + bothMachines := &clusterv1.MachineList{} + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(bothMachines.Items).To(HaveLen(2)) + }, timeout).Should(Succeed()) + + // run upgrade a second time, simulate that the node has not appeared yet but the machine exists + + // Unhealthy control plane will be detected during reconcile loop and upgrade will never be called. + controlPlane = &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: collections.FromMachineList(bothMachines), + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + result, err = r.reconcile(context.Background(), controlPlane) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) + g.Expect(fc.Deferrals).To(HaveKeyWithValue( + reconcile.Request{NamespacedName: client.ObjectKeyFromObject(kcp)}, + BeTemporally("~", time.Now().Add(5*time.Second), 1*time.Second)), + ) + g.Eventually(func(g Gomega) { + g.Expect(env.List(context.Background(), bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(bothMachines.Items).To(HaveLen(2)) + }, timeout).Should(Succeed()) + + // manually increase number of nodes, make control plane healthy again + r.managementCluster.(*fakeManagementCluster).Workload.Status.Nodes++ + for i := range bothMachines.Items { + setMachineHealthy(&bothMachines.Items[i]) + } + controlPlane.Machines = collections.FromMachineList(bothMachines) + + machinesRequireUpgrade := collections.Machines{} + for i := range bothMachines.Items { + if bothMachines.Items[i].Spec.Version != "" && bothMachines.Items[i].Spec.Version != UpdatedVersion { + machinesRequireUpgrade[bothMachines.Items[i].Name] = &bothMachines.Items[i] + } + } + machinesUpToDateResults = map[string]internal.UpToDateResult{} + for _, m := range machinesRequireUpgrade { + machinesUpToDateResults[m.Name] = internal.UpToDateResult{EligibleForInPlaceUpdate: false} + } + + // run upgrade the second time, expect we scale down + result, err = r.updateControlPlane(ctx, controlPlane, machinesRequireUpgrade, machinesUpToDateResults) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + finalMachine := &clusterv1.MachineList{} + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, finalMachine, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(finalMachine.Items).To(HaveLen(1)) + // assert that the deleted machine is the initial machine + 
g.Expect(finalMachine.Items[0].Name).ToNot(Equal(initialMachine.Items[0].Name)) + }, timeout).Should(Succeed()) +} + +func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { + version := "v1.17.3" + g := NewWithT(t) + + cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault) + cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1" + cluster.Spec.ControlPlaneEndpoint.Port = 6443 + kcp.Spec.Replicas = ptr.To[int32](3) + kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = 0 + setKCPHealthy(kcp) + + fmc := &fakeManagementCluster{ + Machines: collections.Machines{}, + Workload: &fakeWorkloadCluster{ + Status: internal.ClusterStatus{Nodes: 3}, + }, + } + objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()} + for i := range 3 { + name := fmt.Sprintf("test-%d", i) + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: name, + Labels: desiredstate.ControlPlaneMachineLabels(kcp, cluster.Name), + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: name, + }, + }, + Version: version, + }, + } + cfg := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: name, + }, + } + objs = append(objs, m, cfg) + fmc.Machines.Insert(m) + } + fakeClient := newFakeClient(objs...) + fmc.Reader = fakeClient + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + SecretCachingClient: fakeClient, + managementCluster: fmc, + managementClusterUncached: fmc, + } + + controlPlane := &internal.ControlPlane{ + KCP: kcp, + Cluster: cluster, + Machines: nil, + } + controlPlane.InjectTestManagementCluster(r.managementCluster) + + result, err := r.reconcile(ctx, controlPlane) + g.Expect(result).To(BeComparableTo(ctrl.Result{})) + g.Expect(err).ToNot(HaveOccurred()) + + machineList := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(machineList.Items).To(HaveLen(3)) + for i := range machineList.Items { + setMachineHealthy(&machineList.Items[i]) + } + + // change the KCP spec so the machine becomes outdated + kcp.Spec.Version = UpdatedVersion + + // run upgrade, expect we scale down + needingUpgrade := collections.FromMachineList(machineList) + controlPlane.Machines = needingUpgrade + machinesUpToDateResults := map[string]internal.UpToDateResult{} + for _, m := range needingUpgrade { + machinesUpToDateResults[m.Name] = internal.UpToDateResult{EligibleForInPlaceUpdate: false} + } + result, err = r.updateControlPlane(ctx, controlPlane, needingUpgrade, machinesUpToDateResults) + g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) + g.Expect(err).ToNot(HaveOccurred()) + remainingMachines := &clusterv1.MachineList{} + g.Expect(fakeClient.List(ctx, remainingMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) + g.Expect(remainingMachines.Items).To(HaveLen(2)) +} + +func Test_rollingUpdate(t *testing.T) { + tests := []struct { + name string + maxSurge int32 + currentReplicas int32 + currentUpToDateReplicas int32 + desiredReplicas int32 + enableInPlaceUpdatesFeatureGate bool + machineEligibleForInPlaceUpdate bool + tryInPlaceUpdateFunc func(ctx context.Context, controlPlane *internal.ControlPlane, machineToInPlaceUpdate *clusterv1.Machine, 
machineUpToDateResult internal.UpToDateResult) (bool, ctrl.Result, error) + wantTryInPlaceUpdateCalled bool + wantScaleDownCalled bool + wantScaleUpCalled bool + wantError bool + wantErrorMessage string + wantRes ctrl.Result + }{ + // Regular rollout (no in-place updates) + { + name: "Regular rollout: maxSurge 1: scale up", + maxSurge: 1, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + wantScaleUpCalled: true, + }, + { + name: "Regular rollout: maxSurge 1: scale down", + maxSurge: 1, + currentReplicas: 4, + currentUpToDateReplicas: 1, + desiredReplicas: 3, + wantScaleDownCalled: true, + }, + { + name: "Regular rollout: maxSurge 0: scale down", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + wantScaleDownCalled: true, + }, + { + name: "Regular rollout: maxSurge 0: scale up", + maxSurge: 0, + currentReplicas: 2, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + wantScaleUpCalled: true, + }, + // In-place updates + // Note: maxSurge 0 or 1 doesn't have an impact on the in-place code path so not testing permutations here. + // Note: Scale up works the same way as for regular rollouts so not testing it here again. + // + // In-place updates: tryInPlaceUpdate not called + { + name: "In-place updates: feature gate disabled: scale down (tryInPlaceUpdate not called)", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: false, + wantTryInPlaceUpdateCalled: false, + wantScaleDownCalled: true, + }, + { + name: "In-place updates: Machine not eligible for in-place: scale down (tryInPlaceUpdate not called)", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: false, + wantTryInPlaceUpdateCalled: false, + wantScaleDownCalled: true, + }, + { + name: "In-place updates: already enough up-to-date replicas: scale down (tryInPlaceUpdate not called)", + maxSurge: 1, + currentReplicas: 4, + currentUpToDateReplicas: 3, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: true, + wantTryInPlaceUpdateCalled: false, + wantScaleDownCalled: true, + }, + // In-place updates: tryInPlaceUpdate called + { + name: "In-place updates: tryInPlaceUpdate returns error", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: true, + tryInPlaceUpdateFunc: func(_ context.Context, _ *internal.ControlPlane, _ *clusterv1.Machine, _ internal.UpToDateResult) (bool, ctrl.Result, error) { + return false, ctrl.Result{}, errors.New("in-place update error") + }, + wantTryInPlaceUpdateCalled: true, + wantScaleDownCalled: false, + wantError: true, + wantErrorMessage: "in-place update error", + }, + { + name: "In-place updates: tryInPlaceUpdate returns Requeue", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: true, + tryInPlaceUpdateFunc: func(_ context.Context, _ *internal.ControlPlane, _ *clusterv1.Machine, _ internal.UpToDateResult) (fallbackToScaleDown bool, _ ctrl.Result, _ error) { + return false, ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, nil + }, + wantTryInPlaceUpdateCalled: true, + wantScaleDownCalled: false, + wantRes: ctrl.Result{RequeueAfter: preflightFailedRequeueAfter}, + }, + { + name: 
"In-place updates: tryInPlaceUpdate returns fallback to scale down", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: true, + tryInPlaceUpdateFunc: func(_ context.Context, _ *internal.ControlPlane, _ *clusterv1.Machine, _ internal.UpToDateResult) (fallbackToScaleDown bool, _ ctrl.Result, _ error) { + return true, ctrl.Result{}, nil + }, + wantTryInPlaceUpdateCalled: true, + wantScaleDownCalled: true, + }, + { + name: "In-place updates: tryInPlaceUpdate returns nothing (in-place update triggered)", + maxSurge: 0, + currentReplicas: 3, + currentUpToDateReplicas: 0, + desiredReplicas: 3, + enableInPlaceUpdatesFeatureGate: true, + machineEligibleForInPlaceUpdate: true, + tryInPlaceUpdateFunc: func(_ context.Context, _ *internal.ControlPlane, _ *clusterv1.Machine, _ internal.UpToDateResult) (fallbackToScaleDown bool, _ ctrl.Result, _ error) { + return false, ctrl.Result{}, nil + }, + wantTryInPlaceUpdateCalled: true, + wantScaleDownCalled: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + if tt.enableInPlaceUpdatesFeatureGate { + utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.InPlaceUpdates, true) + } + + var inPlaceUpdateCalled bool + var scaleDownCalled bool + var scaleUpCalled bool + r := &KubeadmControlPlaneReconciler{ + overrideTryInPlaceUpdateFunc: func(ctx context.Context, controlPlane *internal.ControlPlane, machineToInPlaceUpdate *clusterv1.Machine, machineUpToDateResult internal.UpToDateResult) (bool, ctrl.Result, error) { + inPlaceUpdateCalled = true + return tt.tryInPlaceUpdateFunc(ctx, controlPlane, machineToInPlaceUpdate, machineUpToDateResult) + }, + overrideScaleDownControlPlaneFunc: func(_ context.Context, _ *internal.ControlPlane, _ *clusterv1.Machine) (ctrl.Result, error) { + scaleDownCalled = true + return ctrl.Result{}, nil + }, + overrideScaleUpControlPlaneFunc: func(_ context.Context, _ *internal.ControlPlane) (ctrl.Result, error) { + scaleUpCalled = true + return ctrl.Result{}, nil + }, + } + + machines := collections.Machines{} + for i := range tt.currentReplicas { + machines[fmt.Sprintf("machine-%d", i)] = machine(fmt.Sprintf("machine-%d", i)) + } + machinesUpToDate := collections.Machines{} + for i := range tt.currentUpToDateReplicas { + machinesUpToDate[fmt.Sprintf("machine-%d", i)] = machine(fmt.Sprintf("machine-%d", i)) + } + + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Replicas: ptr.To(tt.desiredReplicas), + Rollout: controlplanev1.KubeadmControlPlaneRolloutSpec{ + Strategy: controlplanev1.KubeadmControlPlaneRolloutStrategy{ + RollingUpdate: controlplanev1.KubeadmControlPlaneRolloutStrategyRollingUpdate{ + MaxSurge: ptr.To(intstr.FromInt32(tt.maxSurge)), + }, + }, + }, + }, + }, + Cluster: &clusterv1.Cluster{}, + Machines: machines, + MachinesNotUpToDate: machines.Difference(machinesUpToDate), + } + machinesNeedingRollout, _ := controlPlane.MachinesNeedingRollout() + machinesUpToDateResults := map[string]internal.UpToDateResult{} + for _, m := range machinesNeedingRollout { + machinesUpToDateResults[m.Name] = internal.UpToDateResult{EligibleForInPlaceUpdate: tt.machineEligibleForInPlaceUpdate} + } + res, err := r.rollingUpdate(ctx, controlPlane, machinesNeedingRollout, machinesUpToDateResults) + if tt.wantError { + g.Expect(err).To(HaveOccurred()) + 
g.Expect(err.Error()).To(Equal(tt.wantErrorMessage)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(res).To(Equal(tt.wantRes)) + + g.Expect(inPlaceUpdateCalled).To(Equal(tt.wantTryInPlaceUpdateCalled), "inPlaceUpdateCalled: actual: %t expected: %t", inPlaceUpdateCalled, tt.wantTryInPlaceUpdateCalled) + g.Expect(scaleDownCalled).To(Equal(tt.wantScaleDownCalled), "scaleDownCalled: actual: %t expected: %t", scaleDownCalled, tt.wantScaleDownCalled) + g.Expect(scaleUpCalled).To(Equal(tt.wantScaleUpCalled), "scaleUpCalled: actual: %t expected: %t", scaleUpCalled, tt.wantScaleUpCalled) + }) + } +} + +type machineOpt func(*clusterv1.Machine) + +func machine(name string, opts ...machineOpt) *clusterv1.Machine { + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + } + for _, opt := range opts { + opt(m) + } + return m +} diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go deleted file mode 100644 index d267a4205e0e..000000000000 --- a/controlplane/kubeadm/internal/controllers/upgrade.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - - "github.com/blang/semver/v4" - "github.com/pkg/errors" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - - bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" - controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/util/collections" -) - -func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( - ctx context.Context, - controlPlane *internal.ControlPlane, - machinesRequireUpgrade collections.Machines, -) (ctrl.Result, error) { - logger := ctrl.LoggerFrom(ctx) - - // TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster - - workloadCluster, err := controlPlane.GetWorkloadCluster(ctx) - if err != nil { - logger.Error(err, "failed to get remote client for workload cluster", "Cluster", klog.KObj(controlPlane.Cluster)) - return ctrl.Result{}, err - } - - parsedVersion, err := semver.ParseTolerant(controlPlane.KCP.Spec.Version) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", controlPlane.KCP.Spec.Version) - } - - // Ensure kubeadm clusterRoleBinding for v1.29+ as per https://github.com/kubernetes/kubernetes/pull/121305 - if err := workloadCluster.AllowClusterAdminPermissions(ctx, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to set cluster-admin ClusterRoleBinding for kubeadm") - } - - kubeadmCMMutators := make([]func(*bootstrapv1.ClusterConfiguration), 0) - - if controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.IsDefined() { - // Get the imageRepository or the correct value if nothing is set and a migration is necessary. 
- imageRepository := internal.ImageRepositoryFromClusterConfig(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration) - - kubeadmCMMutators = append(kubeadmCMMutators, - workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(imageRepository), - workloadCluster.UpdateFeatureGatesInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec, parsedVersion), - workloadCluster.UpdateAPIServerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer), - workloadCluster.UpdateControllerManagerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager), - workloadCluster.UpdateSchedulerInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler), - workloadCluster.UpdateCertificateValidityPeriodDays(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays)) - - // Etcd local and external are mutually exclusive and they cannot be switched, once set. - if controlPlane.IsEtcdManaged() { - kubeadmCMMutators = append(kubeadmCMMutators, - workloadCluster.UpdateEtcdLocalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local)) - } else { - kubeadmCMMutators = append(kubeadmCMMutators, - workloadCluster.UpdateEtcdExternalInKubeadmConfigMap(controlPlane.KCP.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External)) - } - } - - // collectively update Kubeadm config map - if err = workloadCluster.UpdateClusterConfiguration(ctx, parsedVersion, kubeadmCMMutators...); err != nil { - return ctrl.Result{}, err - } - - switch controlPlane.KCP.Spec.Rollout.Strategy.Type { - case controlplanev1.RollingUpdateStrategyType: - // RolloutStrategy is currently defaulted and validated to be RollingUpdate - // We can ignore MaxUnavailable because we are enforcing health checks before we get here. - maxNodes := *controlPlane.KCP.Spec.Replicas + int32(controlPlane.KCP.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntValue()) - if int32(controlPlane.Machines.Len()) < maxNodes { - // scaleUp ensures that we don't continue scaling up while waiting for Machines to have NodeRefs - return r.scaleUpControlPlane(ctx, controlPlane) - } - return r.scaleDownControlPlane(ctx, controlPlane, machinesRequireUpgrade) - default: - logger.Info("RolloutStrategy type is not set to RollingUpdate, unable to determine the strategy for rolling out machines") - return ctrl.Result{}, nil - } -} diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go deleted file mode 100644 index c841da58c0c2..000000000000 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "testing" - "time" - - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "k8s.io/utils/ptr" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" - "sigs.k8s.io/cluster-api/internal/util/ssa" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/test/builder" -) - -const ( - UpdatedVersion string = "v1.17.4" - Host string = "nodomain.example.com" -) - -func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { - setup := func(t *testing.T, g *WithT) *corev1.Namespace { - t.Helper() - - t.Log("Creating the namespace") - ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-rollout-scaleup") - g.Expect(err).ToNot(HaveOccurred()) - - return ns - } - - teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) { - t.Helper() - - t.Log("Deleting the namespace") - g.Expect(env.Delete(ctx, ns)).To(Succeed()) - } - - g := NewWithT(t) - namespace := setup(t, g) - defer teardown(t, g, namespace) - - timeout := 30 * time.Second - - cluster, kcp, genericInfrastructureMachineTemplate := createClusterWithControlPlane(namespace.Name) - g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate, client.FieldOwner("manager"))).To(Succeed()) - cluster.UID = types.UID(util.RandomString(10)) - cluster.Spec.ControlPlaneEndpoint.Host = Host - cluster.Spec.ControlPlaneEndpoint.Port = 6443 - cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true) - kcp.UID = types.UID(util.RandomString(10)) - kcp.Spec.Replicas = ptr.To[int32](1) - setKCPHealthy(kcp) - - r := &KubeadmControlPlaneReconciler{ - Client: env, - SecretCachingClient: secretCachingClient, - recorder: record.NewFakeRecorder(32), - managementCluster: &fakeManagementCluster{ - Management: &internal.Management{Client: env}, - Workload: &fakeWorkloadCluster{ - Status: internal.ClusterStatus{Nodes: 1}, - }, - }, - managementClusterUncached: &fakeManagementCluster{ - Management: &internal.Management{Client: env}, - Workload: &fakeWorkloadCluster{ - Status: internal.ClusterStatus{Nodes: 1}, - }, - }, - ssaCache: ssa.NewCache("test-controller"), - } - controlPlane := &internal.ControlPlane{ - KCP: kcp, - Cluster: cluster, - Machines: nil, - } - controlPlane.InjectTestManagementCluster(r.managementCluster) - - result, err := r.initializeControlPlane(ctx, controlPlane) - g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) - g.Expect(err).ToNot(HaveOccurred()) - - // initial setup - initialMachine := &clusterv1.MachineList{} - g.Eventually(func(g Gomega) { - // Nb. This Eventually block also forces the cache to update so that subsequent - // reconcile and upgradeControlPlane calls use the updated cache and avoids flakiness in the test. 
- g.Expect(env.List(ctx, initialMachine, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(initialMachine.Items).To(HaveLen(1)) - }, timeout).Should(Succeed()) - for i := range initialMachine.Items { - setMachineHealthy(&initialMachine.Items[i]) - } - - // change the KCP spec so the machine becomes outdated - kcp.Spec.Version = UpdatedVersion - - // run upgrade the first time, expect we scale up - needingUpgrade := collections.FromMachineList(initialMachine) - controlPlane.Machines = needingUpgrade - result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade) - g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) - g.Expect(err).ToNot(HaveOccurred()) - bothMachines := &clusterv1.MachineList{} - g.Eventually(func(g Gomega) { - g.Expect(env.List(ctx, bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(bothMachines.Items).To(HaveLen(2)) - }, timeout).Should(Succeed()) - - // run upgrade a second time, simulate that the node has not appeared yet but the machine exists - - // Unhealthy control plane will be detected during reconcile loop and upgrade will never be called. - controlPlane = &internal.ControlPlane{ - KCP: kcp, - Cluster: cluster, - Machines: collections.FromMachineList(bothMachines), - } - controlPlane.InjectTestManagementCluster(r.managementCluster) - - result, err = r.reconcile(context.Background(), controlPlane) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: preflightFailedRequeueAfter})) - g.Eventually(func(g Gomega) { - g.Expect(env.List(context.Background(), bothMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(bothMachines.Items).To(HaveLen(2)) - }, timeout).Should(Succeed()) - - // manually increase number of nodes, make control plane healthy again - r.managementCluster.(*fakeManagementCluster).Workload.Status.Nodes++ - for i := range bothMachines.Items { - setMachineHealthy(&bothMachines.Items[i]) - } - controlPlane.Machines = collections.FromMachineList(bothMachines) - - machinesRequireUpgrade := collections.Machines{} - for i := range bothMachines.Items { - if bothMachines.Items[i].Spec.Version != "" && bothMachines.Items[i].Spec.Version != UpdatedVersion { - machinesRequireUpgrade[bothMachines.Items[i].Name] = &bothMachines.Items[i] - } - } - - // run upgrade the second time, expect we scale down - result, err = r.upgradeControlPlane(ctx, controlPlane, machinesRequireUpgrade) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) - finalMachine := &clusterv1.MachineList{} - g.Eventually(func(g Gomega) { - g.Expect(env.List(ctx, finalMachine, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(finalMachine.Items).To(HaveLen(1)) - // assert that the deleted machine is the initial machine - g.Expect(finalMachine.Items[0].Name).ToNot(Equal(initialMachine.Items[0].Name)) - }, timeout).Should(Succeed()) -} - -func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { - version := "v1.17.3" - g := NewWithT(t) - - cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault) - cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1" - cluster.Spec.ControlPlaneEndpoint.Port = 6443 - kcp.Spec.Replicas = ptr.To[int32](3) - kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = 0 - setKCPHealthy(kcp) - - fmc := &fakeManagementCluster{ - Machines: collections.Machines{}, - Workload: &fakeWorkloadCluster{ - Status: 
internal.ClusterStatus{Nodes: 3}, - }, - } - objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()} - for i := range 3 { - name := fmt.Sprintf("test-%d", i) - m := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: cluster.Namespace, - Name: name, - Labels: internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name), - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: bootstrapv1.GroupVersion.Group, - Kind: "KubeadmConfig", - Name: name, - }, - }, - Version: version, - }, - } - cfg := &bootstrapv1.KubeadmConfig{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: cluster.Namespace, - Name: name, - }, - } - objs = append(objs, m, cfg) - fmc.Machines.Insert(m) - } - fakeClient := newFakeClient(objs...) - fmc.Reader = fakeClient - r := &KubeadmControlPlaneReconciler{ - Client: fakeClient, - SecretCachingClient: fakeClient, - managementCluster: fmc, - managementClusterUncached: fmc, - } - - controlPlane := &internal.ControlPlane{ - KCP: kcp, - Cluster: cluster, - Machines: nil, - } - controlPlane.InjectTestManagementCluster(r.managementCluster) - - result, err := r.reconcile(ctx, controlPlane) - g.Expect(result).To(BeComparableTo(ctrl.Result{})) - g.Expect(err).ToNot(HaveOccurred()) - - machineList := &clusterv1.MachineList{} - g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(machineList.Items).To(HaveLen(3)) - for i := range machineList.Items { - setMachineHealthy(&machineList.Items[i]) - } - - // change the KCP spec so the machine becomes outdated - kcp.Spec.Version = UpdatedVersion - - // run upgrade, expect we scale down - needingUpgrade := collections.FromMachineList(machineList) - controlPlane.Machines = needingUpgrade - - result, err = r.upgradeControlPlane(ctx, controlPlane, needingUpgrade) - g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: true})) - g.Expect(err).ToNot(HaveOccurred()) - remainingMachines := &clusterv1.MachineList{} - g.Expect(fakeClient.List(ctx, remainingMachines, client.InNamespace(cluster.Namespace))).To(Succeed()) - g.Expect(remainingMachines.Items).To(HaveLen(2)) -} - -type machineOpt func(*clusterv1.Machine) - -func machine(name string, opts ...machineOpt) *clusterv1.Machine { - m := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: metav1.NamespaceDefault, - }, - } - for _, opt := range opts { - opt(m) - } - return m -} diff --git a/controlplane/kubeadm/internal/desiredstate/desired_state.go b/controlplane/kubeadm/internal/desiredstate/desired_state.go new file mode 100644 index 000000000000..5449f7cb9129 --- /dev/null +++ b/controlplane/kubeadm/internal/desiredstate/desired_state.go @@ -0,0 +1,341 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package desiredstate contains utils to compute the desired state of a Machine. 
+package desiredstate + +import ( + "context" + "strings" + + "github.com/blang/semver/v4" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/internal/contract" + topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/labels/format" + "sigs.k8s.io/cluster-api/util/version" +) + +var ( + // minKubernetesVersionControlPlaneKubeletLocalMode is the min version from which + // we will enable the ControlPlaneKubeletLocalMode kubeadm feature gate. + // Note: We have to do this with Kubernetes 1.31. Because with that version we encountered + // a case where it's not okay anymore to ignore the Kubernetes version skew (kubelet 1.31 uses + // the spec.clusterIP field selector that is only implemented in kube-apiserver >= 1.31.0). + minKubernetesVersionControlPlaneKubeletLocalMode = semver.MustParse("1.31.0") + + // ControlPlaneKubeletLocalMode is a feature gate of kubeadm that ensures + // kubelets only communicate with the local apiserver. + ControlPlaneKubeletLocalMode = "ControlPlaneKubeletLocalMode" +) + +// MandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns. +var MandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ + {ConditionType: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition}, +} + +// etcdMandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns if etcd is managed. +var etcdMandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ + {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition}, + {ConditionType: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition}, +} + +// kubeadmClusterConfigurationAnnotation is an annotation that was set in Cluster API <= v1.11. +// Starting with Cluster API v1.12 we remove it from existing Machines. +// +// Deprecated: This constant and corresponding cleanup code can be removed once we don't support upgrades from Cluster API v1.12 anymore. +const kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration" + +// ComputeDesiredMachine computes the desired Machine. +// This Machine will be used during reconciliation to: +// * create a new Machine +// * update an existing Machine +// Because we are using Server-Side-Apply we always have to calculate the full object. +// There are small differences in how we calculate the Machine depending on if it +// is a create or update. Example: for a new Machine we have to calculate a new name, +// while for an existing Machine we have to use the name of the existing Machine. 
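ComputeDesiredMachine below only generates a name for new Machines and requires the {{ .random }} placeholder so generated names stay unique. A rough stand-alone illustration with text/template; the flat data keys and the fixed suffix are assumptions for the example, the real generator is topologynames.KCPMachineNameGenerator:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"text/template"
)

// generateName enforces the {{ .random }} placeholder and renders the template,
// loosely mirroring the naming check in ComputeDesiredMachine.
func generateName(nameTemplate string, data map[string]interface{}) (string, error) {
	if !strings.Contains(nameTemplate, "{{ .random }}") {
		return "", errors.New("{{ .random }} is missing in machineNaming.template")
	}
	tpl, err := template.New("name").Parse(nameTemplate)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tpl.Execute(&buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	name, err := generateName("{{ .kubeadmControlPlane }}-kcp-{{ .random }}", map[string]interface{}{
		"kubeadmControlPlane": "testControlPlane",
		"random":              "x7k2q", // stand-in for the generated random suffix
	})
	fmt.Println(name, err)
}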
+func ComputeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { + var machineName string + var machineUID types.UID + var version string + annotations := map[string]string{} + if existingMachine == nil { + // Creating a new machine + nameTemplate := "{{ .kubeadmControlPlane.name }}-{{ .random }}" + if kcp.Spec.MachineNaming.Template != "" { + nameTemplate = kcp.Spec.MachineNaming.Template + if !strings.Contains(nameTemplate, "{{ .random }}") { + return nil, errors.New("failed to compute desired Machine: cannot generate Machine name: {{ .random }} is missing in machineNaming.template") + } + } + generatedMachineName, err := topologynames.KCPMachineNameGenerator(nameTemplate, cluster.Name, kcp.Name).GenerateName() + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired Machine: failed to generate Machine name") + } + machineName = generatedMachineName + version = kcp.Spec.Version + + // In case this machine is being created as a consequence of a remediation, then add an annotation + // tracking remediating data. + // NOTE: This is required in order to track remediation retries. + if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } else { + // Updating an existing machine + machineName = existingMachine.Name + machineUID = existingMachine.UID + version = existingMachine.Spec.Version + + // Cleanup the KubeadmClusterConfigurationAnnotation annotation that was set in Cluster API <= v1.11. + delete(annotations, kubeadmClusterConfigurationAnnotation) + + // If the machine already has remediation data then preserve it. + // NOTE: This is required in order to track remediation retries. + if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } + // Setting pre-terminate hook so we can later remove the etcd member right before Machine termination + // (i.e. before InfraMachine deletion). + annotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + + // Construct the basic Machine. + desiredMachine := &clusterv1.Machine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + }, + ObjectMeta: metav1.ObjectMeta{ + UID: machineUID, + Name: machineName, + Namespace: kcp.Namespace, + // Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine. + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: version, + FailureDomain: failureDomain, + }, + } + + // Set the in-place mutable fields. + // When we create a new Machine we will just create the Machine with those fields. + // When we update an existing Machine will we update the fields on the existing Machine (in-place mutate). 
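Further down in this function the readiness gates from the mandatory set, the etcd set, and the MachineTemplate are merged while dropping duplicates and keeping the first occurrence. A stand-alone sketch of that order-preserving dedup, using plain strings instead of MachineReadinessGate:

package main

import "fmt"

// dedupe keeps the first occurrence of each gate and preserves order,
// matching how ComputeDesiredMachine builds Spec.ReadinessGates.
func dedupe(gates ...[]string) []string {
	seen := map[string]bool{}
	out := []string{}
	for _, group := range gates {
		for _, g := range group {
			if seen[g] {
				continue
			}
			seen[g] = true
			out = append(out, g)
		}
	}
	return out
}

func main() {
	mandatory := []string{"APIServerPodHealthy", "ControllerManagerPodHealthy", "SchedulerPodHealthy"}
	etcd := []string{"EtcdPodHealthy", "EtcdMemberHealthy"}
	fromTemplate := []string{"Foo", "EtcdPodHealthy"} // the duplicate is dropped
	fmt.Println(dedupe(mandatory, etcd, fromTemplate))
}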
+ + // Set labels + desiredMachine.Labels = ControlPlaneMachineLabels(kcp, cluster.Name) + + // Set annotations + desiredMachine.Annotations = ControlPlaneMachineAnnotations(kcp) + for k, v := range annotations { + desiredMachine.Annotations[k] = v + } + + // Set other in-place mutable fields + desiredMachine.Spec.Deletion.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds + desiredMachine.Spec.Deletion.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds + desiredMachine.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds + + // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. + if existingMachine != nil { + desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef + desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef + } + + // Set machines readiness gates + allReadinessGates := []clusterv1.MachineReadinessGate{} + allReadinessGates = append(allReadinessGates, MandatoryMachineReadinessGates...) + isEtcdManaged := !kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() + if isEtcdManaged { + allReadinessGates = append(allReadinessGates, etcdMandatoryMachineReadinessGates...) + } + allReadinessGates = append(allReadinessGates, kcp.Spec.MachineTemplate.Spec.ReadinessGates...) + + desiredMachine.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{} + knownGates := sets.Set[string]{} + for _, gate := range allReadinessGates { + if knownGates.Has(gate.ConditionType) { + continue + } + desiredMachine.Spec.ReadinessGates = append(desiredMachine.Spec.ReadinessGates, gate) + knownGates.Insert(gate.ConditionType) + } + + return desiredMachine, nil +} + +// ComputeDesiredKubeadmConfig computes the desired KubeadmConfig. +func ComputeDesiredKubeadmConfig(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, isJoin bool, name string, existingKubeadmConfig *bootstrapv1.KubeadmConfig) (*bootstrapv1.KubeadmConfig, error) { + // Create an owner reference without a controller reference because the owning controller is the machine controller + var ownerReferences []metav1.OwnerReference + if existingKubeadmConfig == nil || !util.HasOwner(existingKubeadmConfig.OwnerReferences, clusterv1.GroupVersion.String(), []string{"Machine"}) { + ownerReferences = append(ownerReferences, metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }) + } + + spec := kcp.Spec.KubeadmConfigSpec.DeepCopy() + if isJoin { + // Note: When building a KubeadmConfig for a joining CP machine empty out the unnecessary InitConfiguration. + spec.InitConfiguration = bootstrapv1.InitConfiguration{} + // Note: For the joining we are preserving the ClusterConfiguration in order to determine if the + // cluster is using an external etcd in the kubeadm bootstrap provider (even if this is not required by kubeadm Join). + // Note: We are always setting JoinConfiguration.ControlPlane so we can later identify this KubeadmConfig as a + // join KubeadmConfig. + if spec.JoinConfiguration.ControlPlane == nil { + spec.JoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{} + } + } else { + // Note: When building a KubeadmConfig for the first CP machine empty out the unnecessary JoinConfiguration. 
+ spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + } + + parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) + if err != nil { + return nil, errors.Wrapf(err, "failed to compute desired KubeadmConfig: failed to parse Kubernetes version %q", kcp.Spec.Version) + } + DefaultFeatureGates(spec, parsedVersion) + + kubeadmConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: kcp.Namespace, + Labels: ControlPlaneMachineLabels(kcp, cluster.Name), + Annotations: ControlPlaneMachineAnnotations(kcp), + OwnerReferences: ownerReferences, + }, + Spec: *spec, + } + if existingKubeadmConfig != nil { + kubeadmConfig.SetUID(existingKubeadmConfig.GetUID()) + } + return kubeadmConfig, nil +} + +// ComputeDesiredInfraMachine computes the desired InfraMachine. +func ComputeDesiredInfraMachine(ctx context.Context, c client.Client, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, name string, existingInfraMachine *unstructured.Unstructured) (*unstructured.Unstructured, error) { + // Create an owner reference without a controller reference because the owning controller is the machine controller + var ownerReference *metav1.OwnerReference + if existingInfraMachine == nil || !util.HasOwner(existingInfraMachine.GetOwnerReferences(), clusterv1.GroupVersion.String(), []string{"Machine"}) { + ownerReference = &metav1.OwnerReference{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + } + } + + apiVersion, err := contract.GetAPIVersion(ctx, c, kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind()) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + templateRef := &corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind, + Namespace: kcp.Namespace, + Name: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name, + } + + template, err := external.Get(ctx, c, templateRef) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + generateTemplateInput := &external.GenerateTemplateInput{ + Template: template, + TemplateRef: templateRef, + Namespace: kcp.Namespace, + Name: name, + ClusterName: cluster.Name, + OwnerRef: ownerReference, + Labels: ControlPlaneMachineLabels(kcp, cluster.Name), + Annotations: ControlPlaneMachineAnnotations(kcp), + } + infraMachine, err := external.GenerateTemplate(generateTemplateInput) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired InfraMachine") + } + if existingInfraMachine != nil { + infraMachine.SetUID(existingInfraMachine.GetUID()) + } + return infraMachine, nil +} + +// DefaultFeatureGates defaults the feature gates field. +func DefaultFeatureGates(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, kubernetesVersion semver.Version) { + if version.Compare(kubernetesVersion, minKubernetesVersionControlPlaneKubeletLocalMode, version.WithoutPreReleases()) < 0 { + return + } + + if kubeadmConfigSpec.ClusterConfiguration.FeatureGates == nil { + kubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{} + } + + if _, ok := kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode]; !ok { + kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode] = true + } +} + +// ControlPlaneMachineLabels returns a set of labels to add to a control plane machine for this specific cluster. 
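ControlPlaneMachineLabels below copies the MachineTemplate labels into a fresh map (so the map on the KCP object is never mutated) and then forces the labels the controller owns on top. A minimal sketch of that copy-then-override pattern; the label key strings are written inline here, whereas the real code uses the clusterv1 label constants and hashes over-long names via format.MustFormatValue:

package main

import "fmt"

// machineLabels copies the template labels and then forces the controller-owned
// labels, mirroring ControlPlaneMachineLabels.
func machineLabels(templateLabels map[string]string, clusterName, kcpName string) map[string]string {
	labels := map[string]string{}
	for k, v := range templateLabels { // copy, never mutate the source map
		labels[k] = v
	}
	labels["cluster.x-k8s.io/cluster-name"] = clusterName
	labels["cluster.x-k8s.io/control-plane"] = ""
	labels["cluster.x-k8s.io/control-plane-name"] = kcpName
	return labels
}

func main() {
	tpl := map[string]string{"team": "platform", "cluster.x-k8s.io/cluster-name": "wrong"} // forced value wins
	fmt.Println(machineLabels(tpl, "testCluster", "testControlPlane"))
}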
+func ControlPlaneMachineLabels(kcp *controlplanev1.KubeadmControlPlane, clusterName string) map[string]string { + labels := map[string]string{} + + // Add the labels from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. + for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Labels { + labels[k] = v + } + + // Always force these labels over the ones coming from the spec. + labels[clusterv1.ClusterNameLabel] = clusterName + labels[clusterv1.MachineControlPlaneLabel] = "" + // Note: MustFormatValue is used here as the label value can be a hash if the control plane name is longer than 63 characters. + labels[clusterv1.MachineControlPlaneNameLabel] = format.MustFormatValue(kcp.Name) + return labels +} + +// ControlPlaneMachineAnnotations returns a set of annotations to add to a control plane machine for this specific cluster. +func ControlPlaneMachineAnnotations(kcp *controlplanev1.KubeadmControlPlane) map[string]string { + annotations := map[string]string{} + + // Add the annotations from the MachineTemplate. + // Note: we intentionally don't use the map directly to ensure we don't modify the map in KCP. + for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { + annotations[k] = v + } + + return annotations +} diff --git a/controlplane/kubeadm/internal/desiredstate/desired_state_test.go b/controlplane/kubeadm/internal/desiredstate/desired_state_test.go new file mode 100644 index 000000000000..b7d5de7f8091 --- /dev/null +++ b/controlplane/kubeadm/internal/desiredstate/desired_state_test.go @@ -0,0 +1,885 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package desiredstate + +import ( + "fmt" + "testing" + + "github.com/blang/semver/v4" + . 
"github.com/onsi/gomega" + gomegatypes "github.com/onsi/gomega/types" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func Test_ComputeDesiredMachine(t *testing.T) { + namingTemplateKey := "-kcp" + kcpName := "testControlPlane" + clusterName := "testCluster" + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: metav1.NamespaceDefault, + }, + } + duration5s := ptr.To(int32(5)) + duration10s := ptr.To(int32(10)) + kcpMachineTemplateObjectMeta := clusterv1.ObjectMeta{ + Labels: map[string]string{ + "machineTemplateLabel": "machineTemplateLabelValue", + }, + Annotations: map[string]string{ + "machineTemplateAnnotation": "machineTemplateAnnotationValue", + }, + } + kcpMachineTemplateObjectMetaCopy := kcpMachineTemplateObjectMeta.DeepCopy() + + infraRef := &clusterv1.ContractVersionedObjectReference{ + Kind: "InfraKind", + APIGroup: clusterv1.GroupVersionInfrastructure.Group, + Name: "infra", + } + bootstrapRef := clusterv1.ContractVersionedObjectReference{ + Kind: "BootstrapKind", + APIGroup: clusterv1.GroupVersionBootstrap.Group, + Name: "bootstrap", + } + + tests := []struct { + name string + kcp *controlplanev1.KubeadmControlPlane + isUpdatingExistingMachine bool + want []gomegatypes.GomegaMatcher + wantErr bool + }{ + { + name: "should return the correct Machine object when creating a new Machine", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: false, + want: []gomegatypes.GomegaMatcher{ + HavePrefix(kcpName + namingTemplateKey), + Not(HaveSuffix("00000")), + }, + wantErr: false, + }, + { + name: "should return error when creating a new Machine when '.random' is not added in template", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: 
controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: true, + }, + { + name: "should not return error when creating a new Machine when the generated name exceeds 63", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .random }}" + fmt.Sprintf("%059d", 0), + }, + }, + }, + isUpdatingExistingMachine: false, + want: []gomegatypes.GomegaMatcher{ + ContainSubstring(fmt.Sprintf("%053d", 0)), + Not(HaveSuffix("00000")), + }, + wantErr: false, + }, + { + name: "should return error when creating a new Machine with invalid template", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "some-hardcoded-name-{{ .doesnotexistindata }}-{{ .random }}", // invalid template + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: true, + }, + { + name: "should return the correct Machine object when creating a new Machine with default templated name", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + 
CertificatesDir: "foo", + }, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: false, + want: []gomegatypes.GomegaMatcher{ + HavePrefix(kcpName), + Not(HaveSuffix("00000")), + }, + }, + { + name: "should return the correct Machine object when creating a new Machine with additional kcp readinessGates", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Bar", + }, + }, + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: false, + }, + { + name: "should return the correct Machine object when updating an existing Machine (empty ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, + }, + { + name: "should return the correct Machine object when updating an existing Machine (outdated ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, 
+ }, + { + name: "should return the correct Machine object when updating an existing Machine (up to date ClusterConfiguration annotation)", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: kcpMachineTemplateObjectMeta, + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + }, + ReadinessGates: []clusterv1.MachineReadinessGate{ + { + ConditionType: "Foo", + }, + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, + }, + MachineNaming: controlplanev1.MachineNamingSpec{ + Template: "{{ .kubeadmControlPlane.name }}" + namingTemplateKey + "-{{ .random }}", + }, + }, + }, + isUpdatingExistingMachine: true, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + var desiredMachine *clusterv1.Machine + failureDomain := "fd-1" + var expectedMachineSpec clusterv1.MachineSpec + var err error + + if tt.isUpdatingExistingMachine { + machineName := "existing-machine" + machineUID := types.UID("abc-123-existing-machine") + // Use different ClusterConfiguration string than the information present in KCP + // to verify that for an existing machine we do not override this information. + remediationData := "remediation-data" + machineVersion := "v1.25.3" + existingMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName, + UID: machineUID, + Annotations: map[string]string{ + controlplanev1.RemediationForAnnotation: remediationData, + }, + }, + Spec: clusterv1.MachineSpec{ + Version: machineVersion, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + InfrastructureRef: *infraRef, + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Foo"}}, + }, + } + + desiredMachine, err = ComputeDesiredMachine( + tt.kcp, cluster, + existingMachine.Spec.FailureDomain, existingMachine, + ) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + expectedMachineSpec = clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: machineVersion, // Should use the Machine version and not the version from KCP. 
+ Bootstrap: clusterv1.Bootstrap{ + ConfigRef: bootstrapRef, + }, + InfrastructureRef: *infraRef, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, + }, + ReadinessGates: append(append(MandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), + } + + // Verify the Name and UID of the Machine remain unchanged + g.Expect(desiredMachine.Name).To(Equal(machineName)) + g.Expect(desiredMachine.UID).To(Equal(machineUID)) + // Verify annotations. + expectedAnnotations := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Annotations { + expectedAnnotations[k] = v + } + expectedAnnotations[controlplanev1.RemediationForAnnotation] = remediationData + // The pre-terminate annotation should always be added + expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) + } else { + desiredMachine, err = ComputeDesiredMachine( + tt.kcp, cluster, + failureDomain, nil, + ) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).ToNot(HaveOccurred()) + + expectedMachineSpec = clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: tt.kcp.Spec.Version, + FailureDomain: failureDomain, + Deletion: clusterv1.MachineDeletionSpec{ + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds, + }, + ReadinessGates: append(append(MandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.Spec.ReadinessGates...), + } + // Verify Name. + for _, matcher := range tt.want { + g.Expect(desiredMachine.Name).To(matcher) + } + // Verify annotations. + expectedAnnotations := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Annotations { + expectedAnnotations[k] = v + } + // The pre-terminate annotation should always be added + expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" + g.Expect(desiredMachine.Annotations).To(Equal(expectedAnnotations)) + } + + g.Expect(desiredMachine.Namespace).To(Equal(tt.kcp.Namespace)) + g.Expect(desiredMachine.OwnerReferences).To(HaveLen(1)) + g.Expect(desiredMachine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(tt.kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")))) + g.Expect(desiredMachine.Spec).To(BeComparableTo(expectedMachineSpec)) + + // Verify that the machineTemplate.ObjectMeta has been propagated to the Machine. + // Verify labels. + expectedLabels := map[string]string{} + for k, v := range kcpMachineTemplateObjectMeta.Labels { + expectedLabels[k] = v + } + expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name + expectedLabels[clusterv1.MachineControlPlaneLabel] = "" + expectedLabels[clusterv1.MachineControlPlaneNameLabel] = tt.kcp.Name + g.Expect(desiredMachine.Labels).To(Equal(expectedLabels)) + + // Verify that machineTemplate.ObjectMeta in KCP has not been modified. 
+ g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Labels).To(Equal(kcpMachineTemplateObjectMetaCopy.Labels)) + g.Expect(tt.kcp.Spec.MachineTemplate.ObjectMeta.Annotations).To(Equal(kcpMachineTemplateObjectMetaCopy.Annotations)) + }) + } +} + +func Test_ComputeDesiredKubeadmConfig(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, + UID: "abc-123-kcp-control-plane", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("4"), + }, + }, + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + }, + }, + Version: "v1.31.0", + }, + } + expectedKubeadmConfigWithoutOwner := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + Annotations: map[string]string{}, + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + // InitConfiguration and JoinConfiguration is added below. + }, + } + preExistingKubeadmConfigOwnedByMachine := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: "machine-1", + }}, + }, + } + + for _, isJoin := range []bool{true, false} { + expectedKubeadmConfigWithoutOwner := expectedKubeadmConfigWithoutOwner.DeepCopy() + if isJoin { + expectedKubeadmConfigWithoutOwner.Spec.InitConfiguration = bootstrapv1.InitConfiguration{} + expectedKubeadmConfigWithoutOwner.Spec.JoinConfiguration = kcp.Spec.KubeadmConfigSpec.JoinConfiguration + expectedKubeadmConfigWithoutOwner.Spec.JoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{} + } else { + expectedKubeadmConfigWithoutOwner.Spec.InitConfiguration = kcp.Spec.KubeadmConfigSpec.InitConfiguration + expectedKubeadmConfigWithoutOwner.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} + } + + kubeadmConfig, err := ComputeDesiredKubeadmConfig(kcp, cluster, isJoin, "machine-1", nil) + g.Expect(err).ToNot(HaveOccurred()) + expectedKubeadmConfig := expectedKubeadmConfigWithoutOwner.DeepCopy() + // New KubeadmConfig should have KCP ownerReference. 
+ expectedKubeadmConfig.SetOwnerReferences([]metav1.OwnerReference{{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }}) + g.Expect(kubeadmConfig).To(BeComparableTo(expectedKubeadmConfig)) + + kubeadmConfig, err = ComputeDesiredKubeadmConfig(kcp, cluster, isJoin, "machine-1", preExistingKubeadmConfigOwnedByMachine) + g.Expect(err).ToNot(HaveOccurred()) + // If there is a pre-existing KubeadmConfig that is owned by a Machine, the computed KubeadmConfig + // should have no ownerReferences, so we don't overwrite the ownerReference set by the Machine controller. + g.Expect(kubeadmConfig).To(BeComparableTo(expectedKubeadmConfigWithoutOwner)) + } +} + +func Test_ComputeDesiredInfraMachine(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + }, + } + infrastructureMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineTemplateKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "infra-machine-template-1", + "namespace": cluster.Namespace, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + }, + }, + } + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kcp-foo", + Namespace: cluster.Namespace, + UID: "abc-123-kcp-control-plane", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: infrastructureMachineTemplate.GetKind(), + APIGroup: infrastructureMachineTemplate.GroupVersionKind().Group, + Name: infrastructureMachineTemplate.GetName(), + }, + }, + }, + }, + } + expectedInfraMachineWithoutOwner := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "machine-1", + "namespace": cluster.Namespace, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + preExistingInfraMachineOwnedByMachine := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": builder.GenericInfrastructureMachineKind, + "apiVersion": builder.InfrastructureGroupVersion.String(), + "metadata": map[string]interface{}{ + "name": "machine-1", + "namespace": cluster.Namespace, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + clusterv1.MachineControlPlaneNameLabel: kcp.Name, + }, + "annotations": map[string]interface{}{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template-1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + 
}, + "ownerReferences": []interface{}{ + map[string]interface{}{ + "apiVersion": clusterv1.GroupVersion.String(), + "kind": "Machine", + "name": "machine-1", + }, + }, + }, + "spec": map[string]interface{}{ + "hello": "world", + }, + }, + } + + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(infrastructureMachineTemplate.DeepCopy(), builder.GenericInfrastructureMachineTemplateCRD).Build() + + infraMachine, err := ComputeDesiredInfraMachine(t.Context(), fakeClient, kcp, cluster, "machine-1", nil) + g.Expect(err).ToNot(HaveOccurred()) + expectedInfraMachine := expectedInfraMachineWithoutOwner.DeepCopy() + // New InfraMachine should have KCP ownerReference. + expectedInfraMachine.SetOwnerReferences([]metav1.OwnerReference{{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: "KubeadmControlPlane", + Name: kcp.Name, + UID: kcp.UID, + }}) + g.Expect(infraMachine).To(BeComparableTo(expectedInfraMachine)) + + infraMachine, err = ComputeDesiredInfraMachine(t.Context(), fakeClient, kcp, cluster, "machine-1", preExistingInfraMachineOwnedByMachine) + g.Expect(err).ToNot(HaveOccurred()) + // If there is a pre-existing InfraMachine that is owned by a Machine, the computed InfraMachine + // should have no ownerReferences, so we don't overwrite the ownerReference set by the Machine controller. + g.Expect(infraMachine).To(BeComparableTo(expectedInfraMachineWithoutOwner)) +} + +func TestDefaultFeatureGates(t *testing.T) { + tests := []struct { + name string + kubernetesVersion semver.Version + kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec + wantKubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec + }{ + { + name: "don't default ControlPlaneKubeletLocalMode for 1.30", + kubernetesVersion: semver.MustParse("1.30.99"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: nil, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{}, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: 
true, + }, + }, + }, + }, + { + name: "default ControlPlaneKubeletLocalMode for 1.31", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + "EtcdLearnerMode": true, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: true, + "EtcdLearnerMode": true, + }, + }, + }, + }, + { + name: "don't default ControlPlaneKubeletLocalMode for 1.31 if already set to false", + kubernetesVersion: semver.MustParse("1.31.0"), + kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: false, + }, + }, + }, + wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + ControlPlaneKubeletLocalMode: false, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + DefaultFeatureGates(tt.kubeadmConfigSpec, tt.kubernetesVersion) + g.Expect(tt.wantKubeadmConfigSpec).Should(BeComparableTo(tt.kubeadmConfigSpec)) + }) + } +} diff --git a/controlplane/kubeadm/internal/etcd/etcd.go b/controlplane/kubeadm/internal/etcd/etcd.go index 835f320256ca..6d959fa8451e 100644 --- a/controlplane/kubeadm/internal/etcd/etcd.go +++ b/controlplane/kubeadm/internal/etcd/etcd.go @@ -41,7 +41,7 @@ type etcd interface { AlarmList(ctx context.Context) (*clientv3.AlarmResponse, error) Close() error Endpoints() []string - MemberList(ctx context.Context) (*clientv3.MemberListResponse, error) + MemberList(ctx context.Context, opts ...clientv3.OpOption) (*clientv3.MemberListResponse, error) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) MoveLeader(ctx context.Context, id uint64) (*clientv3.MoveLeaderResponse, error) Status(ctx context.Context, endpoint string) (*clientv3.StatusResponse, error) diff --git a/controlplane/kubeadm/internal/etcd/fake/client.go b/controlplane/kubeadm/internal/etcd/fake/client.go index 9d0f28a08811..7c2b024f22a0 100644 --- a/controlplane/kubeadm/internal/etcd/fake/client.go +++ b/controlplane/kubeadm/internal/etcd/fake/client.go @@ -52,7 +52,7 @@ func (c *FakeEtcdClient) AlarmList(_ context.Context) (*clientv3.AlarmResponse, return c.AlarmResponse, c.ErrorResponse } -func (c *FakeEtcdClient) MemberList(_ context.Context) (*clientv3.MemberListResponse, error) { +func (c *FakeEtcdClient) MemberList(_ context.Context, _ ...clientv3.OpOption) (*clientv3.MemberListResponse, error) { return c.MemberListResponse, c.ErrorResponse } func (c *FakeEtcdClient) MemberRemove(_ context.Context, i uint64) (*clientv3.MemberRemoveResponse, error) { diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index 45ad54e1a569..21aa383bf4da 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -17,25 +17,98 @@ limitations under the License. 
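(Aside, not part of the diff: the MemberList change above only widens the interface with variadic options that are forwarded to the etcd client, so existing call sites keep compiling. A minimal sketch of what this allows; "etcdClient" and the use of clientv3.WithSerializable() with MemberList are assumptions for illustration, not something this change adds.)

	// Existing callers are unaffected because the options are variadic.
	resp, err := etcdClient.MemberList(ctx)
	// Callers can now opt into client options, e.g. a serializable (non-linearizable) read,
	// assuming the newer go.etcd.io/etcd/client/v3 honors this option for MemberList.
	resp, err = etcdClient.MemberList(ctx, clientv3.WithSerializable())
	_, _ = resp, err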
package internal import ( - "encoding/json" + "context" "fmt" - "reflect" - "strings" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" - bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/defaulting" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/internal/util/compare" "sigs.k8s.io/cluster-api/util/collections" ) +// UpToDateResult is the result of calling the UpToDate func for a Machine. +type UpToDateResult struct { + LogMessages []string + ConditionMessages []string + EligibleForInPlaceUpdate bool + DesiredMachine *clusterv1.Machine + CurrentInfraMachine *unstructured.Unstructured + DesiredInfraMachine *unstructured.Unstructured + CurrentKubeadmConfig *bootstrapv1.KubeadmConfig + DesiredKubeadmConfig *bootstrapv1.KubeadmConfig +} + +// UpToDate checks if a Machine is up to date with the control plane's configuration. +// If not, messages explaining why are provided with different level of detail for logs and conditions. +func UpToDate( + ctx context.Context, + c client.Client, + cluster *clusterv1.Cluster, + machine *clusterv1.Machine, + kcp *controlplanev1.KubeadmControlPlane, + reconciliationTime *metav1.Time, + infraMachines map[string]*unstructured.Unstructured, + kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, +) (bool, *UpToDateResult, error) { + res := &UpToDateResult{ + // A Machine is eligible for in-place update except if we find a reason why it shouldn't be, + // e.g. rollout.before, rollout.after or the Machine is already up-to-date. + EligibleForInPlaceUpdate: true, + } + + // If a Machine is marked for deletion it is not eligible for in-place update. + if _, ok := machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok { + res.EligibleForInPlaceUpdate = false + } + + // If a Machine is marked for remediation it is not eligible for in-place update. + if _, ok := machine.Annotations[clusterv1.RemediateMachineAnnotation]; ok { + res.EligibleForInPlaceUpdate = false + } + + // Machines whose certificates are about to expire. + if collections.ShouldRolloutBefore(reconciliationTime, kcp.Spec.Rollout.Before)(machine) { + res.LogMessages = append(res.LogMessages, "certificates will expire soon, rolloutBefore expired") + res.ConditionMessages = append(res.ConditionMessages, "Certificates will expire soon") + res.EligibleForInPlaceUpdate = false + } + + // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, + // the RolloutAfter deadline is expired, and the machine was created before the deadline). + if collections.ShouldRolloutAfter(reconciliationTime, kcp.Spec.Rollout.After)(machine) { + res.LogMessages = append(res.LogMessages, "rolloutAfter expired") + res.ConditionMessages = append(res.ConditionMessages, "KubeadmControlPlane spec.rolloutAfter expired") + res.EligibleForInPlaceUpdate = false + } + + // Machines that do not match with KCP config. + // Note: matchesMachineSpec will update res with desired and current objects if necessary. 
+ matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(ctx, c, infraMachines, kubeadmConfigs, kcp, cluster, machine, res) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name) + } + if !matches { + res.LogMessages = append(res.LogMessages, specLogMessages...) + res.ConditionMessages = append(res.ConditionMessages, specConditionMessages...) + } + + if len(res.LogMessages) > 0 || len(res.ConditionMessages) > 0 { + return false, res, nil + } + + // If everything matches no need for an update. + res.EligibleForInPlaceUpdate = false + return true, res, nil +} + // matchesMachineSpec checks if a Machine matches any of a set of KubeadmConfigs and a set of infra machine configs. // If it doesn't, it returns the reasons why. // Kubernetes version, infrastructure template, and KubeadmConfig field need to be equivalent. @@ -44,68 +117,55 @@ import ( // - mutated in-place (ex: NodeDrainTimeoutSeconds) // - are not dictated by KCP (ex: ProviderID) // - are not relevant for the rollout decision (ex: failureDomain). -func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, []string, []string, error) { +func matchesMachineSpec( + ctx context.Context, + c client.Client, + infraMachines map[string]*unstructured.Unstructured, + kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, + kcp *controlplanev1.KubeadmControlPlane, + cluster *clusterv1.Cluster, + machine *clusterv1.Machine, + res *UpToDateResult, +) (bool, []string, []string, error) { logMessages := []string{} conditionMessages := []string{} - if !collections.MatchesKubernetesVersion(kcp.Spec.Version)(machine) { - machineVersion := "" - if machine != nil && machine.Spec.Version != "" { - machineVersion = machine.Spec.Version - } - logMessages = append(logMessages, fmt.Sprintf("Machine version %q is not equal to KCP version %q", machineVersion, kcp.Spec.Version)) + desiredMachine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, machine.Spec.FailureDomain, machine) + if err != nil { + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") + } + // Note: spec.version is not mutated in-place by syncMachines and accordingly + // not updated by desiredstate.ComputeDesiredMachine, so we have to update it here. + desiredMachine.Spec.Version = kcp.Spec.Version + // Note: spec.failureDomain is in general only changed on delete/create, so we don't have to update it here for in-place. + res.DesiredMachine = desiredMachine + // Note: Intentionally not storing currentMachine as it can change later, e.g. through syncMachines. + if desiredMachine.Spec.Version != machine.Spec.Version { + logMessages = append(logMessages, fmt.Sprintf("Machine version %q is not equal to KCP version %q", machine.Spec.Version, desiredMachine.Spec.Version)) // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. 
- conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", machineVersion, kcp.Spec.Version)) + conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", machine.Spec.Version, desiredMachine.Spec.Version)) } - reason, matches, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, machine) + reason, currentKubeadmConfig, desiredKubeadmConfig, matches, err := matchesKubeadmConfig(kubeadmConfigs, kcp, cluster, machine) if err != nil { - return false, nil, nil, errors.Wrapf(err, "failed to match Machine spec") + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") } + res.CurrentKubeadmConfig = currentKubeadmConfig + res.DesiredKubeadmConfig = desiredKubeadmConfig if !matches { logMessages = append(logMessages, reason) conditionMessages = append(conditionMessages, "KubeadmConfig is not up-to-date") } - if reason, matches := matchesTemplateClonedFrom(infraConfigs, kcp, machine); !matches { - logMessages = append(logMessages, reason) - conditionMessages = append(conditionMessages, fmt.Sprintf("%s is not up-to-date", machine.Spec.InfrastructureRef.Kind)) - } - - if len(logMessages) > 0 || len(conditionMessages) > 0 { - return false, logMessages, conditionMessages, nil - } - - return true, nil, nil, nil -} - -// UpToDate checks if a Machine is up to date with the control plane's configuration. -// If not, messages explaining why are provided with different level of detail for logs and conditions. -func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlane, reconciliationTime *metav1.Time, infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig) (bool, []string, []string, error) { - logMessages := []string{} - conditionMessages := []string{} - - // Machines whose certificates are about to expire. - if collections.ShouldRolloutBefore(reconciliationTime, kcp.Spec.Rollout.Before)(machine) { - logMessages = append(logMessages, "certificates will expire soon, rolloutBefore expired") - conditionMessages = append(conditionMessages, "Certificates will expire soon") - } - - // Machines that are scheduled for rollout (KCP.Spec.RolloutAfter set, - // the RolloutAfter deadline is expired, and the machine was created before the deadline). - if collections.ShouldRolloutAfter(reconciliationTime, kcp.Spec.Rollout.After)(machine) { - logMessages = append(logMessages, "rolloutAfter expired") - conditionMessages = append(conditionMessages, "KubeadmControlPlane spec.rolloutAfter expired") - } - - // Machines that do not match with KCP config. - matches, specLogMessages, specConditionMessages, err := matchesMachineSpec(infraConfigs, machineConfigs, kcp, machine) + reason, currentInfraMachine, desiredInfraMachine, matches, err := matchesInfraMachine(ctx, c, infraMachines, kcp, cluster, machine) if err != nil { - return false, nil, nil, errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name) + return false, nil, nil, errors.Wrapf(err, "failed to match Machine") } + res.CurrentInfraMachine = currentInfraMachine + res.DesiredInfraMachine = desiredInfraMachine if !matches { - logMessages = append(logMessages, specLogMessages...) - conditionMessages = append(conditionMessages, specConditionMessages...) 
+ logMessages = append(logMessages, reason) + conditionMessages = append(conditionMessages, fmt.Sprintf("%s is not up-to-date", machine.Spec.InfrastructureRef.Kind)) } if len(logMessages) > 0 || len(conditionMessages) > 0 { @@ -115,28 +175,36 @@ func UpToDate(machine *clusterv1.Machine, kcp *controlplanev1.KubeadmControlPlan return true, nil, nil, nil } -// matchesTemplateClonedFrom checks if a Machine has a corresponding infrastructure machine that +// matchesInfraMachine checks if a Machine has a corresponding infrastructure machine that // matches a given KCP infra template and if it doesn't match returns the reason why. // Note: Differences to the labels and annotations on the infrastructure machine are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to the infrastructure machines. -// TODO: This function will be renamed in a follow-up PR to something better. (ex: MatchesInfraMachine). -func matchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool) { - if machine == nil { - return "Machine cannot be compared with KCP.spec.machineTemplate.spec.infrastructureRef: Machine is nil", false - } - infraObj, found := infraConfigs[machine.Name] +func matchesInfraMachine( + ctx context.Context, + c client.Client, + infraMachines map[string]*unstructured.Unstructured, + kcp *controlplanev1.KubeadmControlPlane, + cluster *clusterv1.Cluster, + machine *clusterv1.Machine, +) (string, *unstructured.Unstructured, *unstructured.Unstructured, bool, error) { + currentInfraMachine, found := infraMachines[machine.Name] if !found { // Return true here because failing to get infrastructure machine should not be considered as unmatching. - return "", true + return "", nil, nil, true, nil } - clonedFromName, ok1 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] - clonedFromGroupKind, ok2 := infraObj.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] + clonedFromName, ok1 := currentInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] + clonedFromGroupKind, ok2 := currentInfraMachine.GetAnnotations()[clusterv1.TemplateClonedFromGroupKindAnnotation] if !ok1 || !ok2 { // All kcp cloned infra machines should have this annotation. // Missing the annotation may be due to older version machines or adopted machines. // Should not be considered as mismatch. - return "", true + return "", nil, nil, true, nil + } + + desiredInfraMachine, err := desiredstate.ComputeDesiredInfraMachine(ctx, c, kcp, cluster, machine.Name, currentInfraMachine) + if err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match %s", currentInfraMachine.GetKind()) } // Check if the machine's infrastructure reference has been created from the current KCP infrastructure template. 
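(A rough caller-side sketch of the new UpToDate entry point introduced in the hunk above; the surrounding reconciler variables such as r.Client, reconciliationTime, the object maps, the logger, and the error handling are assumptions for illustration, not code from this change.)

	upToDate, res, err := internal.UpToDate(ctx, r.Client, cluster, machine, kcp, &reconciliationTime, infraMachines, kubeadmConfigs)
	if err != nil {
		return errors.Wrapf(err, "failed to determine if Machine %s is up-to-date", machine.Name)
	}
	if !upToDate {
		// LogMessages and ConditionMessages carry the same reasons at different levels of detail;
		// EligibleForInPlaceUpdate plus the Desired*/Current* objects feed the in-place update path.
		log.Info("Machine is not up-to-date", "reasons", strings.Join(res.LogMessages, ", "), "eligibleForInPlaceUpdate", res.EligibleForInPlaceUpdate)
	}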
@@ -144,260 +212,148 @@ func matchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructure clonedFromGroupKind != kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String() { return fmt.Sprintf("Infrastructure template on KCP rotated from %s %s to %s %s", clonedFromGroupKind, clonedFromName, - kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String(), kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name), false + kcp.Spec.MachineTemplate.Spec.InfrastructureRef.GroupKind().String(), kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name), currentInfraMachine, desiredInfraMachine, false, nil } - return "", true + return "", currentInfraMachine, desiredInfraMachine, true, nil } -// matchesKubeadmBootstrapConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. +// matchesKubeadmConfig checks if machine's KubeadmConfigSpec is equivalent with KCP's KubeadmConfigSpec. // Note: Differences to the labels and annotations on the KubeadmConfig are not considered for matching // criteria, because changes to labels and annotations are propagated in-place to KubeadmConfig. -func matchesKubeadmBootstrapConfig(machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (string, bool, error) { - if machine == nil { - return "Machine KubeadmConfig cannot be compared: Machine is nil", false, nil - } - - // Check if KCP and machine ClusterConfiguration matches, if not return - match, diff, err := matchClusterConfiguration(kcp, machine) - if err != nil { - return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") - } - if !match { - return fmt.Sprintf("Machine KubeadmConfig ClusterConfiguration is outdated: diff: %s", diff), false, nil - } - +func matchesKubeadmConfig( + kubeadmConfigs map[string]*bootstrapv1.KubeadmConfig, + kcp *controlplanev1.KubeadmControlPlane, + cluster *clusterv1.Cluster, + machine *clusterv1.Machine, +) (string, *bootstrapv1.KubeadmConfig, *bootstrapv1.KubeadmConfig, bool, error) { bootstrapRef := machine.Spec.Bootstrap.ConfigRef if !bootstrapRef.IsDefined() { // Missing bootstrap reference should not be considered as unmatching. // This is a safety precaution to avoid selecting machines that are broken, which in the future should be remediated separately. - return "", true, nil + return "", nil, nil, true, nil } - machineConfig, found := machineConfigs[machine.Name] + currentKubeadmConfig, found := kubeadmConfigs[machine.Name] if !found { // Return true here because failing to get KubeadmConfig should not be considered as unmatching. // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. - return "", true, nil + return "", nil, nil, true, nil } - // Check if KCP and machine InitConfiguration or JoinConfiguration matches - // NOTE: only one between init configuration and join configuration is set on a machine, depending - // on the fact that the machine was the initial control plane node or a joining control plane node. - match, diff, err = matchInitOrJoinConfiguration(machineConfig, kcp) + // Note: Compute a KubeadmConfig assuming we are dealing with a joining machine, which is the most common scenario. + // When dealing with the KubeadmConfig for the init machine, the code will make a first tentative comparison under + // the assumption that KCP initConfiguration and joinConfiguration should be configured identically. 
+ // In order to do so, PrepareKubeadmConfigsForDiff will attempt to convert initConfiguration to + // joinConfiguration in currentKubeadmConfig. + desiredKubeadmConfigWithJoin, err := desiredstate.ComputeDesiredKubeadmConfig(kcp, cluster, true, machine.Name, currentKubeadmConfig) if err != nil { - return "", false, errors.Wrapf(err, "failed to match KubeadmConfig") - } - if !match { - return fmt.Sprintf("Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: %s", diff), false, nil - } - - return "", true, nil -} - -// matchClusterConfiguration verifies if KCP and machine ClusterConfiguration matches. -// NOTE: Machines that have KubeadmClusterConfigurationAnnotation will have to match with KCP ClusterConfiguration. -// If the annotation is not present (machine is either old or adopted), we won't roll out on any possible changes -// made in KCP's ClusterConfiguration given that we don't have enough information to make a decision. -// Users should use KCP.Spec.RolloutAfter field to force a rollout in this case. -func matchClusterConfiguration(kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, string, error) { - if _, ok := machine.GetAnnotations()[controlplanev1.KubeadmClusterConfigurationAnnotation]; !ok { - // We don't have enough information to make a decision; don't' trigger a roll out. - return true, "", nil + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") } + desiredKubeadmConfigWithJoinForDiff, currentKubeadmConfigWithJoinForDiff := PrepareKubeadmConfigsForDiff(desiredKubeadmConfigWithJoin, currentKubeadmConfig, true) - machineClusterConfig, err := ClusterConfigurationFromMachine(machine) + // Check if current and desired KubeadmConfigs match. + // Note: desiredKubeadmConfigWithJoinForDiff has been computed for a kubeadm join. + // Note: currentKubeadmConfigWithJoinForDiff has been migrated from init to join, if currentKubeadmConfig was for a kubeadm init. + match, diff, err := compare.Diff(¤tKubeadmConfigWithJoinForDiff.Spec, &desiredKubeadmConfigWithJoinForDiff.Spec) if err != nil { - // ClusterConfiguration annotation is not correct, only solution is to rollout. - // The call to json.Unmarshal has to take a pointer to the pointer struct defined above, - // otherwise we won't be able to handle a nil ClusterConfiguration (that is serialized into "null"). - // See https://github.com/kubernetes-sigs/cluster-api/issues/3353. - return false, "", nil //nolint:nilerr // Intentionally not returning the error here + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") } + if !match { + // Note: KCP initConfiguration and joinConfiguration should be configured identically. + // But if they are not configured identically and the currentKubeadmConfig is for init we still + // want to avoid unnecessary rollouts. + // Accordingly, we also have to compare the currentKubeadmConfig against a desiredKubeadmConfig + // computed for init to avoid unnecessary rollouts. + // Note: In any case we are going to use desiredKubeadmConfigWithJoin for in-place updates and return it accordingly. 
+ if isKubeadmConfigForInit(currentKubeadmConfig) { + desiredKubeadmConfigWithInit, err := desiredstate.ComputeDesiredKubeadmConfig(kcp, cluster, false, machine.Name, currentKubeadmConfig) + if err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") + } + desiredKubeadmConfigWithInitForDiff, currentKubeadmConfigWithInitForDiff := PrepareKubeadmConfigsForDiff(desiredKubeadmConfigWithInit, currentKubeadmConfig, false) + + // Check if current and desired KubeadmConfigs match. + // Note: desiredKubeadmConfigWithInitForDiff has been computed for a kubeadm init. + // Note: currentKubeadmConfigWithInitForDiff is for a kubeadm init. + match, diff, err := compare.Diff(¤tKubeadmConfigWithInitForDiff.Spec, &desiredKubeadmConfigWithInitForDiff.Spec) + if err != nil { + return "", nil, nil, false, errors.Wrapf(err, "failed to match KubeadmConfig") + } + // Always return desiredKubeadmConfigWithJoin (not desiredKubeadmConfigWithInit) as it should always be used for in-place updates. + if !match { + return fmt.Sprintf("Machine KubeadmConfig is outdated: diff: %s", diff), currentKubeadmConfig, desiredKubeadmConfigWithJoin, false, nil + } + return "", currentKubeadmConfig, desiredKubeadmConfigWithJoin, true, nil + } - // If any of the compared values are nil, treat them the same as an empty ClusterConfiguration. - if machineClusterConfig == nil { - machineClusterConfig = &bootstrapv1.ClusterConfiguration{} - } - - kcpLocalClusterConfiguration := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DeepCopy() - if kcpLocalClusterConfiguration == nil { - kcpLocalClusterConfiguration = &bootstrapv1.ClusterConfiguration{} - } - - // Skip checking DNS fields because we can update the configuration of the working cluster in place. - machineClusterConfig.DNS = kcpLocalClusterConfiguration.DNS - - // Drop differences that do not lead to changes to Machines, but that might exist due - // to changes in how we serialize objects or how webhooks work. - specKCP := &bootstrapv1.KubeadmConfigSpec{ClusterConfiguration: *kcpLocalClusterConfiguration} - specMachine := &bootstrapv1.KubeadmConfigSpec{ClusterConfiguration: *machineClusterConfig} - dropOmittableFields(specKCP) - dropOmittableFields(specMachine) - - // Compare and return. - match, diff, err := compare.Diff(specMachine.ClusterConfiguration, specKCP.ClusterConfiguration) - if err != nil { - return false, "", errors.Wrapf(err, "failed to match ClusterConfiguration") - } - return match, diff, nil -} - -type versionedClusterConfiguration struct { - MarshalVersion string `json:"marshalVersion,omitempty"` - *bootstrapv1.ClusterConfiguration -} - -// ClusterConfigurationAnnotationFromMachineIsOutdated return true if the annotation is outdated. -// Note: this is intentionally implemented with a string check to prevent an additional json.Unmarshal operation. -func ClusterConfigurationAnnotationFromMachineIsOutdated(annotation string) bool { - return !strings.Contains(annotation, fmt.Sprintf("\"marshalVersion\":%q", bootstrapv1.GroupVersion.Version)) -} - -// ClusterConfigurationToMachineAnnotationValue returns an annotation valued to add on machines for -// tracking the ClusterConfiguration value at the time the machine was created. 
-func ClusterConfigurationToMachineAnnotationValue(clusterConfiguration *bootstrapv1.ClusterConfiguration) (string, error) { - machineClusterConfig := &versionedClusterConfiguration{ - MarshalVersion: bootstrapv1.GroupVersion.Version, - ClusterConfiguration: clusterConfiguration, + return fmt.Sprintf("Machine KubeadmConfig is outdated: diff: %s", diff), currentKubeadmConfig, desiredKubeadmConfigWithJoin, false, nil } - annotationBytes, err := json.Marshal(machineClusterConfig) - if err != nil { - return "", errors.Wrap(err, "failed to marshal cluster configuration") - } - return string(annotationBytes), nil + return "", currentKubeadmConfig, desiredKubeadmConfigWithJoin, true, nil } -// ClusterConfigurationFromMachine returns the ClusterConfiguration value at the time the machine was created. -// Note: In case the annotation was created with an older version of the KCP API, the value is converted to the current API version. -func ClusterConfigurationFromMachine(machine *clusterv1.Machine) (*bootstrapv1.ClusterConfiguration, error) { - machineClusterConfigStr, ok := machine.GetAnnotations()[controlplanev1.KubeadmClusterConfigurationAnnotation] - if !ok { - return nil, nil - } - - if ClusterConfigurationAnnotationFromMachineIsOutdated(machineClusterConfigStr) { - // Note: Only conversion from v1beta1 is supported as of today. - machineClusterConfigV1Beta1 := &bootstrapv1beta1.ClusterConfiguration{} - if err := json.Unmarshal([]byte(machineClusterConfigStr), &machineClusterConfigV1Beta1); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal ClusterConfiguration from Machine %s", klog.KObj(machine)) - } - - kubeadmConfigV1Beta1 := &bootstrapv1beta1.KubeadmConfig{ - Spec: bootstrapv1beta1.KubeadmConfigSpec{ - ClusterConfiguration: machineClusterConfigV1Beta1, - }, - } - kubeadmConfig := &bootstrapv1.KubeadmConfig{} - err := kubeadmConfigV1Beta1.ConvertTo(kubeadmConfig) - if err != nil { - return nil, errors.Wrapf(err, "failed to convert ClusterConfiguration from Machine %s", klog.KObj(machine)) +// PrepareKubeadmConfigsForDiff cleans up all fields that are not relevant for the comparison. +func PrepareKubeadmConfigsForDiff(desiredKubeadmConfig, currentKubeadmConfig *bootstrapv1.KubeadmConfig, convertCurrentInitConfigurationToJoinConfiguration bool) (desired, current *bootstrapv1.KubeadmConfig) { + // DeepCopy to ensure the passed in KubeadmConfigs are not modified. + // This has to be done because we eventually want to be able to apply the desiredKubeadmConfig + // (without the modifications that we make here). 
+ desiredKubeadmConfig = desiredKubeadmConfig.DeepCopy() + currentKubeadmConfig = currentKubeadmConfig.DeepCopy() + + if convertCurrentInitConfigurationToJoinConfiguration && isKubeadmConfigForInit(currentKubeadmConfig) { + // Convert InitConfiguration to JoinConfiguration + currentKubeadmConfig.Spec.JoinConfiguration.Timeouts = currentKubeadmConfig.Spec.InitConfiguration.Timeouts + currentKubeadmConfig.Spec.JoinConfiguration.Patches = currentKubeadmConfig.Spec.InitConfiguration.Patches + currentKubeadmConfig.Spec.JoinConfiguration.SkipPhases = currentKubeadmConfig.Spec.InitConfiguration.SkipPhases + currentKubeadmConfig.Spec.JoinConfiguration.NodeRegistration = currentKubeadmConfig.Spec.InitConfiguration.NodeRegistration + currentKubeadmConfig.Spec.JoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{ + LocalAPIEndpoint: currentKubeadmConfig.Spec.InitConfiguration.LocalAPIEndpoint, } + currentKubeadmConfig.Spec.InitConfiguration = bootstrapv1.InitConfiguration{} - machineClusterConfig := kubeadmConfig.Spec.ClusterConfiguration - return &machineClusterConfig, nil + // CACertPath can only be configured for join. + // The CACertPath should never trigger a rollout of Machines created via kubeadm init. + currentKubeadmConfig.Spec.JoinConfiguration.CACertPath = desiredKubeadmConfig.Spec.JoinConfiguration.CACertPath } - machineClusterConfig := &versionedClusterConfiguration{} - if err := json.Unmarshal([]byte(machineClusterConfigStr), &machineClusterConfig); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal ClusterConfiguration from Machine %s", klog.KObj(machine)) - } - return machineClusterConfig.ClusterConfiguration, nil -} + // Ignore ControlPlaneEndpoint which is added on the Machine KubeadmConfig by CABPK. + // Note: ControlPlaneEndpoint should also never change for a Cluster, so no reason to trigger a rollout because of that. + currentKubeadmConfig.Spec.ClusterConfiguration.ControlPlaneEndpoint = desiredKubeadmConfig.Spec.ClusterConfiguration.ControlPlaneEndpoint -// matchInitOrJoinConfiguration verifies if KCP and machine InitConfiguration or JoinConfiguration matches. -// NOTE: By extension this method takes care of detecting changes in other fields of the KubeadmConfig configuration (e.g. Files, Mounts etc.) -func matchInitOrJoinConfiguration(machineConfig *bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane) (bool, string, error) { - if machineConfig == nil { - // Return true here because failing to get KubeadmConfig should not be considered as unmatching. - // This is a safety precaution to avoid rolling out machines if the client or the api-server is misbehaving. - return true, "", nil - } - machineConfig = machineConfig.DeepCopy() - - // takes the KubeadmConfigSpec from KCP and applies the transformations required - // to allow a comparison with the KubeadmConfig referenced from the machine. - kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) + // Skip checking DNS fields because we can update the configuration of the working cluster in place. + currentKubeadmConfig.Spec.ClusterConfiguration.DNS = desiredKubeadmConfig.Spec.ClusterConfiguration.DNS // Default both KubeadmConfigSpecs before comparison. // *Note* This assumes that newly added default values never // introduce a semantic difference to the unset value. // But that is something that is ensured by our API guarantees. 
- defaulting.ApplyPreviousKubeadmConfigDefaults(kcpConfig) - defaulting.ApplyPreviousKubeadmConfigDefaults(&machineConfig.Spec) - - // cleanups all the fields that are not relevant for the comparison. - cleanupConfigFields(kcpConfig, machineConfig) - - match, diff, err := compare.Diff(&machineConfig.Spec, kcpConfig) - if err != nil { - return false, "", errors.Wrapf(err, "failed to match InitConfiguration or JoinConfiguration") - } - return match, diff, nil -} - -// getAdjustedKcpConfig takes the KubeadmConfigSpec from KCP and applies the transformations required -// to allow a comparison with the KubeadmConfig referenced from the machine. -// NOTE: The KCP controller applies a set of transformations when creating a KubeadmConfig referenced from the machine; -// those transformations are implemented in ControlPlane.InitialControlPlaneConfig() and ControlPlane.JoinControlPlaneConfig(). -func getAdjustedKcpConfig(kcp *controlplanev1.KubeadmControlPlane, machineConfig *bootstrapv1.KubeadmConfig) *bootstrapv1.KubeadmConfigSpec { - kcpConfig := kcp.Spec.KubeadmConfigSpec.DeepCopy() - - // if Machine's JoinConfiguration is set, this is a joining control plane machine, so empty out the InitConfiguration; - // otherwise empty out the JoinConfiguration. - // Note: a KubeadmConfig for a joining control plane must have at least joinConfiguration.controlPlane and joinConfiguration.discovery to be set for joining; - // if those fields are missing in the KCP config, CABPK sets them. - if machineConfig.Spec.JoinConfiguration.IsDefined() { - kcpConfig.InitConfiguration = bootstrapv1.InitConfiguration{} - } else { - kcpConfig.JoinConfiguration = bootstrapv1.JoinConfiguration{} - } + defaulting.ApplyPreviousKubeadmConfigDefaults(&desiredKubeadmConfig.Spec) + defaulting.ApplyPreviousKubeadmConfigDefaults(&currentKubeadmConfig.Spec) - return kcpConfig -} - -// cleanupConfigFields cleanups all the fields that are not relevant for the comparison. -func cleanupConfigFields(kcpConfig *bootstrapv1.KubeadmConfigSpec, machineConfig *bootstrapv1.KubeadmConfig) { - // KCP ClusterConfiguration will only be compared with a machine's ClusterConfiguration annotation, so - // we are cleaning up from the reflect.DeepEqual comparison. - kcpConfig.ClusterConfiguration = bootstrapv1.ClusterConfiguration{} - machineConfig.Spec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{} - - // If KCP JoinConfiguration is not present, set machine JoinConfiguration to nil (nothing can trigger rollout here). - // NOTE: this is required because CABPK applies an empty joinConfiguration in case no one is provided. - if !kcpConfig.JoinConfiguration.IsDefined() { - machineConfig.Spec.JoinConfiguration = bootstrapv1.JoinConfiguration{} - } - - // Cleanup JoinConfiguration.Discovery from kcpConfig and machineConfig, because those info are relevant only for + // Cleanup JoinConfiguration.Discovery from desiredKubeadmConfig and currentKubeadmConfig, because this information is relevant only for // the join process and not for comparing the configuration of the machine. - emptyDiscovery := bootstrapv1.Discovery{} - if kcpConfig.JoinConfiguration.IsDefined() { - kcpConfig.JoinConfiguration.Discovery = emptyDiscovery - } - if machineConfig.Spec.JoinConfiguration.IsDefined() { - machineConfig.Spec.JoinConfiguration.Discovery = emptyDiscovery - } - - // If KCP JoinConfiguration.ControlPlane is not present, set machine join configuration to nil (nothing can trigger rollout here). 
- // NOTE: this is required because CABPK applies an empty joinConfiguration.ControlPlane in case no one is provided. - if kcpConfig.JoinConfiguration.IsDefined() && kcpConfig.JoinConfiguration.ControlPlane == nil && - machineConfig.Spec.JoinConfiguration.ControlPlane != nil { - machineConfig.Spec.JoinConfiguration.ControlPlane = nil - } - - // If KCP's join NodeRegistration is empty, set machine's node registration to empty as no changes should trigger rollout. - emptyNodeRegistration := bootstrapv1.NodeRegistrationOptions{} - if kcpConfig.JoinConfiguration.IsDefined() && reflect.DeepEqual(kcpConfig.JoinConfiguration.NodeRegistration, emptyNodeRegistration) && - !reflect.DeepEqual(machineConfig.Spec.JoinConfiguration.NodeRegistration, emptyNodeRegistration) { - machineConfig.Spec.JoinConfiguration.NodeRegistration = emptyNodeRegistration - } + // Note: Changes to Discovery will apply for the next join, but they will not lead to a rollout. + // Note: We should also not send Discovery.BootstrapToken.Token to a RuntimeExtension for security reasons. + desiredKubeadmConfig.Spec.JoinConfiguration.Discovery = bootstrapv1.Discovery{} + currentKubeadmConfig.Spec.JoinConfiguration.Discovery = bootstrapv1.Discovery{} + + // Cleanup ControlPlaneComponentHealthCheckSeconds from desiredKubeadmConfig and currentKubeadmConfig, + // because through conversion apiServer.timeoutForControlPlane in v1beta1 is converted to + // initConfiguration/joinConfiguration.timeouts.controlPlaneComponentHealthCheckSeconds in v1beta2 and + // this can lead to a diff here that would lead to a rollout. + // Note: Changes to ControlPlaneComponentHealthCheckSeconds will apply for the next join, but they will not lead to a rollout. + desiredKubeadmConfig.Spec.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = nil + desiredKubeadmConfig.Spec.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = nil + currentKubeadmConfig.Spec.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = nil + currentKubeadmConfig.Spec.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = nil // Drop differences that do not lead to changes to Machines, but that might exist due // to changes in how we serialize objects or how webhooks work. - dropOmittableFields(kcpConfig) - dropOmittableFields(&machineConfig.Spec) + dropOmittableFields(&desiredKubeadmConfig.Spec) + dropOmittableFields(&currentKubeadmConfig.Spec) + + return desiredKubeadmConfig, currentKubeadmConfig } // dropOmittableFields makes the comparison tolerant to omittable fields being set in the go struct. It applies to: @@ -571,3 +527,23 @@ func dropOmittableFields(spec *bootstrapv1.KubeadmConfigSpec) { spec.NTP.Servers = nil } } + +// isKubeadmConfigForJoin returns true if the KubeadmConfig is for a control plane +// or a worker machine that joined an existing cluster. +// Note: This check is based on the assumption that KubeadmConfigs for joining +// control planes always have a non-empty JoinConfiguration.ControlPlane, while +// the entire JoinConfiguration for the first control plane machine in the +// cluster is emptied out by KCP. +// Note: Previously we checked if the entire JoinConfiguration is defined, but that +// is not safe because apiServer.timeoutForControlPlane in v1beta1 is also converted to +// joinConfiguration.timeouts.controlPlaneComponentHealthCheckSeconds in v1beta2 and +// accordingly we would also detect init KubeadmConfigs as join. 
+func isKubeadmConfigForJoin(c *bootstrapv1.KubeadmConfig) bool { + return c.Spec.JoinConfiguration.ControlPlane != nil +} + +// isKubeadmConfigForInit returns true if the KubeadmConfig is for the first control plane +// machine in the cluster. +func isKubeadmConfigForInit(c *bootstrapv1.KubeadmConfig) bool { + return !isKubeadmConfigForJoin(c) +} diff --git a/controlplane/kubeadm/internal/filters_test.go b/controlplane/kubeadm/internal/filters_test.go index 0d3157ec6529..1e7c78a89f79 100644 --- a/controlplane/kubeadm/internal/filters_test.go +++ b/controlplane/kubeadm/internal/filters_test.go @@ -17,7 +17,6 @@ limitations under the License. package internal import ( - "fmt" "testing" "time" @@ -25,130 +24,62 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstream" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" + "sigs.k8s.io/cluster-api/util/test/builder" ) -func TestClusterConfigurationAnnotation(t *testing.T) { - t.Run("ClusterConfigurationToMachineAnnotationValue", func(t *testing.T) { +func TestMatchesKubeadmConfig(t *testing.T) { + t.Run("returns true if Machine configRef is not defined", func(t *testing.T) { g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - APIServer: bootstrapv1.APIServer{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, - }, - }, - CertificatesDir: "foo", - }, - }, - }, - } - - annotations, err := ClusterConfigurationToMachineAnnotationValue(&kcp.Spec.KubeadmConfigSpec.ClusterConfiguration) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(annotations).To(Equal("{\"marshalVersion\":\"v1beta2\",\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"certificatesDir\":\"foo\"}")) - }) - t.Run("ClusterConfigurationFromMachineIsOutdated", func(t *testing.T) { - g := NewWithT(t) - - // Without annotation. 
- annotation := "" - g.Expect(ClusterConfigurationAnnotationFromMachineIsOutdated(annotation)).To(BeTrue()) - - // v1beta1 annotation (without marshalVersion) - annotation = "{\"etcd\":{},\"apiServer\":{\"extraArgs\":{\"foo\":\"bar\"}},\"controllerManager\":{},\"scheduler\":{},\"dns\":{}}" - g.Expect(ClusterConfigurationAnnotationFromMachineIsOutdated(annotation)).To(BeTrue()) - - // up to date annotation (marshalVersion equal to current version) - annotation = fmt.Sprintf("{\"marshalVersion\":%q,\"etcd\":{},\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{},\"scheduler\":{},\"dns\":{}}", bootstrapv1.GroupVersion.Version) - g.Expect(ClusterConfigurationAnnotationFromMachineIsOutdated(annotation)).To(BeFalse()) - - // marshalVersion not equal to the current version (this should not happen because marshalVersion has been introduced with the v1beta2 API) - annotation = "{\"marshalVersion\":\"foo\",\"etcd\":{},\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{},\"scheduler\":{},\"dns\":{}}" - g.Expect(ClusterConfigurationAnnotationFromMachineIsOutdated(annotation)).To(BeTrue()) - }) - t.Run("ClusterConfigurationFromMachine", func(t *testing.T) { - g := NewWithT(t) - m1 := &clusterv1.Machine{} - - // Without annotation. - clusterConfiguration, err := ClusterConfigurationFromMachine(m1) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(clusterConfiguration).To(BeNil()) - - // v1beta1 annotation (without marshalVersion) - m1.SetAnnotations(map[string]string{controlplanev1.KubeadmClusterConfigurationAnnotation: "{\"etcd\":{},\"apiServer\":{\"extraArgs\":{\"foo\":\"bar\"}},\"controllerManager\":{},\"scheduler\":{},\"dns\":{},\"certificatesDir\":\"foo\"}"}) - clusterConfiguration, err = ClusterConfigurationFromMachine(m1) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(clusterConfiguration).To(Equal(&bootstrapv1.ClusterConfiguration{ - APIServer: bootstrapv1.APIServer{ - ExtraArgs: []bootstrapv1.Arg{ // Extra args converted from old format to new format. 
- { - Name: "foo", - Value: ptr.To("bar"), - }, - }, + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine", }, - CertificatesDir: "foo", - })) - - // up to date annotation (marshalVersion equal to current version) - m1.SetAnnotations(map[string]string{controlplanev1.KubeadmClusterConfigurationAnnotation: fmt.Sprintf("{\"marshalVersion\":%q,\"etcd\":{},\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{},\"scheduler\":{},\"dns\":{},\"certificatesDir\":\"foo\"}", bootstrapv1.GroupVersion.Version)}) - clusterConfiguration, err = ClusterConfigurationFromMachine(m1) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(clusterConfiguration).To(Equal(&bootstrapv1.ClusterConfiguration{ - APIServer: bootstrapv1.APIServer{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + // ConfigRef not defined }, }, - CertificatesDir: "foo", - })) - }) -} - -func TestMatchClusterConfiguration(t *testing.T) { - t.Run("machine without the ClusterConfiguration annotation should match (not enough information to make a decision)", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{} - m := &clusterv1.Machine{} - match, diff, err := matchClusterConfiguration(kcp, m) + } + reason, _, _, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("machine with an invalid ClusterConfiguration annotation should not match (only solution is to rollout)", func(t *testing.T) { + t.Run("returns true if Machine KubeadmConfig is not found", func(t *testing.T) { g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{} m := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "$|^^_", + Name: "machine", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", + }, }, }, } - match, diff, err := matchClusterConfiguration(kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(map[string]*bootstrapv1.KubeadmConfig{}, nil, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeFalse()) - g.Expect(diff).To(BeEmpty()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("Return true if cluster configuration matches", func(t *testing.T) { + t.Run("returns true if ClusterConfiguration is equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ @@ -157,382 +88,287 @@ func TestMatchClusterConfiguration(t *testing.T) { CertificatesDir: "foo", }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\n \"certificatesDir\": \"foo\"\n}", - }, + Name: "machine", }, - } - match, diff, err := matchClusterConfiguration(kcp, m) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) - }) - t.Run("Return false if cluster configuration does not match", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - 
Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", }, }, }, } - m := &clusterv1.Machine{ + machineConfig := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\n \"certificatesDir\": \"bar\"\n}", + Namespace: "default", + Name: "test", + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", }, }, } - match, diff, err := matchClusterConfiguration(kcp, m) + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: machineConfig, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeFalse()) - g.Expect(diff).To(BeComparableTo(`v1beta2.ClusterConfiguration{ - ... // 4 identical fields - Scheduler: {}, - DNS: {}, -- CertificatesDir: "bar", -+ CertificatesDir: "foo", - ImageRepository: "", - FeatureGates: nil, - ... // 2 identical fields - }`)) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("Return true if only omittable fields are changed", func(t *testing.T) { + t.Run("returns true if ClusterConfiguration is equal (empty)", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{}, - }, + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{}", + Name: "machine", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", + }, }, }, } - match, diff, err := matchClusterConfiguration(kcp, m) + machineConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + }, + } + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: machineConfig, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("Return true if cluster configuration is nil (special case)", func(t *testing.T) { + t.Run("returns true if ClusterConfiguration is equal apart from defaulted FeatureGates field", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - 
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{}, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + }, + Version: "v1.31.0", }, } m := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "null", - }, + Name: "machine", }, - } - match, diff, err := matchClusterConfiguration(kcp, m) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) - }) - t.Run("Return true although the DNS fields are different", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - DNS: bootstrapv1.DNS{ - ImageTag: "v1.10.1", - ImageRepository: "gcr.io/capi-test", - }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", }, }, }, } - m := &clusterv1.Machine{ + machineConfig := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\"dns\":{\"imageRepository\":\"gcr.io/capi-test\",\"imageTag\":\"v1.9.3\"}}", + Namespace: "default", + Name: "test", + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{ + desiredstate.ControlPlaneKubeletLocalMode: true, + }, }, }, } - match, diff, err := matchClusterConfiguration(kcp, m) + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: machineConfig, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("Check we are not introducing unexpected rollouts when changing the API", func(t *testing.T) { + t.Run("returns true if ClusterConfiguration is equal apart from ControlPlaneEndpoint and DNS fields", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - APIServer: bootstrapv1.APIServer{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, - }, - }, - ControllerManager: bootstrapv1.ControllerManager{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, - }, - }, - Scheduler: bootstrapv1.Scheduler{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, - }, - }, DNS: bootstrapv1.DNS{ ImageTag: "v1.10.1", ImageRepository: "gcr.io/capi-test", }, }, }, + Version: "v1.30.0", }, } - - // This is a point in time snapshot of how a serialized ClusterConfiguration looks like; - // we are hardcoding this in the test so we can detect if a change in the API impacts serialization. - // NOTE: changes in the json representation do not always trigger a rollout in KCP, but they are an heads up that should be investigated. 
- annotationsCheckPoint := "{\"marshalVersion\":\"v1beta2\",\"apiServer\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"controllerManager\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"scheduler\":{\"extraArgs\":[{\"name\":\"foo\",\"value\":\"bar\"}]},\"dns\":{\"imageRepository\":\"gcr.io/capi-test\",\"imageTag\":\"v1.10.1\"}}" - - // compute how a serialized ClusterConfiguration looks like now - annotations, err := ClusterConfigurationToMachineAnnotationValue(&kcp.Spec.KubeadmConfigSpec.ClusterConfiguration) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(annotations).To(Equal(annotationsCheckPoint)) - - // check the match function detects if a Machine with the annotation string above matches the object it originates from (round trip). m := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: annotationsCheckPoint, - }, + Name: "machine", }, - } - g.Expect(matchClusterConfiguration(kcp, m)).To(BeTrue()) - }) -} - -func TestGetAdjustedKcpConfig(t *testing.T) { - t.Run("if the machine is the first control plane, kcp config should get InitConfiguration", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", }, }, }, } machineConfig := &bootstrapv1.KubeadmConfig{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ // first control-plane - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + ControlPlaneEndpoint: "1.2.3.4:6443", + DNS: bootstrapv1.DNS{ + ImageTag: "v1.9.3", + ImageRepository: "gcr.io/capi-test", }, }, }, } - kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) - g.Expect(kcpConfig.InitConfiguration.IsDefined()).To(BeTrue()) - g.Expect(kcpConfig.JoinConfiguration.IsDefined()).To(BeFalse()) + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: machineConfig, + } + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("if the machine is a joining control plane, kcp config should get JoinConfiguration", func(t *testing.T) { + t.Run("returns false if ClusterConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", - }, - }, - }, - }, - } - 
machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ // joining control-plane - Patches: bootstrapv1.Patches{ - Directory: "/tmp/patches", + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", }, }, + Version: "v1.30.0", }, } - kcpConfig := getAdjustedKcpConfig(kcp, machineConfig) - g.Expect(kcpConfig.InitConfiguration.IsDefined()).To(BeFalse()) - g.Expect(kcpConfig.JoinConfiguration.IsDefined()).To(BeTrue()) - }) -} - -func TestCleanupConfigFields(t *testing.T) { - t.Run("ClusterConfiguration gets removed from KcpConfig and MachineConfig", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "/tmp/certs", - }, - } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "/tmp/certs", - }, - }, - } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.ClusterConfiguration.IsDefined()).To(BeFalse()) - g.Expect(machineConfig.Spec.ClusterConfiguration.IsDefined()).To(BeFalse()) - }) - t.Run("JoinConfiguration gets removed from MachineConfig if it was not derived by KCPConfig", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, // KCP not providing a JoinConfiguration - } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - ControlPlane: &bootstrapv1.JoinControlPlane{}, - }, // Machine gets a default JoinConfiguration from CABPK - }, - } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration.IsDefined()).To(BeFalse()) - g.Expect(machineConfig.Spec.JoinConfiguration.IsDefined()).To(BeFalse()) - }) - t.Run("JoinConfiguration.Discovery gets removed because it is not relevant for compare", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "aaa"}, + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine", }, - } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "aaa"}, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "test", + }, }, }, } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration.Discovery).To(BeComparableTo(bootstrapv1.Discovery{})) - g.Expect(machineConfig.Spec.JoinConfiguration.Discovery).To(BeComparableTo(bootstrapv1.Discovery{})) - }) - t.Run("JoinConfiguration.ControlPlane gets removed from MachineConfig if it was not derived by KCPConfig", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - ControlPlane: nil, // Control plane configuration missing in KCP - }, - } machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - ControlPlane: &bootstrapv1.JoinControlPlane{}, // Machine gets a default 
JoinConfiguration.ControlPlane from CABPK - }, - }, - } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration).ToNot(BeNil()) - g.Expect(machineConfig.Spec.JoinConfiguration.ControlPlane).To(BeNil()) - }) - t.Run("JoinConfiguration.NodeRegistrationOptions gets removed from MachineConfig if it was not derived by KCPConfig", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - NodeRegistration: bootstrapv1.NodeRegistrationOptions{}, // NodeRegistrationOptions configuration missing in KCP + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", }, - } - machineConfig := &bootstrapv1.KubeadmConfig{ Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - NodeRegistration: bootstrapv1.NodeRegistrationOptions{Name: "test"}, // Machine gets a some JoinConfiguration.NodeRegistrationOptions - }, - }, - } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration).ToNot(BeNil()) - g.Expect(machineConfig.Spec.JoinConfiguration.NodeRegistration).To(BeComparableTo(bootstrapv1.NodeRegistrationOptions{})) - }) - t.Run("drops omittable fields", func(t *testing.T) { - g := NewWithT(t) - kcpConfig := &bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - KubeletExtraArgs: []bootstrapv1.Arg{}, + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "bar", }, }, } - machineConfig := &bootstrapv1.KubeadmConfig{ - Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{ - NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - KubeletExtraArgs: []bootstrapv1.Arg{}, - }, - }, - }, + machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ + m.Name: machineConfig, } - cleanupConfigFields(kcpConfig, machineConfig) - g.Expect(kcpConfig.JoinConfiguration.NodeRegistration.KubeletExtraArgs).To(BeNil()) - g.Expect(machineConfig.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs).To(BeNil()) - }) -} - -func TestMatchInitOrJoinConfiguration(t *testing.T) { - t.Run("returns true if the machine does not have a bootstrap config", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{} - match, diff, err := matchInitOrJoinConfiguration(nil, kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: v1beta2.ClusterConfiguration{ + ... // 4 identical fields + Scheduler: {}, + DNS: {}, +- CertificatesDir: "bar", ++ CertificatesDir: "foo", + ImageRepository: "", + FeatureGates: nil, + ... // 3 identical fields + }, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + ... 
// 11 identical fields + }`)) }) - t.Run("returns true if one format is empty and the other one cloud-config", func(t *testing.T) { + t.Run("returns true if InitConfiguration is equal after conversion to JoinConfiguration", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - Format: bootstrapv1.CloudConfig, + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + Timeouts: bootstrapv1.Timeouts{ + // ControlPlaneComponentHealthCheckSeconds is different, but it is ignored for the diff + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](5), + KubernetesAPICallSeconds: ptr.To[int32](7), + }, + Patches: bootstrapv1.Patches{ + Directory: "/test/patches", + }, + SkipPhases: []string{"skip-phase"}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + ControlPlane: &bootstrapv1.JoinControlPlane{ + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + }, + CACertPath: "/tmp/cacert", // This field doesn't exist in InitConfiguration, so it should not lead to a rollout. + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -549,40 +385,68 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - Format: "", + // InitConfiguration will be converted to JoinConfiguration and then compared against the JoinConfiguration from KCP. 
+ InitConfiguration: bootstrapv1.InitConfiguration{ + Timeouts: bootstrapv1.Timeouts{ + // ControlPlaneComponentHealthCheckSeconds is different, but it is ignored for the diff + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](1), + KubernetesAPICallSeconds: ptr.To[int32](7), + }, + Patches: bootstrapv1.Patches{ + Directory: "/test/patches", + }, + SkipPhases: []string{"skip-phase"}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{ + { + Name: "v", + Value: ptr.To("8"), + }, + }, + }, + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + }, }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("returns true if InitConfiguration is equal", func(t *testing.T) { + t.Run("returns true if JoinConfiguration is not equal, but InitConfiguration is", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -599,23 +463,26 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) t.Run("returns false if InitConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) @@ -628,15 +495,16 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { Name: "A new name", // This is a change }, }, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: 
bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", // This is a change + }, + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -653,10 +521,6 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -670,10 +534,13 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeFalse()) - g.Expect(diff).To(BeComparableTo(`&v1beta2.KubeadmConfigSpec{ + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, InitConfiguration: v1beta2.InitConfiguration{ BootstrapTokens: nil, @@ -700,15 +567,16 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -725,25 +593,29 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + }, }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { + t.Run("returns true if JoinConfiguration is equal apart from Discovery and Timeouts", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ @@ 
-752,17 +624,19 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "A new name", // This is a change + Name: "A new name", + }, + // Discovery gets removed because Discovery is not relevant for the rollout decision. + Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "aaa"}, + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](1), }, }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -779,63 +653,51 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "An old name", // This is a change + Name: "A new name", + }, + // Discovery gets removed because Discovery is not relevant for the rollout decision. + Discovery: bootstrapv1.Discovery{TLSBootstrapToken: "bbb"}, + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](11), }, }, }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeFalse()) - g.Expect(diff).To(BeComparableTo(`&v1beta2.KubeadmConfigSpec{ - ClusterConfiguration: {}, - InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - JoinConfiguration: v1beta2.JoinConfiguration{ - NodeRegistration: v1beta2.NodeRegistrationOptions{ -- Name: "An old name", -+ Name: "A new name", - CRISocket: "", - Taints: nil, - ... // 4 identical fields - }, - CACertPath: "", - Discovery: {}, - ... // 4 identical fields - }, - Files: nil, - DiskSetup: {}, - ... // 9 identical fields - }`)) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("returns true if returns true if only omittable configurations are not equal", func(t *testing.T) { + t.Run("returns true if JoinConfiguration is equal apart from JoinControlPlane", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - Files: []bootstrapv1.File{}, // This is a change, but it is an omittable field and the diff between nil and empty array is not relevant. 
+ JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + ControlPlane: nil, // Control plane configuration missing in KCP + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -852,41 +714,51 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "A new name", + }, + // Machine gets a default JoinConfiguration.ControlPlane from CABPK + // Note: This field is now also set by KCP, but leaving this case here for additional coverage. + ControlPlane: &bootstrapv1.JoinControlPlane{}, + }, }, }, } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeTrue()) - g.Expect(diff).To(BeEmpty()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("returns false if some other configurations are not equal", func(t *testing.T) { + t.Run("returns false if JoinConfiguration is not equal, and InitConfiguration is also not equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - Files: []bootstrapv1.File{{Path: "/tmp/foo"}}, // This is a change + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "Different name", + }, + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -903,113 +775,63 @@ func TestMatchInitOrJoinConfiguration(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, - }, - }, - } - match, diff, err := matchInitOrJoinConfiguration(machineConfigs[m.Name], kcp) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeFalse()) - g.Expect(diff).To(BeComparableTo(`&v1beta2.KubeadmConfigSpec{ - ClusterConfiguration: {}, - InitConfiguration: {NodeRegistration: 
{ImagePullPolicy: "IfNotPresent"}}, - JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, -- Files: nil, -+ Files: []v1beta2.File{{Path: "/tmp/foo"}}, - DiskSetup: {}, - Mounts: nil, - ... // 8 identical fields - }`)) - }) -} - -func TestMatchesKubeadmBootstrapConfig(t *testing.T) { - t.Run("returns true if ClusterConfiguration is equal", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", - }, - }, - }, - } - m := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\n \"certificatesDir\": \"foo\"\n}", - }, - }, - } - machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ - m.Name: {}, - } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) - }) - t.Run("returns false if ClusterConfiguration is NOT equal", func(t *testing.T) { - g := NewWithT(t) - kcp := &controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - CertificatesDir: "foo", + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, }, }, }, } - m := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\n \"certificatesDir\": \"bar\"\n}", - }, - }, - } - machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ - m.Name: {}, - } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeFalse()) - g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig ClusterConfiguration is outdated: diff: v1beta2.ClusterConfiguration{ - ... // 4 identical fields - Scheduler: {}, - DNS: {}, -- CertificatesDir: "bar", -+ CertificatesDir: "foo", - ImageRepository: "", - FeatureGates: nil, - ... // 2 identical fields + g.Expect(reason).To(Equal(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: {}, + InitConfiguration: v1beta2.InitConfiguration{ + BootstrapTokens: nil, + NodeRegistration: v1beta2.NodeRegistrationOptions{ +- Name: "name", ++ Name: "Different name", + CRISocket: "", + Taints: nil, + ... // 4 identical fields + }, + LocalAPIEndpoint: {}, + SkipPhases: nil, + ... // 2 identical fields + }, + JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + Files: nil, + ... 
// 10 identical fields }`)) }) - t.Run("returns true if InitConfiguration is equal", func(t *testing.T) { + t.Run("returns false if JoinConfiguration has other differences in ControlPlane", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + ControlPlane: nil, // Control plane configuration missing in KCP + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1026,44 +848,72 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + ControlPlane: &bootstrapv1.JoinControlPlane{ + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "1.2.3.4", + BindPort: 6443, + }, + }, + }, }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: {}, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: v1beta2.JoinConfiguration{ + NodeRegistration: {Name: "name", ImagePullPolicy: "IfNotPresent"}, + CACertPath: "", + Discovery: {}, + ControlPlane: &v1beta2.JoinControlPlane{ + LocalAPIEndpoint: v1beta2.APIEndpoint{ +- AdvertiseAddress: "1.2.3.4", ++ AdvertiseAddress: "", +- BindPort: 6443, ++ BindPort: 0, + }, + }, + SkipPhases: nil, + Patches: {}, + Timeouts: {}, + }, + Files: nil, + DiskSetup: {}, + ... 
// 9 identical fields + }`)) }) - t.Run("returns false if InitConfiguration is NOT equal", func(t *testing.T) { + t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{ + InitConfiguration: bootstrapv1.InitConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ Name: "A new name", // This is a change }, }, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1080,16 +930,13 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, NodeRegistration: bootstrapv1.NodeRegistrationOptions{ Name: "An old name", // This is a change }, @@ -1097,13 +944,16 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeFalse()) - g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, - InitConfiguration: v1beta2.InitConfiguration{ - BootstrapTokens: nil, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: v1beta2.JoinConfiguration{ NodeRegistration: v1beta2.NodeRegistrationOptions{ - Name: "An old name", + Name: "A new name", @@ -1111,31 +961,28 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { Taints: nil, ... // 4 identical fields }, - LocalAPIEndpoint: {}, - SkipPhases: nil, - ... // 2 identical fields + CACertPath: "", + Discovery: {}, + ... // 4 identical fields }, - JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - Files: nil, - ... // 10 identical fields + Files: nil, + DiskSetup: {}, + ... 
// 9 identical fields }`)) }) - t.Run("returns true if JoinConfiguration is equal", func(t *testing.T) { + t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + // JoinConfiguration not set anymore. }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1152,44 +999,72 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - JoinConfiguration: bootstrapv1.JoinConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "An old name", // This is a change + }, + }, }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + // Can't check if desiredKubeadmConfig is for join because the test case is that JoinConfiguration is not set anymore. + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: {}, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: v1beta2.JoinConfiguration{ + NodeRegistration: v1beta2.NodeRegistrationOptions{ +- Name: "An old name", ++ Name: "", + CRISocket: "", + Taints: nil, + ... // 4 identical fields + }, + CACertPath: "", + Discovery: {}, + ... // 4 identical fields + }, + Files: nil, + DiskSetup: {}, + ... 
// 9 identical fields + }`)) }) - t.Run("returns false if JoinConfiguration is NOT equal", func(t *testing.T) { + t.Run("returns true if only omittable configurations are not equal", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{}, + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + FeatureGates: map[string]bool{}, // This is a change, but it is an omittable field + }, + InitConfiguration: bootstrapv1.InitConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{}, + }, + }, JoinConfiguration: bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "A new name", // This is a change + Name: "name", + KubeletExtraArgs: []bootstrapv1.Arg{}, }, }, + Files: []bootstrapv1.File{}, // This is a change, but it is an omittable field and the diff between nil and empty array is not relevant. }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1206,63 +1081,46 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "An old name", // This is a change + Name: "name", }, }, }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(match).To(BeFalse()) - g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ - ClusterConfiguration: {}, - InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - JoinConfiguration: v1beta2.JoinConfiguration{ - NodeRegistration: v1beta2.NodeRegistrationOptions{ -- Name: "An old name", -+ Name: "A new name", - CRISocket: "", - Taints: nil, - ... // 4 identical fields - }, - CACertPath: "", - Discovery: {}, - ... // 4 identical fields - }, - Files: nil, - DiskSetup: {}, - ... 
// 9 identical fields - }`)) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) + g.Expect(match).To(BeTrue()) + g.Expect(reason).To(BeEmpty()) }) - t.Run("returns true if only omittable configurations are not equal", func(t *testing.T) { + t.Run("returns true if KubeadmConfig is equal apart from defaulted format field", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - Files: []bootstrapv1.File{}, // This is a change, but it is an omittable field and the diff between nil and empty array is not relevant. + Format: bootstrapv1.CloudConfig, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1279,41 +1137,47 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + Format: "", + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) - t.Run("returns false if some other configurations are not equal", func(t *testing.T) { + t.Run("returns false if KubeadmConfig is not equal (other configurations)", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, InitConfiguration: bootstrapv1.InitConfiguration{}, - JoinConfiguration: bootstrapv1.JoinConfiguration{}, - Files: []bootstrapv1.File{{Path: "/tmp/foo"}}, // This is a change + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, + Files: []bootstrapv1.File{{Path: "/tmp/foo"}}, // This is a change }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1330,26 +1194,30 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := 
map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", }, Spec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{}, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + ControlPlane: &bootstrapv1.JoinControlPlane{}, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "name", + }, + }, }, }, } - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, currentKubeadmConfig, desiredKubeadmConfig, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(currentKubeadmConfig).ToNot(BeNil()) + g.Expect(desiredKubeadmConfig).ToNot(BeNil()) + g.Expect(isKubeadmConfigForJoin(desiredKubeadmConfig)).To(BeTrue()) g.Expect(match).To(BeFalse()) - g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig InitConfiguration or JoinConfiguration are outdated: diff: &v1beta2.KubeadmConfigSpec{ + g.Expect(reason).To(BeComparableTo(`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ ClusterConfiguration: {}, InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, - JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: {NodeRegistration: {Name: "name", ImagePullPolicy: "IfNotPresent"}, ControlPlane: &{}}, - Files: nil, + Files: []v1beta2.File{{Path: "/tmp/foo"}}, DiskSetup: {}, @@ -1375,13 +1243,10 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { InitConfiguration: bootstrapv1.InitConfiguration{}, JoinConfiguration: bootstrapv1.JoinConfiguration{}, }, + Version: "v1.30.0", }, } m := &clusterv1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: clusterv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1398,10 +1263,6 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { } machineConfigs := map[string]*bootstrapv1.KubeadmConfig{ m.Name: { - TypeMeta: metav1.TypeMeta{ - Kind: "KubeadmConfig", - APIVersion: bootstrapv1.GroupVersion.String(), - }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "test", @@ -1416,7 +1277,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1426,7 +1287,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations machineConfigs[m.Name].Labels = nil - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1436,7 +1297,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Annotations = nil machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, _, _, match, err := 
matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1446,7 +1307,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { g := NewWithT(t) machineConfigs[m.Name].Labels = kcp.Spec.MachineTemplate.ObjectMeta.Labels machineConfigs[m.Name].Annotations = kcp.Spec.MachineTemplate.ObjectMeta.Annotations - reason, match, err := matchesKubeadmBootstrapConfig(machineConfigs, kcp, m) + reason, _, _, match, err := matchesKubeadmConfig(machineConfigs, kcp, &clusterv1.Cluster{}, m) g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) @@ -1454,14 +1315,7 @@ func TestMatchesKubeadmBootstrapConfig(t *testing.T) { }) } -func TestMatchesTemplateClonedFrom(t *testing.T) { - t.Run("nil machine returns false", func(t *testing.T) { - g := NewWithT(t) - reason, match := matchesTemplateClonedFrom(nil, nil, nil) - g.Expect(match).To(BeFalse()) - g.Expect(reason).To(Equal("Machine cannot be compared with KCP.spec.machineTemplate.spec.infrastructureRef: Machine is nil")) - }) - +func TestMatchesInfraMachine(t *testing.T) { t.Run("returns true if machine not found", func(t *testing.T) { g := NewWithT(t) kcp := &controlplanev1.KubeadmControlPlane{} @@ -1474,7 +1328,8 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { }, }, } - reason, match := matchesTemplateClonedFrom(map[string]*unstructured.Unstructured{}, kcp, machine) + reason, _, _, match, err := matchesInfraMachine(t.Context(), nil, map[string]*unstructured.Unstructured{}, kcp, &clusterv1.Cluster{}, machine) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) @@ -1496,20 +1351,32 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { }, Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachineTemplate", - Name: "infra-foo", - APIGroup: "generic.io", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template1", }, }, }, }, } + + infraMachineTemplate := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template1", + "namespace": "default", + }, + }, + } + m := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachine", - Name: "infra-foo", - APIGroup: "generic.io", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineKind, + Name: "infra-config1", }, }, } @@ -1517,8 +1384,8 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { infraConfigs := map[string]*unstructured.Unstructured{ m.Name: { Object: map[string]interface{}{ - "kind": "InfrastructureMachine", - "apiVersion": clusterv1.GroupVersionInfrastructure.String(), + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineKind, "metadata": map[string]interface{}{ "name": "infra-config1", "namespace": "default", @@ -1526,160 +1393,86 @@ func TestMatchesTemplateClonedFrom(t *testing.T) { }, }, } + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(builder.TestInfrastructureMachineTemplateCRD, 
infraMachineTemplate).Build() - t.Run("by returning true if neither labels or annotations match", func(t *testing.T) { + t.Run("by returning true if annotations don't exist", func(t *testing.T) { g := NewWithT(t) - infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - }) - infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) + infraConfigs[m.Name].SetAnnotations(map[string]string{}) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(match).To(BeTrue()) g.Expect(reason).To(BeEmpty()) }) - t.Run("by returning true if only labels don't match", func(t *testing.T) { + t.Run("by returning false if neither Name nor GroupKind matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - "test": "annotation", + clusterv1.TemplateClonedFromNameAnnotation: "different-infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(nil) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io different-infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - t.Run("by returning true if only annotations don't match", func(t *testing.T) { + t.Run("by returning false if only GroupKind matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", + clusterv1.TemplateClonedFromNameAnnotation: "different-infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io different-infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - t.Run("by returning true if both labels and annotations match", func(t *testing.T) { + t.Run("by returning false if only Name matches", func(t *testing.T) { g := NewWithT(t) infraConfigs[m.Name].SetAnnotations(map[string]string{ - 
clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - "test": "annotation", + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", }) - infraConfigs[m.Name].SetLabels(kcp.Spec.MachineTemplate.ObjectMeta.Labels) - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, m) - g.Expect(match).To(BeTrue()) - g.Expect(reason).To(BeEmpty()) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(match).To(BeFalse()) + g.Expect(reason).To(Equal("Infrastructure template on KCP rotated from DifferentTestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1 to TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1")) }) - }) -} - -func TestMatchesTemplateClonedFrom_WithClonedFromAnnotations(t *testing.T) { - kcp := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - Kind: "GenericMachineTemplate", - Name: "infra-foo", - APIGroup: "generic.io", - }, - }, - }, - }, - } - machine := &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "InfrastructureMachine", - Name: "infra-config1", - }, - }, - } - tests := []struct { - name string - annotations map[string]interface{} - expectMatch bool - expectReason string - }{ - { - name: "returns true if annotations don't exist", - annotations: map[string]interface{}{}, - expectMatch: true, - }, - { - name: "returns false if annotations don't match anything", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "barfoo1", - clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", - }, - expectMatch: false, - expectReason: "Infrastructure template on KCP rotated from barfoo2 barfoo1 to GenericMachineTemplate.generic.io infra-foo", - }, - { - name: "returns false if TemplateClonedFromNameAnnotation matches but TemplateClonedFromGroupKindAnnotation doesn't", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "barfoo2", - }, - expectMatch: false, - expectReason: "Infrastructure template on KCP rotated from barfoo2 infra-foo to GenericMachineTemplate.generic.io infra-foo", - }, - { - name: "returns true if both annotations match", - annotations: map[string]interface{}{ - clusterv1.TemplateClonedFromNameAnnotation: "infra-foo", - clusterv1.TemplateClonedFromGroupKindAnnotation: "GenericMachineTemplate.generic.io", - }, - expectMatch: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run("by returning true if both Name and GroupKind match", func(t *testing.T) { g := NewWithT(t) - infraConfigs := map[string]*unstructured.Unstructured{ - machine.Name: { - Object: map[string]interface{}{ - "kind": "InfrastructureMachine", - "apiVersion": clusterv1.GroupVersionInfrastructure.String(), - 
"metadata": map[string]interface{}{ - "name": "infra-config1", - "namespace": "default", - "annotations": tt.annotations, - }, - }, - }, - } - reason, match := matchesTemplateClonedFrom(infraConfigs, kcp, machine) - g.Expect(match).To(Equal(tt.expectMatch)) - g.Expect(reason).To(Equal(tt.expectReason)) + infraConfigs[m.Name].SetAnnotations(map[string]string{ + clusterv1.TemplateClonedFromNameAnnotation: "infra-machine-template1", + clusterv1.TemplateClonedFromGroupKindAnnotation: "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io", + }) + reason, _, _, match, err := matchesInfraMachine(t.Context(), c, infraConfigs, kcp, &clusterv1.Cluster{}, m) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(reason).To(BeEmpty()) + g.Expect(match).To(BeTrue()) }) - } + }) } func TestUpToDate(t *testing.T) { reconciliationTime := metav1.Now() defaultKcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "kcp", + }, Spec: controlplanev1.KubeadmControlPlaneSpec{ Replicas: nil, - Version: "v1.31.0", + Version: "v1.30.0", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: clusterv1.GroupVersionInfrastructure.Group, - Kind: "AWSMachineTemplate", - Name: "template1", + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template1", }, }, }, @@ -1696,16 +1489,46 @@ func TestUpToDate(t *testing.T) { }, }, } + + infraMachineTemplate1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template1", + "namespace": "default", + }, + }, + } + infraMachineTemplate2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": builder.InfrastructureGroupVersion.String(), + "kind": builder.TestInfrastructureMachineTemplateKind, + "metadata": map[string]interface{}{ + "name": "infra-machine-template2", + "namespace": "default", + }, + }, + } + defaultMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ CreationTimestamp: metav1.Time{Time: reconciliationTime.Add(-2 * 24 * time.Hour)}, // two days ago. - Annotations: map[string]string{ - controlplanev1.KubeadmClusterConfigurationAnnotation: "{\n \"certificatesDir\": \"foo\"\n}", - }, }, Spec: clusterv1.MachineSpec{ - Version: "v1.31.0", - InfrastructureRef: clusterv1.ContractVersionedObjectReference{APIGroup: clusterv1.GroupVersionInfrastructure.Group, Kind: "AWSMachine", Name: "infra-machine1"}, + Version: "v1.30.0", + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: bootstrapv1.GroupVersion.Group, + Kind: "KubeadmConfig", + Name: "boostrap-config1", + }, + }, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: clusterv1.GroupVersionInfrastructure.Group, + Kind: "TestInfrastructureMachine", + Name: "infra-machine1", + }, }, Status: clusterv1.MachineStatus{ CertificatesExpiryDate: metav1.Time{Time: reconciliationTime.Add(100 * 24 * time.Hour)}, // certificates will expire in 100 days from now. 
@@ -1715,14 +1538,14 @@ func TestUpToDate(t *testing.T) { defaultInfraConfigs := map[string]*unstructured.Unstructured{ defaultMachine.Name: { Object: map[string]interface{}{ - "kind": "AWSMachine", + "kind": builder.TestInfrastructureMachineKind, "apiVersion": clusterv1.GroupVersionInfrastructure.String(), "metadata": map[string]interface{}{ - "name": "infra-config1", + "name": "infra-machine1", "namespace": "default", "annotations": map[string]interface{}{ - "cluster.x-k8s.io/cloned-from-name": "template1", - "cluster.x-k8s.io/cloned-from-groupkind": "AWSMachineTemplate.infrastructure.cluster.x-k8s.io", + "cluster.x-k8s.io/cloned-from-name": "infra-machine-template1", + "cluster.x-k8s.io/cloned-from-groupkind": builder.InfrastructureGroupVersion.WithKind(builder.TestInfrastructureMachineTemplateKind).GroupKind().String(), }, }, }, @@ -1731,31 +1554,39 @@ func TestUpToDate(t *testing.T) { defaultMachineConfigs := map[string]*bootstrapv1.KubeadmConfig{ defaultMachine.Name: { + ObjectMeta: metav1.ObjectMeta{ + Name: "boostrap-config1", + }, Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + CertificatesDir: "foo", + }, InitConfiguration: bootstrapv1.InitConfiguration{}, // first control-plane }, }, } tests := []struct { - name string - kcp *controlplanev1.KubeadmControlPlane - machine *clusterv1.Machine - infraConfigs map[string]*unstructured.Unstructured - machineConfigs map[string]*bootstrapv1.KubeadmConfig - expectUptoDate bool - expectLogMessages []string - expectConditionMessages []string + name string + kcp *controlplanev1.KubeadmControlPlane + machine *clusterv1.Machine + infraConfigs map[string]*unstructured.Unstructured + machineConfigs map[string]*bootstrapv1.KubeadmConfig + expectUptoDate bool + expectEligibleForInPlaceUpdate bool + expectLogMessages []string + expectConditionMessages []string }{ { - name: "machine up-to-date", - kcp: defaultKcp, - machine: defaultMachine, - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: true, - expectLogMessages: nil, - expectConditionMessages: nil, + name: "machine up-to-date", + kcp: defaultKcp, + machine: defaultMachine, + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: true, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: nil, + expectConditionMessages: nil, }, { name: "certificate are expiring soon", @@ -1764,12 +1595,13 @@ func TestUpToDate(t *testing.T) { kcp.Spec.Rollout.Before.CertificatesExpiryDays = 150 // rollout if certificates will expire in less then 150 days. return kcp }(), - machine: defaultMachine, // certificates will expire in 100 days from now. - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"certificates will expire soon, rolloutBefore expired"}, - expectConditionMessages: []string{"Certificates will expire soon"}, + machine: defaultMachine, // certificates will expire in 100 days from now. 
+ infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: []string{"certificates will expire soon, rolloutBefore expired"}, + expectConditionMessages: []string{"Certificates will expire soon"}, }, { name: "rollout after expired", @@ -1778,26 +1610,76 @@ func TestUpToDate(t *testing.T) { kcp.Spec.Rollout.After = metav1.Time{Time: reconciliationTime.Add(-1 * 24 * time.Hour)} // one day ago return kcp }(), - machine: defaultMachine, // created two days ago - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"rolloutAfter expired"}, - expectConditionMessages: []string{"KubeadmControlPlane spec.rolloutAfter expired"}, + machine: defaultMachine, // created two days ago + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, + expectLogMessages: []string{"rolloutAfter expired"}, + expectConditionMessages: []string{"KubeadmControlPlane spec.rolloutAfter expired"}, }, { name: "kubernetes version does not match", kcp: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKcp.DeepCopy() - kcp.Spec.Version = "v1.31.2" + kcp.Spec.Version = "v1.30.2" + return kcp + }(), + machine: func() *clusterv1.Machine { + machine := defaultMachine.DeepCopy() + machine.Spec.Version = "v1.30.0" + return machine + }(), + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{"Machine version \"v1.30.0\" is not equal to KCP version \"v1.30.2\""}, + expectConditionMessages: []string{"Version v1.30.0, v1.30.2 required"}, + }, + { + name: "kubernetes version does not match + delete annotation", + kcp: func() *controlplanev1.KubeadmControlPlane { + kcp := defaultKcp.DeepCopy() + kcp.Spec.Version = "v1.30.2" + return kcp + }(), + machine: func() *clusterv1.Machine { + machine := defaultMachine.DeepCopy() + machine.Spec.Version = "v1.30.0" + machine.Annotations = map[string]string{ + clusterv1.DeleteMachineAnnotation: "", + } + return machine + }(), + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, // Not eligible for in-place update because of delete annotation. 
+ expectLogMessages: []string{"Machine version \"v1.30.0\" is not equal to KCP version \"v1.30.2\""}, + expectConditionMessages: []string{"Version v1.30.0, v1.30.2 required"}, + }, + { + name: "kubernetes version does not match + remediate annotation", + kcp: func() *controlplanev1.KubeadmControlPlane { + kcp := defaultKcp.DeepCopy() + kcp.Spec.Version = "v1.30.2" return kcp }(), - machine: defaultMachine, // defaultMachine has "v1.31.0" - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Machine version \"v1.31.0\" is not equal to KCP version \"v1.31.2\""}, - expectConditionMessages: []string{"Version v1.31.0, v1.31.2 required"}, + machine: func() *clusterv1.Machine { + machine := defaultMachine.DeepCopy() + machine.Spec.Version = "v1.30.0" + machine.Annotations = map[string]string{ + clusterv1.RemediateMachineAnnotation: "", + } + return machine + }(), + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: false, // Not eligible for in-place update because of remediate annotation. + expectLogMessages: []string{"Machine version \"v1.30.0\" is not equal to KCP version \"v1.30.2\""}, + expectConditionMessages: []string{"Version v1.30.0, v1.30.2 required"}, }, { name: "KubeadmConfig is not up-to-date", @@ -1806,26 +1688,48 @@ func TestUpToDate(t *testing.T) { kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "bar" return kcp }(), - machine: defaultMachine, // was created with cluster name "foo" - infraConfigs: defaultInfraConfigs, - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Machine KubeadmConfig ClusterConfiguration is outdated: diff: v1beta2.ClusterConfiguration{\n ... // 4 identical fields\n Scheduler: {},\n DNS: {},\n- CertificatesDir: \"foo\",\n+ CertificatesDir: \"bar\",\n ImageRepository: \"\",\n FeatureGates: nil,\n ... // 2 identical fields\n }"}, + machine: defaultMachine, // was created with cluster name "foo" + infraConfigs: defaultInfraConfigs, + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{`Machine KubeadmConfig is outdated: diff: &v1beta2.KubeadmConfigSpec{ + ClusterConfiguration: v1beta2.ClusterConfiguration{ + ... // 4 identical fields + Scheduler: {}, + DNS: {}, +- CertificatesDir: "foo", ++ CertificatesDir: "bar", + ImageRepository: "", + FeatureGates: nil, + ... // 3 identical fields + }, + InitConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + JoinConfiguration: {NodeRegistration: {ImagePullPolicy: "IfNotPresent"}}, + ... 
// 11 identical fields + }`}, expectConditionMessages: []string{"KubeadmConfig is not up-to-date"}, }, { - name: "AWSMachine is not up-to-date", + name: "InfraMachine is not up-to-date", kcp: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKcp.DeepCopy() - kcp.Spec.MachineTemplate.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{APIGroup: clusterv1.GroupVersionInfrastructure.Group, Kind: "AWSMachineTemplate", Name: "template2"} // kcp moving to template 2 + kcp.Spec.MachineTemplate.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ + APIGroup: builder.InfrastructureGroupVersion.Group, + Kind: builder.TestInfrastructureMachineTemplateKind, + Name: "infra-machine-template2", + } // kcp moving to infra-machine-template2 return kcp }(), - machine: defaultMachine, - infraConfigs: defaultInfraConfigs, // infra config cloned from template1 - machineConfigs: defaultMachineConfigs, - expectUptoDate: false, - expectLogMessages: []string{"Infrastructure template on KCP rotated from AWSMachineTemplate.infrastructure.cluster.x-k8s.io template1 to AWSMachineTemplate.infrastructure.cluster.x-k8s.io template2"}, - expectConditionMessages: []string{"AWSMachine is not up-to-date"}, + machine: defaultMachine, + infraConfigs: defaultInfraConfigs, // infra config cloned from infra-machine-template1 + machineConfigs: defaultMachineConfigs, + expectUptoDate: false, + expectEligibleForInPlaceUpdate: true, + expectLogMessages: []string{"Infrastructure template on KCP rotated from " + + "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template1 to " + + "TestInfrastructureMachineTemplate.infrastructure.cluster.x-k8s.io infra-machine-template2"}, + expectConditionMessages: []string{"TestInfrastructureMachine is not up-to-date"}, }, } @@ -1833,11 +1737,28 @@ func TestUpToDate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - upToDate, logMessages, conditionMessages, err := UpToDate(tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(builder.TestInfrastructureMachineTemplateCRD, infraMachineTemplate1, infraMachineTemplate2).Build() + + upToDate, res, err := UpToDate(t.Context(), c, &clusterv1.Cluster{}, tt.machine, tt.kcp, &reconciliationTime, tt.infraConfigs, tt.machineConfigs) g.Expect(err).ToNot(HaveOccurred()) g.Expect(upToDate).To(Equal(tt.expectUptoDate)) - g.Expect(logMessages).To(BeComparableTo(tt.expectLogMessages)) - g.Expect(conditionMessages).To(Equal(tt.expectConditionMessages)) + g.Expect(res).ToNot(BeNil()) + g.Expect(res.EligibleForInPlaceUpdate).To(Equal(tt.expectEligibleForInPlaceUpdate)) + g.Expect(res.DesiredMachine).ToNot(BeNil()) + g.Expect(res.DesiredMachine.Spec.Version).To(Equal(tt.kcp.Spec.Version)) + g.Expect(res.CurrentInfraMachine).ToNot(BeNil()) + g.Expect(res.DesiredInfraMachine).ToNot(BeNil()) + g.Expect(res.CurrentKubeadmConfig).ToNot(BeNil()) + g.Expect(res.DesiredKubeadmConfig).ToNot(BeNil()) + if upToDate { + g.Expect(res.LogMessages).To(BeEmpty()) + g.Expect(res.ConditionMessages).To(BeEmpty()) + } else { + g.Expect(res.LogMessages).To(BeComparableTo(tt.expectLogMessages)) + g.Expect(res.ConditionMessages).To(Equal(tt.expectConditionMessages)) + } }) } } diff --git a/controlplane/kubeadm/internal/proxy/dial.go b/controlplane/kubeadm/internal/proxy/dial.go index f30ced0bb697..b22b0d25065c 100644 --- 
a/controlplane/kubeadm/internal/proxy/dial.go +++ b/controlplane/kubeadm/internal/proxy/dial.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/portforward" "k8s.io/client-go/transport/spdy" @@ -108,6 +109,16 @@ func (d *Dialer) DialContext(ctx context.Context, _ string, addr string) (net.Co dialer := spdy.NewDialer(d.upgrader, httpClient, "POST", req.URL()) + // Configure websocket dialer and keep spdy as fallback + // Note: websockets are enabled per default starting with kubernetes 1.31. + tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(req.URL(), d.proxy.KubeConfig) + if err != nil { + return nil, errors.Wrap(err, "error creating websocket tunneling dialer") + } + dialer = portforward.NewFallbackDialer(tunnelingDialer, dialer, func(err error) bool { + return httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err) + }) + // Create a new connection from the dialer. // // Warning: Any early return should close this connection, otherwise we're going to leak them. diff --git a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go deleted file mode 100644 index a132bd90f5a4..000000000000 --- a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go +++ /dev/null @@ -1,685 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhooks - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strings" - - "github.com/blang/semver/v4" - "github.com/coredns/corefile-migration/migration" - jsonpatch "github.com/evanphx/json-patch/v5" - "github.com/pkg/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" - controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - topologynames "sigs.k8s.io/cluster-api/internal/topology/names" - "sigs.k8s.io/cluster-api/util/container" - "sigs.k8s.io/cluster-api/util/secret" - "sigs.k8s.io/cluster-api/util/version" -) - -func (webhook *KubeadmControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(&controlplanev1.KubeadmControlPlane{}). - WithDefaulter(webhook). - WithValidator(webhook). 
- Complete() -} - -// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta2-kubeadmcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta2,name=default.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta2-kubeadmcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta2,name=validation.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 - -// KubeadmControlPlane implements a validation and defaulting webhook for KubeadmControlPlane. -type KubeadmControlPlane struct{} - -var _ webhook.CustomValidator = &KubeadmControlPlane{} -var _ webhook.CustomDefaulter = &KubeadmControlPlane{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type. -func (webhook *KubeadmControlPlane) Default(_ context.Context, obj runtime.Object) error { - k, ok := obj.(*controlplanev1.KubeadmControlPlane) - if !ok { - return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj)) - } - - defaultKubeadmControlPlaneSpec(&k.Spec) - - return nil -} - -func defaultKubeadmControlPlaneSpec(s *controlplanev1.KubeadmControlPlaneSpec) { - if s.Replicas == nil { - replicas := int32(1) - s.Replicas = &replicas - } - - if !strings.HasPrefix(s.Version, "v") { - s.Version = "v" + s.Version - } - - // Enforce RollingUpdate strategy and default MaxSurge if not set. - s.Rollout.Strategy.Type = controlplanev1.RollingUpdateStrategyType - s.Rollout.Strategy.RollingUpdate.MaxSurge = intstr.ValueOrDefault(s.Rollout.Strategy.RollingUpdate.MaxSurge, intstr.FromInt32(1)) -} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (webhook *KubeadmControlPlane) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - k, ok := obj.(*controlplanev1.KubeadmControlPlane) - if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj)) - } - - spec := k.Spec - allErrs := validateKubeadmControlPlaneSpec(spec, field.NewPath("spec")) - allErrs = append(allErrs, validateClusterConfiguration(nil, &spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) - allErrs = append(allErrs, spec.KubeadmConfigSpec.Validate(true, field.NewPath("spec", "kubeadmConfigSpec"))...) 
- if len(allErrs) > 0 { - return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), k.Name, allErrs) - } - return nil, nil -} - -const ( - spec = "spec" - kubeadmConfigSpec = "kubeadmConfigSpec" - clusterConfiguration = "clusterConfiguration" - initConfiguration = "initConfiguration" - joinConfiguration = "joinConfiguration" - nodeRegistration = "nodeRegistration" - skipPhases = "skipPhases" - patches = "patches" - directory = "directory" - bootCommands = "bootCommands" - preKubeadmCommands = "preKubeadmCommands" - postKubeadmCommands = "postKubeadmCommands" - files = "files" - users = "users" - apiServer = "apiServer" - controllerManager = "controllerManager" - scheduler = "scheduler" - ntp = "ntp" - ignition = "ignition" - diskSetup = "diskSetup" - featureGates = "featureGates" - timeouts = "timeouts" -) - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (webhook *KubeadmControlPlane) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - // add a * to indicate everything beneath is ok. - // For example, {"spec", "*"} will allow any path under "spec" to change. - // For example, {"spec"} will allow "spec" to also be unset. - allowedPaths := [][]string{ - // metadata - {"metadata", "*"}, - // spec.kubeadmConfigSpec.clusterConfiguration - {spec, kubeadmConfigSpec, clusterConfiguration, "etcd"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "dns"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "dns", "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "imageRepository"}, - {spec, kubeadmConfigSpec, clusterConfiguration, featureGates}, - {spec, kubeadmConfigSpec, clusterConfiguration, featureGates, "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, apiServer}, - {spec, kubeadmConfigSpec, clusterConfiguration, apiServer, "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager}, - {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager, "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, scheduler}, - {spec, kubeadmConfigSpec, clusterConfiguration, scheduler, "*"}, - {spec, kubeadmConfigSpec, clusterConfiguration, "certificateValidityPeriodDays"}, - // spec.kubeadmConfigSpec.initConfiguration - {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration}, - {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration, "*"}, - {spec, kubeadmConfigSpec, initConfiguration, patches, directory}, - {spec, kubeadmConfigSpec, initConfiguration, patches}, - {spec, kubeadmConfigSpec, initConfiguration, skipPhases}, - {spec, kubeadmConfigSpec, initConfiguration, "bootstrapTokens"}, - {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint"}, - {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint", "*"}, - {spec, kubeadmConfigSpec, initConfiguration, timeouts}, - {spec, kubeadmConfigSpec, initConfiguration, timeouts, "*"}, - // spec.kubeadmConfigSpec.joinConfiguration - {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration}, - {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration, "*"}, - {spec, kubeadmConfigSpec, joinConfiguration, patches, directory}, - {spec, kubeadmConfigSpec, joinConfiguration, patches}, - 
{spec, kubeadmConfigSpec, joinConfiguration, skipPhases}, - {spec, kubeadmConfigSpec, joinConfiguration, "caCertPath"}, - {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane"}, - {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane", "*"}, - {spec, kubeadmConfigSpec, joinConfiguration, "discovery"}, - {spec, kubeadmConfigSpec, joinConfiguration, "discovery", "*"}, - {spec, kubeadmConfigSpec, joinConfiguration, timeouts}, - {spec, kubeadmConfigSpec, joinConfiguration, timeouts, "*"}, - // spec.kubeadmConfigSpec - {spec, kubeadmConfigSpec, bootCommands}, - {spec, kubeadmConfigSpec, preKubeadmCommands}, - {spec, kubeadmConfigSpec, postKubeadmCommands}, - {spec, kubeadmConfigSpec, files}, - {spec, kubeadmConfigSpec, "verbosity"}, - {spec, kubeadmConfigSpec, users}, - {spec, kubeadmConfigSpec, ntp}, - {spec, kubeadmConfigSpec, ntp, "*"}, - {spec, kubeadmConfigSpec, ignition}, - {spec, kubeadmConfigSpec, ignition, "*"}, - {spec, kubeadmConfigSpec, diskSetup}, - {spec, kubeadmConfigSpec, diskSetup, "*"}, - {spec, kubeadmConfigSpec, "format"}, - {spec, kubeadmConfigSpec, "mounts"}, - // spec.machineTemplate - {spec, "machineTemplate", "metadata"}, - {spec, "machineTemplate", "metadata", "*"}, - {spec, "machineTemplate", "spec"}, - {spec, "machineTemplate", "spec", "*"}, - // spec - {spec, "replicas"}, - {spec, "version"}, - {spec, "remediation"}, - {spec, "remediation", "*"}, - {spec, "machineNaming"}, - {spec, "machineNaming", "*"}, - {spec, "rollout"}, - {spec, "rollout", "*"}, - } - - oldK, ok := oldObj.(*controlplanev1.KubeadmControlPlane) - if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", oldObj)) - } - - newK, ok := newObj.(*controlplanev1.KubeadmControlPlane) - if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", newObj)) - } - - allErrs := validateKubeadmControlPlaneSpec(newK.Spec, field.NewPath("spec")) - - originalJSON, err := json.Marshal(oldK) - if err != nil { - return nil, apierrors.NewInternalError(err) - } - modifiedJSON, err := json.Marshal(newK) - if err != nil { - return nil, apierrors.NewInternalError(err) - } - - diff, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) - if err != nil { - return nil, apierrors.NewInternalError(err) - } - jsonPatch := map[string]interface{}{} - if err := json.Unmarshal(diff, &jsonPatch); err != nil { - return nil, apierrors.NewInternalError(err) - } - // Build a list of all paths that are trying to change - diffpaths := paths([]string{}, jsonPatch) - // Every path in the diff must be valid for the update function to work. - for _, path := range diffpaths { - // Ignore paths that are empty - if len(path) == 0 { - continue - } - if !allowed(allowedPaths, path) { - if len(path) == 1 { - allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0]), "cannot be modified")) - continue - } - allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0], path[1:]...), "cannot be modified")) - } - } - - allErrs = append(allErrs, webhook.validateVersion(oldK, newK)...) - allErrs = append(allErrs, validateClusterConfiguration(&oldK.Spec.KubeadmConfigSpec.ClusterConfiguration, &newK.Spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) - allErrs = append(allErrs, webhook.validateCoreDNSVersion(oldK, newK)...) - allErrs = append(allErrs, newK.Spec.KubeadmConfigSpec.Validate(true, field.NewPath("spec", "kubeadmConfigSpec"))...) 
- - if len(allErrs) > 0 { - return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), newK.Name, allErrs) - } - - return nil, nil -} - -func validateKubeadmControlPlaneSpec(s controlplanev1.KubeadmControlPlaneSpec, pathPrefix *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if s.Replicas == nil { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("replicas"), - "is required", - ), - ) - } else if *s.Replicas <= 0 { - // The use of the scale subresource should provide a guarantee that negative values - // should not be accepted for this field, but since we have to validate that Replicas != 0 - // it doesn't hurt to also additionally validate for negative numbers here as well. - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("replicas"), - "cannot be less than or equal to 0", - ), - ) - } - - externalEtcd := s.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() - if !externalEtcd { - if s.Replicas != nil && *s.Replicas%2 == 0 { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("replicas"), - "cannot be an even number when etcd is stacked", - ), - ) - } - } - - if s.MachineTemplate.Spec.InfrastructureRef.APIGroup == "" { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("machineTemplate", "infrastructure", "apiGroup"), - s.MachineTemplate.Spec.InfrastructureRef.APIGroup, - "cannot be empty", - ), - ) - } - if s.MachineTemplate.Spec.InfrastructureRef.Kind == "" { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("machineTemplate", "infrastructure", "kind"), - s.MachineTemplate.Spec.InfrastructureRef.Kind, - "cannot be empty", - ), - ) - } - if s.MachineTemplate.Spec.InfrastructureRef.Name == "" { - allErrs = append( - allErrs, - field.Invalid( - pathPrefix.Child("machineTemplate", "infrastructure", "name"), - s.MachineTemplate.Spec.InfrastructureRef.Name, - "cannot be empty", - ), - ) - } - - // Validate the metadata of the MachineTemplate - allErrs = append(allErrs, s.MachineTemplate.ObjectMeta.Validate(pathPrefix.Child("machineTemplate", "metadata"))...) - - if !version.KubeSemver.MatchString(s.Version) { - allErrs = append(allErrs, field.Invalid(pathPrefix.Child("version"), s.Version, "must be a valid semantic version")) - } - - allErrs = append(allErrs, validateRolloutAndCertValidityFields(s.Rollout, s.KubeadmConfigSpec.ClusterConfiguration, s.Replicas, pathPrefix)...) - allErrs = append(allErrs, validateNaming(s.MachineNaming, pathPrefix.Child("machineNaming"))...) 
- return allErrs -} - -func validateRolloutAndCertValidityFields(rolloutSpec controlplanev1.KubeadmControlPlaneRolloutSpec, clusterConfiguration bootstrapv1.ClusterConfiguration, replicas *int32, pathPrefix *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - rolloutStrategy := rolloutSpec.Strategy - - if reflect.DeepEqual(rolloutStrategy, controlplanev1.KubeadmControlPlaneRolloutStrategy{}) { - return nil - } - - if rolloutStrategy.Type != controlplanev1.RollingUpdateStrategyType { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("rollout", "strategy", "type"), - "only RollingUpdate is supported", - ), - ) - } - - if rolloutStrategy.RollingUpdate.MaxSurge != nil { - ios1 := intstr.FromInt32(1) - ios0 := intstr.FromInt32(0) - if rolloutStrategy.RollingUpdate.MaxSurge.IntValue() == ios0.IntValue() && (replicas != nil && *replicas < int32(3)) { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("rollout", "strategy", "rollingUpdate"), - "when KubeadmControlPlane is configured to scale-in, replica count needs to be at least 3", - ), - ) - } - if rolloutStrategy.RollingUpdate.MaxSurge.IntValue() != ios1.IntValue() && rolloutStrategy.RollingUpdate.MaxSurge.IntValue() != ios0.IntValue() { - allErrs = append( - allErrs, - field.Required( - pathPrefix.Child("rollout", "strategy", "rollingUpdate", "maxSurge"), - "value must be 1 or 0", - ), - ) - } - } - - if clusterConfiguration.CertificateValidityPeriodDays != 0 { - if clusterConfiguration.CACertificateValidityPeriodDays != 0 { - if clusterConfiguration.CertificateValidityPeriodDays > clusterConfiguration.CACertificateValidityPeriodDays { - allErrs = append(allErrs, - field.Invalid( - pathPrefix.Child("kubeadmConfigSpec", "clusterConfiguration", "certificateValidityPeriodDays"), - clusterConfiguration.CertificateValidityPeriodDays, - fmt.Sprintf("must be less than or equal to caCertificateValidityPeriodDays (%d)", clusterConfiguration.CACertificateValidityPeriodDays), - ), - ) - } - } else if clusterConfiguration.CertificateValidityPeriodDays > secret.DefaultCACertificatesExpiryDays { - allErrs = append(allErrs, - field.Invalid( - pathPrefix.Child("kubeadmConfigSpec", "clusterConfiguration", "certificateValidityPeriodDays"), - clusterConfiguration.CertificateValidityPeriodDays, - fmt.Sprintf("must be less than or equal to the default value of caCertificateValidityPeriodDays (%d)", secret.DefaultCACertificatesExpiryDays), - ), - ) - } - - if rolloutSpec.Before.CertificatesExpiryDays != 0 { - if rolloutSpec.Before.CertificatesExpiryDays >= clusterConfiguration.CertificateValidityPeriodDays { - allErrs = append(allErrs, - field.Invalid( - pathPrefix.Child("rollout", "before", "certificatesExpiryDays"), - rolloutSpec.Before.CertificatesExpiryDays, - fmt.Sprintf("must be less than certificateValidityPeriodDays (%d)", clusterConfiguration.CertificateValidityPeriodDays))) - } - } - } - - return allErrs -} - -func validateNaming(machineNaming controlplanev1.MachineNamingSpec, pathPrefix *field.Path) field.ErrorList { - var allErrs field.ErrorList - - if machineNaming.Template != "" { - if !strings.Contains(machineNaming.Template, "{{ .random }}") { - allErrs = append(allErrs, - field.Invalid( - pathPrefix.Child("template"), - machineNaming.Template, - "invalid template, {{ .random }} is missing", - )) - } - name, err := topologynames.KCPMachineNameGenerator(machineNaming.Template, "cluster", "kubeadmcontrolplane").GenerateName() - if err != nil { - allErrs = append(allErrs, - field.Invalid( - 
pathPrefix.Child("template"), - machineNaming.Template, - fmt.Sprintf("invalid template: %v", err), - )) - } else { - for _, err := range validation.IsDNS1123Subdomain(name) { - allErrs = append(allErrs, - field.Invalid( - pathPrefix.Child("template"), - machineNaming.Template, - fmt.Sprintf("invalid template, generated names would not be valid Kubernetes object names: %v", err), - )) - } - } - } - - return allErrs -} - -func validateClusterConfiguration(oldClusterConfiguration, newClusterConfiguration *bootstrapv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if newClusterConfiguration == nil { - return allErrs - } - - // TODO: Remove when kubeadm types include OpenAPI validation - if !container.ImageTagIsValid(newClusterConfiguration.DNS.ImageTag) { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("dns", "imageTag"), - fmt.Sprintf("tag %s is invalid", newClusterConfiguration.DNS.ImageTag), - ), - ) - } - - if newClusterConfiguration.DNS.ImageTag != "" { - if _, err := version.ParseTolerantImageTag(newClusterConfiguration.DNS.ImageTag); err != nil { - allErrs = append(allErrs, - field.Invalid( - field.NewPath("dns", "imageTag"), - newClusterConfiguration.DNS.ImageTag, - fmt.Sprintf("failed to parse CoreDNS version: %v", err), - ), - ) - } - } - - // TODO: Remove when kubeadm types include OpenAPI validation - if newClusterConfiguration.Etcd.Local.IsDefined() && !container.ImageTagIsValid(newClusterConfiguration.Etcd.Local.ImageTag) { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("etcd", "local", "imageTag"), - fmt.Sprintf("tag %s is invalid", newClusterConfiguration.Etcd.Local.ImageTag), - ), - ) - } - - if newClusterConfiguration.Etcd.Local.IsDefined() && newClusterConfiguration.Etcd.External.IsDefined() { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("etcd", "local"), - "cannot have both external and local etcd", - ), - ) - } - - // update validations - if oldClusterConfiguration != nil { - if (newClusterConfiguration.Etcd.External.IsDefined() && !oldClusterConfiguration.Etcd.External.IsDefined()) || - (!newClusterConfiguration.Etcd.External.IsDefined() && oldClusterConfiguration.Etcd.External.IsDefined()) { - allErrs = append( - allErrs, - field.Forbidden( - pathPrefix.Child("etcd", "external"), - "cannot change between external and local etcd", - ), - ) - } - } - - return allErrs -} - -func allowed(allowList [][]string, path []string) bool { - for _, allowed := range allowList { - if pathsMatch(allowed, path) { - return true - } - } - return false -} - -func pathsMatch(allowed, path []string) bool { - // if either are empty then no match can be made - if len(allowed) == 0 || len(path) == 0 { - return false - } - i := 0 - for i = range path { - // reached the end of the allowed path and no match was found - if i > len(allowed)-1 { - return false - } - if allowed[i] == "*" { - return true - } - if path[i] != allowed[i] { - return false - } - } - // path has been completely iterated and has not matched the end of the path. - // e.g. allowed: []string{"a","b","c"}, path: []string{"a"} - return i >= len(allowed)-1 -} - -// paths builds a slice of paths that are being modified. 
-func paths(path []string, diff map[string]interface{}) [][]string { - allPaths := [][]string{} - for key, m := range diff { - nested, ok := m.(map[string]interface{}) - if !ok { - // We have to use a copy of path, because otherwise the slice we append to - // allPaths would be overwritten in another iteration. - tmp := make([]string, len(path)) - copy(tmp, path) - allPaths = append(allPaths, append(tmp, key)) - continue - } - allPaths = append(allPaths, paths(append(path, key), nested)...) - } - return allPaths -} - -func (webhook *KubeadmControlPlane) validateCoreDNSVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { - // return if either current or target versions is empty - if newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" || oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" { - return allErrs - } - targetDNS := &newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS - - fromVersion, err := version.ParseTolerantImageTag(oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag) - if err != nil { - allErrs = append(allErrs, - field.Invalid( - field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), - oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag, - fmt.Sprintf("failed to parse current CoreDNS version: %v", err), - ), - ) - return allErrs - } - - toVersion, err := version.ParseTolerantImageTag(targetDNS.ImageTag) - if err != nil { - allErrs = append(allErrs, - field.Invalid( - field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), - targetDNS.ImageTag, - fmt.Sprintf("failed to parse target CoreDNS version: %v", err), - ), - ) - return allErrs - } - // If the versions are equal return here without error. - // This allows an upgrade where the version of CoreDNS in use is not supported by the migration tool. - if version.Compare(toVersion, fromVersion, version.WithoutPreReleases()) == 0 { - return allErrs - } - - // Skip validating if the skip CoreDNS annotation is set. If set, KCP doesn't use the migration library. - if _, ok := newK.Annotations[controlplanev1.SkipCoreDNSAnnotation]; ok { - return allErrs - } - - if err := migration.ValidUpMigration(version.MajorMinorPatch(fromVersion).String(), version.MajorMinorPatch(toVersion).String()); err != nil { - allErrs = append( - allErrs, - field.Forbidden( - field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), - fmt.Sprintf("cannot migrate CoreDNS up to '%v' from '%v': %v", toVersion, fromVersion, err), - ), - ) - } - - return allErrs -} - -func (webhook *KubeadmControlPlane) validateVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { - previousVersion := oldK.Spec.Version - fromVersion, err := semver.ParseTolerant(previousVersion) - if err != nil { - allErrs = append(allErrs, - field.InternalError( - field.NewPath("spec", "version"), - errors.Wrapf(err, "failed to parse current kubeadmcontrolplane version: %s", previousVersion), - ), - ) - return allErrs - } - - toVersion, err := semver.ParseTolerant(newK.Spec.Version) - if err != nil { - allErrs = append(allErrs, - field.InternalError( - field.NewPath("spec", "version"), - errors.Wrapf(err, "failed to parse updated kubeadmcontrolplane version: %s", newK.Spec.Version), - ), - ) - return allErrs - } - - // Validate that the update is upgrading at most one minor version. - // Note: Skipping a minor version is not allowed. 
- // Note: Checking against this ceilVersion allows upgrading to the next minor - // version irrespective of the patch version. - ceilVersion := semver.Version{ - Major: fromVersion.Major, - Minor: fromVersion.Minor + 2, - Patch: 0, - } - if version.Compare(toVersion, ceilVersion, version.WithoutPreReleases()) >= 0 { - allErrs = append(allErrs, - field.Forbidden( - field.NewPath("spec", "version"), - fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, newK.Spec.Version), - ), - ) - } - - return allErrs -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (webhook *KubeadmControlPlane) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { - return nil, nil -} diff --git a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go deleted file mode 100644 index 1d738699dc93..000000000000 --- a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go +++ /dev/null @@ -1,1467 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhooks - -import ( - "strings" - "testing" - "time" - - . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/ptr" - ctrl "sigs.k8s.io/controller-runtime" - - bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" - controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/webhooks/util" -) - -var ( - ctx = ctrl.SetupSignalHandler() -) - -func TestKubeadmControlPlaneDefault(t *testing.T) { - g := NewWithT(t) - - kcp := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.18.3", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: "test", - Kind: "UnknownInfraMachine", - Name: "foo", - }, - }, - }, - }, - } - updateDefaultingValidationKCP := kcp.DeepCopy() - updateDefaultingValidationKCP.Spec.Version = "v1.18.3" - updateDefaultingValidationKCP.Spec.MachineTemplate.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ - APIGroup: "test", - Kind: "UnknownInfraMachine", - Name: "foo", - } - webhook := &KubeadmControlPlane{} - t.Run("for KubeadmControlPlane", util.CustomDefaultValidateTest(ctx, updateDefaultingValidationKCP, webhook)) - g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) - - g.Expect(kcp.Spec.Version).To(Equal("v1.18.3")) - g.Expect(kcp.Spec.Rollout.Strategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) - 
g.Expect(kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) -} - -func TestKubeadmControlPlaneValidateCreate(t *testing.T) { - valid := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "foo", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: "test", - Kind: "UnknownInfraMachine", - Name: "infraTemplate", - }, - }, - }, - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - }, - Replicas: ptr.To[int32](1), - Version: "v1.19.0", - Rollout: controlplanev1.KubeadmControlPlaneRolloutSpec{ - Strategy: controlplanev1.KubeadmControlPlaneRolloutStrategy{ - Type: controlplanev1.RollingUpdateStrategyType, - RollingUpdate: controlplanev1.KubeadmControlPlaneRolloutStrategyRollingUpdate{ - MaxSurge: &intstr.IntOrString{ - IntVal: 1, - }, - }, - }, - }, - }, - } - - invalidMaxSurge := valid.DeepCopy() - invalidMaxSurge.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(3) - - stringMaxSurge := valid.DeepCopy() - val := intstr.FromString("1") - stringMaxSurge.Spec.Rollout.Strategy.RollingUpdate.MaxSurge = &val - - missingReplicas := valid.DeepCopy() - missingReplicas.Spec.Replicas = nil - - zeroReplicas := valid.DeepCopy() - zeroReplicas.Spec.Replicas = ptr.To[int32](0) - - evenReplicas := valid.DeepCopy() - evenReplicas.Spec.Replicas = ptr.To[int32](2) - - evenReplicasExternalEtcd := evenReplicas.DeepCopy() - evenReplicasExternalEtcd.Spec.KubeadmConfigSpec = bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - Etcd: bootstrapv1.Etcd{ - External: bootstrapv1.ExternalEtcd{ - Endpoints: []string{"1.2.3.4"}, - }, - }, - }, - } - - validVersion := valid.DeepCopy() - validVersion.Spec.Version = "v1.16.6" - - invalidVersion1 := valid.DeepCopy() - invalidVersion1.Spec.Version = "vv1.16.6" - - invalidVersion2 := valid.DeepCopy() - invalidVersion2.Spec.Version = "1.16.6" - - invalidCoreDNSVersion := valid.DeepCopy() - invalidCoreDNSVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag = "1-7" // not a valid semantic version - - invalidIgnitionConfiguration := valid.DeepCopy() // Format is not set to ignition. 
- invalidIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ - ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ - AdditionalConfig: "config", - }, - } - - validIgnitionConfiguration := valid.DeepCopy() - validIgnitionConfiguration.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition - validIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{} - - invalidMetadata := valid.DeepCopy() - invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ - "foo": "$invalid-key", - "bar": strings.Repeat("a", 64) + "too-long-value", - "/invalid-key": "foo", - } - invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ - "/invalid-key": "foo", - } - - invalidControlPlaneComponentHealthCheckSeconds := valid.DeepCopy() - invalidControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.InitConfiguration = bootstrapv1.InitConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} - - validControlPlaneComponentHealthCheckSeconds := valid.DeepCopy() - validControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.InitConfiguration = bootstrapv1.InitConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} - validControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.JoinConfiguration = bootstrapv1.JoinConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} - - invalidCertificateValidityPeriodDaysGreaterCA := valid.DeepCopy() - invalidCertificateValidityPeriodDaysGreaterCA.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 350 - invalidCertificateValidityPeriodDaysGreaterCA.Spec.KubeadmConfigSpec.ClusterConfiguration.CACertificateValidityPeriodDays = 300 - - invalidCertificateValidityPeriodDaysGreaterDefault := valid.DeepCopy() - invalidCertificateValidityPeriodDaysGreaterDefault.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 3651 - invalidCertificateValidityPeriodDaysGreaterDefault.Spec.KubeadmConfigSpec.ClusterConfiguration.CACertificateValidityPeriodDays = 0 // default is 3650 - - invalidRolloutBeforeCertificatesExpiryDays := valid.DeepCopy() - invalidRolloutBeforeCertificatesExpiryDays.Spec.Rollout.Before.CertificatesExpiryDays = 8 - invalidRolloutBeforeCertificatesExpiryDays.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 7 - - tests := []struct { - name string - enableIgnitionFeature bool - expectErr bool - kcp *controlplanev1.KubeadmControlPlane - }{ - { - name: "should succeed when given a valid config", - expectErr: false, - kcp: valid, - }, - { - name: "should return error when replicas is nil", - expectErr: true, - kcp: missingReplicas, - }, - { - name: "should return error when replicas is zero", - expectErr: true, - kcp: zeroReplicas, - }, - { - name: "should return error when replicas is even", - expectErr: true, - kcp: evenReplicas, - }, - { - name: "should allow even replicas when using external etcd", - expectErr: false, - kcp: evenReplicasExternalEtcd, - }, - { - name: "should succeed when given a valid semantic version with prepended 'v'", - expectErr: false, - kcp: validVersion, - }, - { - name: "should error when given a valid semantic version without 'v'", - expectErr: true, - kcp: invalidVersion2, - }, - { - name: "should return error when given an invalid semantic version", - expectErr: true, - kcp: invalidVersion1, - }, - { - name: "should 
return error when given an invalid semantic CoreDNS version", - expectErr: true, - kcp: invalidCoreDNSVersion, - }, - { - name: "should return error when maxSurge is not 1", - expectErr: true, - kcp: invalidMaxSurge, - }, - { - name: "should succeed when maxSurge is a string", - expectErr: false, - kcp: stringMaxSurge, - }, - { - name: "should return error when Ignition configuration is invalid", - enableIgnitionFeature: true, - expectErr: true, - kcp: invalidIgnitionConfiguration, - }, - { - name: "should succeed when Ignition configuration is valid", - enableIgnitionFeature: true, - expectErr: false, - kcp: validIgnitionConfiguration, - }, - { - name: "should return error for invalid metadata", - enableIgnitionFeature: true, - expectErr: true, - kcp: invalidMetadata, - }, - { - name: "should return error for invalid Timeouts.ControlPlaneComponentHealthCheckSeconds", - expectErr: true, - kcp: invalidControlPlaneComponentHealthCheckSeconds, - }, - { - name: "should pass for valid Timeouts.ControlPlaneComponentHealthCheckSeconds", - kcp: validControlPlaneComponentHealthCheckSeconds, - }, - { - name: "should return error when CertificateValidityPeriodDays greater than CACertificateValidityPeriodDays", - expectErr: true, - kcp: invalidCertificateValidityPeriodDaysGreaterCA, - }, - { - name: "should return error when CertificateValidityPeriodDays greater than CACertificateValidityPeriodDays default", - expectErr: true, - kcp: invalidCertificateValidityPeriodDaysGreaterDefault, - }, - { - name: "should return error when rolloutBefore CertificatesExpiryDays greater than cluster CertificateValidityPeriodDays", - expectErr: true, - kcp: invalidRolloutBeforeCertificatesExpiryDays, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.enableIgnitionFeature { - // NOTE: KubeadmBootstrapFormatIgnition feature flag is disabled by default. - // Enabling the feature flag temporarily for this test. 
- utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.KubeadmBootstrapFormatIgnition, true) - } - - g := NewWithT(t) - - webhook := &KubeadmControlPlane{} - - warnings, err := webhook.ValidateCreate(ctx, tt.kcp) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - g.Expect(warnings).To(BeEmpty()) - }) - } -} - -func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { - before := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "foo", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: "test", - Kind: "UnknownInfraMachine", - Name: "infraTemplate", - }, - Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ - NodeDrainTimeoutSeconds: ptr.To(int32(1)), - NodeVolumeDetachTimeoutSeconds: ptr.To(int32(1)), - NodeDeletionTimeoutSeconds: ptr.To(int32(1)), - }, - }, - }, - Replicas: ptr.To[int32](1), - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - InitConfiguration: bootstrapv1.InitConfiguration{ - LocalAPIEndpoint: bootstrapv1.APIEndpoint{ - AdvertiseAddress: "127.0.0.1", - BindPort: int32(443), - }, - NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "test", - }, - Timeouts: bootstrapv1.Timeouts{ - ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10), - KubeletHealthCheckSeconds: ptr.To[int32](40), - }, - }, - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - DNS: bootstrapv1.DNS{ - ImageRepository: "registry.k8s.io/coredns", - ImageTag: "1.6.5", - }, - CertificateValidityPeriodDays: 100, - CACertificateValidityPeriodDays: 365, - }, - JoinConfiguration: bootstrapv1.JoinConfiguration{ - NodeRegistration: bootstrapv1.NodeRegistrationOptions{ - Name: "test", - }, - Timeouts: bootstrapv1.Timeouts{ - ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10), - KubeletHealthCheckSeconds: ptr.To[int32](40), - }, - }, - PreKubeadmCommands: []string{ - "test", "foo", - }, - PostKubeadmCommands: []string{ - "test", "foo", - }, - Files: []bootstrapv1.File{ - { - Path: "test", - }, - }, - Users: []bootstrapv1.User{ - { - Name: "user", - SSHAuthorizedKeys: []string{ - "ssh-rsa foo", - }, - }, - }, - NTP: bootstrapv1.NTP{ - Servers: []string{"test-server-1", "test-server-2"}, - Enabled: ptr.To(true), - }, - }, - Version: "v1.16.6", - Rollout: controlplanev1.KubeadmControlPlaneRolloutSpec{ - Before: controlplanev1.KubeadmControlPlaneRolloutBeforeSpec{ - CertificatesExpiryDays: 7, - }, - Strategy: controlplanev1.KubeadmControlPlaneRolloutStrategy{ - Type: controlplanev1.RollingUpdateStrategyType, - RollingUpdate: controlplanev1.KubeadmControlPlaneRolloutStrategyRollingUpdate{ - MaxSurge: &intstr.IntOrString{ - IntVal: 1, - }, - }, - }, - }, - }, - } - - updateMaxSurgeVal := before.DeepCopy() - updateMaxSurgeVal.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(0) - updateMaxSurgeVal.Spec.Replicas = ptr.To[int32](3) - - wrongReplicaCountForScaleIn := before.DeepCopy() - wrongReplicaCountForScaleIn.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(0) - - validUpdateKubeadmConfigInit := before.DeepCopy() - validUpdateKubeadmConfigInit.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} - - invalidUpdateKubeadmConfigCluster := before.DeepCopy() - 
invalidUpdateKubeadmConfigCluster.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ - CertificatesDir: "some-other-value", - } - - validUpdateKubeadmConfigJoin := before.DeepCopy() - validUpdateKubeadmConfigJoin.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} - - beforeKubeadmConfigFormatSet := before.DeepCopy() - beforeKubeadmConfigFormatSet.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig - invalidUpdateKubeadmConfigFormat := beforeKubeadmConfigFormatSet.DeepCopy() - invalidUpdateKubeadmConfigFormat.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition - - validUpdate := before.DeepCopy() - validUpdate.Labels = map[string]string{"blue": "green"} - validUpdate.Spec.KubeadmConfigSpec.BootCommands = []string{"ab", "abc"} - validUpdate.Spec.KubeadmConfigSpec.PreKubeadmCommands = []string{"ab", "abc"} - validUpdate.Spec.KubeadmConfigSpec.PostKubeadmCommands = []string{"ab", "abc"} - validUpdate.Spec.KubeadmConfigSpec.Files = []bootstrapv1.File{ - { - Path: "ab", - }, - { - Path: "abc", - }, - } - validUpdate.Spec.Version = "v1.17.1" - validUpdate.Spec.KubeadmConfigSpec.Users = []bootstrapv1.User{ - { - Name: "bar", - SSHAuthorizedKeys: []string{ - "ssh-rsa bar", - "ssh-rsa foo", - }, - }, - } - validUpdate.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ - "label": "labelValue", - } - validUpdate.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ - "annotation": "labelAnnotation", - } - validUpdate.Spec.MachineTemplate.Spec.InfrastructureRef.APIGroup = "test-changed" - validUpdate.Spec.MachineTemplate.Spec.InfrastructureRef.Name = "orange" - validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds = ptr.To(int32(10)) - validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(10)) - validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds = ptr.To(int32(10)) - validUpdate.Spec.Replicas = ptr.To[int32](5) - now := metav1.NewTime(time.Now()) - validUpdate.Spec.Rollout.After = now - validUpdate.Spec.Rollout.Before.CertificatesExpiryDays = 14 - validUpdate.Spec.Remediation = controlplanev1.KubeadmControlPlaneRemediationSpec{ - MaxRetry: ptr.To[int32](50), - MinHealthyPeriodSeconds: ptr.To(int32(10 * 60 * 60)), - RetryPeriodSeconds: ptr.To[int32](10 * 60), - } - validUpdate.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig - - scaleToZero := before.DeepCopy() - scaleToZero.Spec.Replicas = ptr.To[int32](0) - - scaleToEven := before.DeepCopy() - scaleToEven.Spec.Replicas = ptr.To[int32](2) - - missingReplicas := before.DeepCopy() - missingReplicas.Spec.Replicas = nil - - etcdLocalImageTag := before.DeepCopy() - etcdLocalImageTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - ImageTag: "v9.1.1", - } - etcdLocalImageTagAndDataDir := etcdLocalImageTag.DeepCopy() - etcdLocalImageTagAndDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.DataDir = "/foo" - - etcdLocalImageBuildTag := before.DeepCopy() - etcdLocalImageBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - ImageTag: "v9.1.1_validBuild1", - } - - etcdLocalImageInvalidTag := before.DeepCopy() - etcdLocalImageInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - ImageTag: "v9.1.1+invalidBuild1", - } - - unsetEtcdLocal := etcdLocalImageTag.DeepCopy() - unsetEtcdLocal.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{} 
- - controlPlaneEndpoint := before.DeepCopy() - controlPlaneEndpoint.Spec.KubeadmConfigSpec.ClusterConfiguration.ControlPlaneEndpoint = "some control plane endpoint" - - apiServer := before.DeepCopy() - apiServer.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer = bootstrapv1.APIServer{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "foo", - Value: ptr.To("bar"), - }, - }, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1"}}, - CertSANs: []string{"foo", "bar"}, - } - - controllerManager := before.DeepCopy() - controllerManager.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager = bootstrapv1.ControllerManager{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "controller manager field", - Value: ptr.To("controller manager value"), - }, - }, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount", HostPath: "/foo", MountPath: "bar", ReadOnly: ptr.To(true), PathType: "File"}}, - } - - scheduler := before.DeepCopy() - scheduler.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler = bootstrapv1.Scheduler{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "scheduler field", - Value: ptr.To("scheduler value"), - }, - }, - ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount", HostPath: "/foo", MountPath: "bar", ReadOnly: ptr.To(true), PathType: "File"}}, - } - - dns := before.DeepCopy() - dns.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "v1.6.6_foobar.1", - } - - dnsBuildTag := before.DeepCopy() - dnsBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "1.6.7", - } - - dnsInvalidTag := before.DeepCopy() - dnsInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "v0.20.0+invalidBuild1", - } - - dnsInvalidCoreDNSToVersion := dns.DeepCopy() - dnsInvalidCoreDNSToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "1.6.5", - } - - validCoreDNSCustomToVersion := dns.DeepCopy() - validCoreDNSCustomToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "v1.6.6_foobar.2", - } - validUnsupportedCoreDNSVersion := dns.DeepCopy() - validUnsupportedCoreDNSVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "v99.99.99", - } - - validUnsupportedCoreDNSVersionWithSkipAnnotation := dns.DeepCopy() - validUnsupportedCoreDNSVersionWithSkipAnnotation.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "gcr.io/capi-test", - ImageTag: "v99.99.99", - } - validUnsupportedCoreDNSVersionWithSkipAnnotation.Annotations = map[string]string{ - controlplanev1.SkipCoreDNSAnnotation: "", - } - - unsetCoreDNSToVersion := dns.DeepCopy() - unsetCoreDNSToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ - ImageRepository: "", - ImageTag: "", - } - - certificatesDir := before.DeepCopy() - certificatesDir.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "a new certificates directory" - - imageRepository := before.DeepCopy() - imageRepository.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository = "a new image repository" - - featureGates := before.DeepCopy() - featureGates.Spec.KubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{"a feature gate": true} - - externalEtcd := before.DeepCopy() - 
externalEtcd.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = bootstrapv1.ExternalEtcd{ - KeyFile: "some key file", - } - externalEtcdChanged := before.DeepCopy() - externalEtcdChanged.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = bootstrapv1.ExternalEtcd{ - KeyFile: "another key file", - } - - localDataDir := before.DeepCopy() - localDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - DataDir: "some local data dir", - } - - localPeerCertSANs := before.DeepCopy() - localPeerCertSANs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - PeerCertSANs: []string{"a cert"}, - } - - localServerCertSANs := before.DeepCopy() - localServerCertSANs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - ServerCertSANs: []string{"a cert"}, - } - - localExtraArgs := before.DeepCopy() - localExtraArgs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ - ExtraArgs: []bootstrapv1.Arg{ - { - Name: "an arg", - Value: ptr.To("a value"), - }, - }, - } - - beforeExternalEtcdCluster := before.DeepCopy() - beforeExternalEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ - Etcd: bootstrapv1.Etcd{ - External: bootstrapv1.ExternalEtcd{ - Endpoints: []string{"127.0.0.1"}, - }, - }, - } - scaleToEvenExternalEtcdCluster := beforeExternalEtcdCluster.DeepCopy() - scaleToEvenExternalEtcdCluster.Spec.Replicas = ptr.To[int32](2) - - beforeInvalidEtcdCluster := before.DeepCopy() - beforeInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ - Local: bootstrapv1.LocalEtcd{ - ImageRepository: "image-repository", - ImageTag: "latest", - }, - } - - afterInvalidEtcdCluster := beforeInvalidEtcdCluster.DeepCopy() - afterInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ - External: bootstrapv1.ExternalEtcd{ - Endpoints: []string{"127.0.0.1"}, - }, - } - - withoutClusterConfiguration := before.DeepCopy() - withoutClusterConfiguration.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{} - - updateNTPServers := before.DeepCopy() - updateNTPServers.Spec.KubeadmConfigSpec.NTP.Servers = []string{"new-server"} - - disableNTPServers := before.DeepCopy() - disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = ptr.To(false) - - unsetRolloutBefore := before.DeepCopy() - unsetRolloutBefore.Spec.Rollout.Before = controlplanev1.KubeadmControlPlaneRolloutBeforeSpec{} - - invalidIgnitionConfiguration := before.DeepCopy() - invalidIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ // Format is not set - ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ - AdditionalConfig: "config", - }, - } - - validIgnitionConfigurationBefore := before.DeepCopy() - validIgnitionConfigurationBefore.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition - validIgnitionConfigurationBefore.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ - ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ - AdditionalConfig: "config-before", - }, - } - - validIgnitionConfigurationAfter := validIgnitionConfigurationBefore.DeepCopy() - validIgnitionConfigurationAfter.Spec.KubeadmConfigSpec.Ignition.ContainerLinuxConfig.AdditionalConfig = "foo: bar" - - updateInitConfigurationPatches := before.DeepCopy() - updateInitConfigurationPatches.Spec.KubeadmConfigSpec.InitConfiguration.Patches = bootstrapv1.Patches{ - Directory: "/tmp/patches", - } - - 
updateJoinConfigurationPatches := before.DeepCopy() - updateJoinConfigurationPatches.Spec.KubeadmConfigSpec.InitConfiguration.Patches = bootstrapv1.Patches{ - Directory: "/tmp/patches", - } - - updateInitConfigurationSkipPhases := before.DeepCopy() - updateInitConfigurationSkipPhases.Spec.KubeadmConfigSpec.InitConfiguration.SkipPhases = []string{"addon/kube-proxy"} - - updateJoinConfigurationSkipPhases := before.DeepCopy() - updateJoinConfigurationSkipPhases.Spec.KubeadmConfigSpec.JoinConfiguration.SkipPhases = []string{"addon/kube-proxy"} - - updateDiskSetup := before.DeepCopy() - updateDiskSetup.Spec.KubeadmConfigSpec.DiskSetup = bootstrapv1.DiskSetup{ - Filesystems: []bootstrapv1.Filesystem{ - { - Device: "/dev/sda", - Filesystem: "ext4", - }, - }, - } - - switchFromCloudInitToIgnition := before.DeepCopy() - switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition - switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Mounts = []bootstrapv1.MountPoints{ - {"/var/lib/testdir", "/var/lib/etcd/data"}, - } - - invalidMetadata := before.DeepCopy() - invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ - "foo": "$invalid-key", - "bar": strings.Repeat("a", 64) + "too-long-value", - "/invalid-key": "foo", - } - invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ - "/invalid-key": "foo", - } - - changeTimeouts := before.DeepCopy() - changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = ptr.To[int32](20) // before 10 - changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.KubeletHealthCheckSeconds = nil // before set - changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.EtcdAPICallSeconds = ptr.To[int32](20) // before not set - changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = ptr.To[int32](20) // before 10 - changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.KubeletHealthCheckSeconds = nil // before set - changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.EtcdAPICallSeconds = ptr.To[int32](20) // before not set - - unsetTimeouts := before.DeepCopy() - unsetTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts = bootstrapv1.Timeouts{} - unsetTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts = bootstrapv1.Timeouts{} - - validUpdateCertificateValidityPeriod := before.DeepCopy() - validUpdateCertificateValidityPeriod.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 150 - - invalidUpdateCACertificateValidityPeriodDays := before.DeepCopy() - invalidUpdateCACertificateValidityPeriodDays.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ - CACertificateValidityPeriodDays: 730, - } - - tests := []struct { - name string - enableIgnitionFeature bool - expectErr bool - before *controlplanev1.KubeadmControlPlane - kcp *controlplanev1.KubeadmControlPlane - }{ - { - name: "should succeed when given a valid config", - expectErr: false, - before: before, - kcp: validUpdate, - }, - { - name: "should not return an error when trying to mutate the kubeadmconfigspec initconfiguration noderegistration", - expectErr: false, - before: before, - kcp: validUpdateKubeadmConfigInit, - }, - { - name: "should return error when trying to mutate the kubeadmconfigspec clusterconfiguration", - expectErr: true, - before: before, - kcp: invalidUpdateKubeadmConfigCluster, - }, - { - name: "should not return an error when trying 
to mutate the kubeadmconfigspec joinconfiguration noderegistration", - expectErr: false, - before: before, - kcp: validUpdateKubeadmConfigJoin, - }, - { - name: "should return error when trying to mutate the kubeadmconfigspec format from cloud-config to ignition", - expectErr: true, - before: beforeKubeadmConfigFormatSet, - kcp: invalidUpdateKubeadmConfigFormat, - }, - { - name: "should return error when trying to scale to zero", - expectErr: true, - before: before, - kcp: scaleToZero, - }, - { - name: "should return error when trying to scale to an even number", - expectErr: true, - before: before, - kcp: scaleToEven, - }, - { - name: "should return error when trying to scale to nil", - expectErr: true, - before: before, - kcp: missingReplicas, - }, - { - name: "should succeed when trying to scale to an even number with external etcd defined in ClusterConfiguration", - expectErr: false, - before: beforeExternalEtcdCluster, - kcp: scaleToEvenExternalEtcdCluster, - }, - { - name: "should succeed when making a change to the local etcd image tag", - expectErr: false, - before: before, - kcp: etcdLocalImageTag, - }, - { - name: "should succeed when making a change to the local etcd image tag", - expectErr: false, - before: before, - kcp: etcdLocalImageBuildTag, - }, - { - name: "should fail when using an invalid etcd image tag", - expectErr: true, - before: before, - kcp: etcdLocalImageInvalidTag, - }, - { - name: "should fail when making a change to the cluster config's controlPlaneEndpoint", - expectErr: true, - before: before, - kcp: controlPlaneEndpoint, - }, - { - name: "should allow changes to the cluster config's apiServer", - expectErr: false, - before: before, - kcp: apiServer, - }, - { - name: "should allow changes to the cluster config's controllerManager", - expectErr: false, - before: before, - kcp: controllerManager, - }, - { - name: "should allow changes to the cluster config's scheduler", - expectErr: false, - before: before, - kcp: scheduler, - }, - { - name: "should succeed when making a change to the cluster config's dns", - expectErr: false, - before: before, - kcp: dns, - }, - { - name: "should succeed when changing to a valid custom CoreDNS version", - expectErr: false, - before: dns, - kcp: validCoreDNSCustomToVersion, - }, - { - name: "should succeed when CoreDNS ImageTag is unset", - expectErr: false, - before: dns, - kcp: unsetCoreDNSToVersion, - }, - { - name: "should succeed when DNS is set to nil", - expectErr: false, - before: dns, - kcp: unsetCoreDNSToVersion, - }, - { - name: "should succeed when using an valid DNS build", - expectErr: false, - before: before, - kcp: dnsBuildTag, - }, - { - name: "should succeed when using the same CoreDNS version", - before: dns, - kcp: dns.DeepCopy(), - }, - { - name: "should succeed when using the same CoreDNS version - not supported", - before: validUnsupportedCoreDNSVersion, - kcp: validUnsupportedCoreDNSVersion, - }, - { - name: "should fail when upgrading to an unsupported version", - before: dns, - kcp: validUnsupportedCoreDNSVersion, - expectErr: true, - }, - { - name: "should succeed when upgrading to an unsupported version and KCP has skip annotation set", - before: dns, - kcp: validUnsupportedCoreDNSVersionWithSkipAnnotation, - }, - { - name: "should fail when using an invalid DNS build", - expectErr: true, - before: before, - kcp: dnsInvalidTag, - }, - { - name: "should fail when using an invalid CoreDNS version", - expectErr: true, - before: dns, - kcp: dnsInvalidCoreDNSToVersion, - }, - - { - name: "should fail 
when making a change to the cluster config's certificatesDir", - expectErr: true, - before: before, - kcp: certificatesDir, - }, - { - name: "should fail when making a change to the cluster config's imageRepository", - expectErr: false, - before: before, - kcp: imageRepository, - }, - { - name: "should succeed when making a change to the cluster config's featureGates", - expectErr: false, - before: before, - kcp: featureGates, - }, - { - name: "should succeed when making a change to the cluster config's local etcd's configuration localDataDir field", - expectErr: false, - before: before, - kcp: localDataDir, - }, - { - name: "should succeed when making a change to the cluster config's local etcd's configuration localPeerCertSANs field", - expectErr: false, - before: before, - kcp: localPeerCertSANs, - }, - { - name: "should succeed when making a change to the cluster config's local etcd's configuration localServerCertSANs field", - expectErr: false, - before: before, - kcp: localServerCertSANs, - }, - { - name: "should succeed when making a change to the cluster config's local etcd's configuration localExtraArgs field", - expectErr: false, - before: before, - kcp: localExtraArgs, - }, - { - name: "should succeed when making a change to the cluster config's external etcd's configuration", - expectErr: false, - before: externalEtcd, - kcp: externalEtcdChanged, - }, - { - name: "should succeed when adding the cluster config's local etcd's configuration", - expectErr: false, - before: unsetEtcdLocal, - kcp: etcdLocalImageTag, - }, - { - name: "should succeed when making a change to the cluster config's local etcd's configuration", - expectErr: false, - before: etcdLocalImageTag, - kcp: etcdLocalImageTagAndDataDir, - }, - { - name: "should succeed when attempting to unset the etcd local object to fallback to the default", - expectErr: false, - before: etcdLocalImageTag, - kcp: unsetEtcdLocal, - }, - { - name: "should fail if both local and external etcd are set", - expectErr: true, - before: beforeInvalidEtcdCluster, - kcp: afterInvalidEtcdCluster, - }, - { - name: "should pass if ClusterConfiguration is nil", - expectErr: false, - before: withoutClusterConfiguration, - kcp: withoutClusterConfiguration, - }, - { - name: "should not return an error when maxSurge value is updated to 0", - expectErr: false, - before: before, - kcp: updateMaxSurgeVal, - }, - { - name: "should return an error when maxSurge value is updated to 0, but replica count is < 3", - expectErr: true, - before: before, - kcp: wrongReplicaCountForScaleIn, - }, - { - name: "should pass if NTP servers are updated", - expectErr: false, - before: before, - kcp: updateNTPServers, - }, - { - name: "should pass if NTP servers is disabled during update", - expectErr: false, - before: before, - kcp: disableNTPServers, - }, - { - name: "should allow changes to initConfiguration.patches", - expectErr: false, - before: before, - kcp: updateInitConfigurationPatches, - }, - { - name: "should allow changes to joinConfiguration.patches", - expectErr: false, - before: before, - kcp: updateJoinConfigurationPatches, - }, - { - name: "should allow changes to initConfiguration.skipPhases", - expectErr: false, - before: before, - kcp: updateInitConfigurationSkipPhases, - }, - { - name: "should allow changes to joinConfiguration.skipPhases", - expectErr: false, - before: before, - kcp: updateJoinConfigurationSkipPhases, - }, - { - name: "should allow changes to diskSetup", - expectErr: false, - before: before, - kcp: updateDiskSetup, - }, - { - name: 
"should allow unsetting rolloutBefore", - expectErr: false, - before: before, - kcp: unsetRolloutBefore, - }, - { - name: "should return error when Ignition configuration is invalid", - enableIgnitionFeature: true, - expectErr: true, - before: invalidIgnitionConfiguration, - kcp: invalidIgnitionConfiguration, - }, - { - name: "should succeed when Ignition configuration is modified", - enableIgnitionFeature: true, - expectErr: false, - before: validIgnitionConfigurationBefore, - kcp: validIgnitionConfigurationAfter, - }, - { - name: "should succeed when CloudInit was used before", - enableIgnitionFeature: true, - expectErr: false, - before: before, - kcp: switchFromCloudInitToIgnition, - }, - { - name: "should return error for invalid metadata", - enableIgnitionFeature: true, - expectErr: true, - before: before, - kcp: invalidMetadata, - }, - { - name: "should succeed when changing timeouts", - expectErr: false, - before: before, - kcp: changeTimeouts, - }, - { - name: "should succeed when unsetting timeouts", - expectErr: false, - before: before, - kcp: unsetTimeouts, - }, - { - name: "should succeed when setting timeouts", - expectErr: false, - before: unsetTimeouts, - kcp: changeTimeouts, - }, - { - name: "should succeed when making a change to the cluster config's certificateValidityPeriod", - expectErr: false, - before: before, - kcp: validUpdateCertificateValidityPeriod, - }, - { - name: "should return error when trying to mutate the cluster config's caCertificateValidityPeriodDays", - expectErr: true, - before: before, - kcp: invalidUpdateCACertificateValidityPeriodDays, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.enableIgnitionFeature { - // NOTE: KubeadmBootstrapFormatIgnition feature flag is disabled by default. - // Enabling the feature flag temporarily for this test. - utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.KubeadmBootstrapFormatIgnition, true) - } - - g := NewWithT(t) - - webhook := &KubeadmControlPlane{} - - warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).To(Succeed()) - } - g.Expect(warnings).To(BeEmpty()) - }) - } -} - -func TestValidateVersion(t *testing.T) { - tests := []struct { - name string - clusterConfiguration bootstrapv1.ClusterConfiguration - oldVersion string - newVersion string - expectErr bool - }{ - // Basic validation of old and new version. - { - name: "error when old version is empty", - oldVersion: "", - newVersion: "v1.16.6", - expectErr: true, - }, - { - name: "error when old version is invalid", - oldVersion: "invalid-version", - newVersion: "v1.18.1", - expectErr: true, - }, - { - name: "error when new version is empty", - oldVersion: "v1.16.6", - newVersion: "", - expectErr: true, - }, - { - name: "error when new version is invalid", - oldVersion: "v1.18.1", - newVersion: "invalid-version", - expectErr: true, - }, - { - name: "pass when both versions are v1.19.0", - oldVersion: "v1.19.0", - newVersion: "v1.19.0", - expectErr: false, - }, - // Validation for skip-level upgrades. 
- { - name: "error when upgrading two minor versions", - oldVersion: "v1.18.8", - newVersion: "v1.20.0-alpha.0.734_ba502ee555924a", - expectErr: true, - }, - { - name: "pass when upgrading one minor version", - oldVersion: "v1.20.1", - newVersion: "v1.21.18", - expectErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - kcpNew := controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: tt.clusterConfiguration, - }, - Version: tt.newVersion, - }, - } - - kcpOld := controlplanev1.KubeadmControlPlane{ - Spec: controlplanev1.KubeadmControlPlaneSpec{ - KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: tt.clusterConfiguration, - }, - Version: tt.oldVersion, - }, - } - - webhook := &KubeadmControlPlane{} - - allErrs := webhook.validateVersion(&kcpOld, &kcpNew) - if tt.expectErr { - g.Expect(allErrs).ToNot(BeEmpty()) - } else { - g.Expect(allErrs).To(BeEmpty()) - } - }) - } -} -func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { - g := NewWithT(t) - - before := &controlplanev1.KubeadmControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "foo", - }, - Spec: controlplanev1.KubeadmControlPlaneSpec{ - Version: "v1.19.0", - MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ - InfrastructureRef: clusterv1.ContractVersionedObjectReference{ - APIGroup: "test", - Kind: "UnknownInfraMachine", - Name: "infraTemplate", - }, - }, - }, - }, - } - - afterDefault := before.DeepCopy() - webhook := &KubeadmControlPlane{} - g.Expect(webhook.Default(ctx, afterDefault)).To(Succeed()) - - tests := []struct { - name string - expectErr bool - before *controlplanev1.KubeadmControlPlane - kcp *controlplanev1.KubeadmControlPlane - }{ - { - name: "update should succeed after defaulting", - expectErr: false, - before: before, - kcp: afterDefault, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - webhook := &KubeadmControlPlane{} - - warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).To(Succeed()) - g.Expect(tt.kcp.Spec.Version).To(Equal("v1.19.0")) - g.Expect(tt.kcp.Spec.Rollout.Strategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) - g.Expect(tt.kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) - g.Expect(tt.kcp.Spec.Replicas).To(Equal(ptr.To[int32](1))) - } - g.Expect(warnings).To(BeEmpty()) - }) - } -} - -func TestPathsMatch(t *testing.T) { - tests := []struct { - name string - allowed, path []string - match bool - }{ - { - name: "a simple match case", - allowed: []string{"a", "b", "c"}, - path: []string{"a", "b", "c"}, - match: true, - }, - { - name: "a case can't match", - allowed: []string{"a", "b", "c"}, - path: []string{"a"}, - match: false, - }, - { - name: "an empty path for whatever reason", - allowed: []string{"a"}, - path: []string{""}, - match: false, - }, - { - name: "empty allowed matches nothing", - allowed: []string{}, - path: []string{"a"}, - match: false, - }, - { - name: "wildcard match", - allowed: []string{"a", "b", "c", "d", "*"}, - path: []string{"a", "b", "c", "d", "e", "f", "g"}, - match: true, - }, - { - name: "long path", - allowed: []string{"a"}, - path: []string{"a", "b", "c", "d", "e", "f", "g"}, - 
match: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - g.Expect(pathsMatch(tt.allowed, tt.path)).To(Equal(tt.match)) - }) - } -} - -func TestAllowed(t *testing.T) { - tests := []struct { - name string - allowList [][]string - path []string - match bool - }{ - { - name: "matches the first and none of the others", - allowList: [][]string{ - {"a", "b", "c"}, - {"b", "d", "x"}, - }, - path: []string{"a", "b", "c"}, - match: true, - }, - { - name: "matches none in the allow list", - allowList: [][]string{ - {"a", "b", "c"}, - {"b", "c", "d"}, - {"e", "*"}, - }, - path: []string{"a"}, - match: false, - }, - { - name: "an empty path matches nothing", - allowList: [][]string{ - {"a", "b", "c"}, - {"*"}, - {"b", "c"}, - }, - path: []string{}, - match: false, - }, - { - name: "empty allowList matches nothing", - allowList: [][]string{}, - path: []string{"a"}, - match: false, - }, - { - name: "length test check", - allowList: [][]string{ - {"a", "b", "c", "d", "e", "f"}, - {"a", "b", "c", "d", "e", "f", "g", "h"}, - }, - path: []string{"a", "b", "c", "d", "e", "f", "g"}, - match: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - g.Expect(allowed(tt.allowList, tt.path)).To(Equal(tt.match)) - }) - } -} - -func TestPaths(t *testing.T) { - tests := []struct { - name string - path []string - diff map[string]interface{} - expected [][]string - }{ - { - name: "basic check", - diff: map[string]interface{}{ - "spec": map[string]interface{}{ - "replicas": 4, - "version": "1.17.3", - "kubeadmConfigSpec": map[string]interface{}{ - "clusterConfiguration": map[string]interface{}{ - "version": "v2.0.1", - }, - "initConfiguration": map[string]interface{}{ - "bootstrapToken": []string{"abcd", "defg"}, - }, - "joinConfiguration": nil, - }, - }, - }, - expected: [][]string{ - {"spec", "replicas"}, - {"spec", "version"}, - {"spec", "kubeadmConfigSpec", "joinConfiguration"}, - {"spec", "kubeadmConfigSpec", "clusterConfiguration", "version"}, - {"spec", "kubeadmConfigSpec", "initConfiguration", "bootstrapToken"}, - }, - }, - { - name: "empty input makes for empty output", - path: []string{"a"}, - diff: map[string]interface{}{}, - expected: [][]string{}, - }, - { - name: "long recursive check with two keys", - diff: map[string]interface{}{ - "spec": map[string]interface{}{ - "kubeadmConfigSpec": map[string]interface{}{ - "clusterConfiguration": map[string]interface{}{ - "version": "v2.0.1", - "abc": "d", - }, - }, - }, - }, - expected: [][]string{ - {"spec", "kubeadmConfigSpec", "clusterConfiguration", "version"}, - {"spec", "kubeadmConfigSpec", "clusterConfiguration", "abc"}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - g.Expect(paths(tt.path, tt.diff)).To(ConsistOf(tt.expected)) - }) - } -} diff --git a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane.go b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane.go new file mode 100644 index 000000000000..390268c57857 --- /dev/null +++ b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane.go @@ -0,0 +1,686 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/blang/semver/v4" + "github.com/coredns/corefile-migration/migration" + jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/util/container" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/cluster-api/util/version" +) + +func (webhook *KubeadmControlPlane) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(&controlplanev1.KubeadmControlPlane{}). + WithDefaulter(webhook). + WithValidator(webhook). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/mutate-controlplane-cluster-x-k8s-io-v1beta2-kubeadmcontrolplane,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta2,name=default.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/validate-controlplane-cluster-x-k8s-io-v1beta2-kubeadmcontrolplane,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=controlplane.cluster.x-k8s.io,resources=kubeadmcontrolplanes,versions=v1beta2,name=validation.kubeadmcontrolplane.controlplane.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +// KubeadmControlPlane implements a validation and defaulting webhook for KubeadmControlPlane. +type KubeadmControlPlane struct{} + +var _ webhook.CustomValidator = &KubeadmControlPlane{} +var _ webhook.CustomDefaulter = &KubeadmControlPlane{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (webhook *KubeadmControlPlane) Default(_ context.Context, obj runtime.Object) error { + k, ok := obj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj)) + } + + defaultKubeadmControlPlaneSpec(&k.Spec) + + return nil +} + +func defaultKubeadmControlPlaneSpec(s *controlplanev1.KubeadmControlPlaneSpec) { + if s.Replicas == nil { + replicas := int32(1) + s.Replicas = &replicas + } + + if !strings.HasPrefix(s.Version, "v") { + s.Version = "v" + s.Version + } + + // Enforce RollingUpdate strategy and default MaxSurge if not set. 
+	s.Rollout.Strategy.Type = controlplanev1.RollingUpdateStrategyType
+	s.Rollout.Strategy.RollingUpdate.MaxSurge = intstr.ValueOrDefault(s.Rollout.Strategy.RollingUpdate.MaxSurge, intstr.FromInt32(1))
+}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
+func (webhook *KubeadmControlPlane) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	k, ok := obj.(*controlplanev1.KubeadmControlPlane)
+	if !ok {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", obj))
+	}
+
+	spec := k.Spec
+	allErrs := validateKubeadmControlPlaneSpec(spec, field.NewPath("spec"))
+	allErrs = append(allErrs, validateClusterConfiguration(nil, &spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...)
+	allErrs = append(allErrs, spec.KubeadmConfigSpec.Validate(true, field.NewPath("spec", "kubeadmConfigSpec"))...)
+	if len(allErrs) > 0 {
+		return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), k.Name, allErrs)
+	}
+	return nil, nil
+}
+
+const (
+	spec                 = "spec"
+	kubeadmConfigSpec    = "kubeadmConfigSpec"
+	clusterConfiguration = "clusterConfiguration"
+	initConfiguration    = "initConfiguration"
+	joinConfiguration    = "joinConfiguration"
+	nodeRegistration     = "nodeRegistration"
+	skipPhases           = "skipPhases"
+	patches              = "patches"
+	directory            = "directory"
+	bootCommands         = "bootCommands"
+	preKubeadmCommands   = "preKubeadmCommands"
+	postKubeadmCommands  = "postKubeadmCommands"
+	files                = "files"
+	users                = "users"
+	apiServer            = "apiServer"
+	controllerManager    = "controllerManager"
+	scheduler            = "scheduler"
+	ntp                  = "ntp"
+	ignition             = "ignition"
+	diskSetup            = "diskSetup"
+	featureGates         = "featureGates"
+	timeouts             = "timeouts"
+)
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (webhook *KubeadmControlPlane) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+	// add a * to indicate everything beneath is ok.
+	// For example, {"spec", "*"} will allow any path under "spec" to change.
+	// For example, {"spec"} will allow "spec" to also be unset.
+ allowedPaths := [][]string{ + // metadata + {"metadata", "*"}, + // spec.kubeadmConfigSpec.clusterConfiguration + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "local", "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "etcd", "external", "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "dns"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "dns", "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "imageRepository"}, + {spec, kubeadmConfigSpec, clusterConfiguration, featureGates}, + {spec, kubeadmConfigSpec, clusterConfiguration, featureGates, "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, apiServer}, + {spec, kubeadmConfigSpec, clusterConfiguration, apiServer, "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager}, + {spec, kubeadmConfigSpec, clusterConfiguration, controllerManager, "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, scheduler}, + {spec, kubeadmConfigSpec, clusterConfiguration, scheduler, "*"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "certificateValidityPeriodDays"}, + {spec, kubeadmConfigSpec, clusterConfiguration, "encryptionAlgorithm"}, + // spec.kubeadmConfigSpec.initConfiguration + {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration}, + {spec, kubeadmConfigSpec, initConfiguration, nodeRegistration, "*"}, + {spec, kubeadmConfigSpec, initConfiguration, patches, directory}, + {spec, kubeadmConfigSpec, initConfiguration, patches}, + {spec, kubeadmConfigSpec, initConfiguration, skipPhases}, + {spec, kubeadmConfigSpec, initConfiguration, "bootstrapTokens"}, + {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint"}, + {spec, kubeadmConfigSpec, initConfiguration, "localAPIEndpoint", "*"}, + {spec, kubeadmConfigSpec, initConfiguration, timeouts}, + {spec, kubeadmConfigSpec, initConfiguration, timeouts, "*"}, + // spec.kubeadmConfigSpec.joinConfiguration + {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration}, + {spec, kubeadmConfigSpec, joinConfiguration, nodeRegistration, "*"}, + {spec, kubeadmConfigSpec, joinConfiguration, patches, directory}, + {spec, kubeadmConfigSpec, joinConfiguration, patches}, + {spec, kubeadmConfigSpec, joinConfiguration, skipPhases}, + {spec, kubeadmConfigSpec, joinConfiguration, "caCertPath"}, + {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane"}, + {spec, kubeadmConfigSpec, joinConfiguration, "controlPlane", "*"}, + {spec, kubeadmConfigSpec, joinConfiguration, "discovery"}, + {spec, kubeadmConfigSpec, joinConfiguration, "discovery", "*"}, + {spec, kubeadmConfigSpec, joinConfiguration, timeouts}, + {spec, kubeadmConfigSpec, joinConfiguration, timeouts, "*"}, + // spec.kubeadmConfigSpec + {spec, kubeadmConfigSpec, bootCommands}, + {spec, kubeadmConfigSpec, preKubeadmCommands}, + {spec, kubeadmConfigSpec, postKubeadmCommands}, + {spec, kubeadmConfigSpec, files}, + {spec, kubeadmConfigSpec, "verbosity"}, + {spec, kubeadmConfigSpec, users}, + {spec, kubeadmConfigSpec, ntp}, + {spec, kubeadmConfigSpec, ntp, "*"}, + {spec, kubeadmConfigSpec, ignition}, + {spec, kubeadmConfigSpec, ignition, "*"}, + {spec, kubeadmConfigSpec, diskSetup}, + {spec, kubeadmConfigSpec, diskSetup, "*"}, + {spec, kubeadmConfigSpec, "format"}, + {spec, kubeadmConfigSpec, "mounts"}, + // spec.machineTemplate + {spec, "machineTemplate", "metadata"}, + {spec, "machineTemplate", 
"metadata", "*"}, + {spec, "machineTemplate", "spec"}, + {spec, "machineTemplate", "spec", "*"}, + // spec + {spec, "replicas"}, + {spec, "version"}, + {spec, "remediation"}, + {spec, "remediation", "*"}, + {spec, "machineNaming"}, + {spec, "machineNaming", "*"}, + {spec, "rollout"}, + {spec, "rollout", "*"}, + } + + oldK, ok := oldObj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", oldObj)) + } + + newK, ok := newObj.(*controlplanev1.KubeadmControlPlane) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a KubeadmControlPlane but got a %T", newObj)) + } + + allErrs := validateKubeadmControlPlaneSpec(newK.Spec, field.NewPath("spec")) + + originalJSON, err := json.Marshal(oldK) + if err != nil { + return nil, apierrors.NewInternalError(err) + } + modifiedJSON, err := json.Marshal(newK) + if err != nil { + return nil, apierrors.NewInternalError(err) + } + + diff, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return nil, apierrors.NewInternalError(err) + } + jsonPatch := map[string]interface{}{} + if err := json.Unmarshal(diff, &jsonPatch); err != nil { + return nil, apierrors.NewInternalError(err) + } + // Build a list of all paths that are trying to change + diffpaths := paths([]string{}, jsonPatch) + // Every path in the diff must be valid for the update function to work. + for _, path := range diffpaths { + // Ignore paths that are empty + if len(path) == 0 { + continue + } + if !allowed(allowedPaths, path) { + if len(path) == 1 { + allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0]), "cannot be modified")) + continue + } + allErrs = append(allErrs, field.Forbidden(field.NewPath(path[0], path[1:]...), "cannot be modified")) + } + } + + allErrs = append(allErrs, webhook.validateVersion(oldK, newK)...) + allErrs = append(allErrs, validateClusterConfiguration(&oldK.Spec.KubeadmConfigSpec.ClusterConfiguration, &newK.Spec.KubeadmConfigSpec.ClusterConfiguration, field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration"))...) + allErrs = append(allErrs, webhook.validateCoreDNSVersion(oldK, newK)...) + allErrs = append(allErrs, newK.Spec.KubeadmConfigSpec.Validate(true, field.NewPath("spec", "kubeadmConfigSpec"))...) + + if len(allErrs) > 0 { + return nil, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("KubeadmControlPlane").GroupKind(), newK.Name, allErrs) + } + + return nil, nil +} + +func validateKubeadmControlPlaneSpec(s controlplanev1.KubeadmControlPlaneSpec, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if s.Replicas == nil { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("replicas"), + "is required", + ), + ) + } else if *s.Replicas <= 0 { + // The use of the scale subresource should provide a guarantee that negative values + // should not be accepted for this field, but since we have to validate that Replicas != 0 + // it doesn't hurt to also additionally validate for negative numbers here as well. 
+ allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("replicas"), + "cannot be less than or equal to 0", + ), + ) + } + + externalEtcd := s.KubeadmConfigSpec.ClusterConfiguration.Etcd.External.IsDefined() + if !externalEtcd { + if s.Replicas != nil && *s.Replicas%2 == 0 { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("replicas"), + "cannot be an even number when etcd is stacked", + ), + ) + } + } + + if s.MachineTemplate.Spec.InfrastructureRef.APIGroup == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "apiGroup"), + s.MachineTemplate.Spec.InfrastructureRef.APIGroup, + "cannot be empty", + ), + ) + } + if s.MachineTemplate.Spec.InfrastructureRef.Kind == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "kind"), + s.MachineTemplate.Spec.InfrastructureRef.Kind, + "cannot be empty", + ), + ) + } + if s.MachineTemplate.Spec.InfrastructureRef.Name == "" { + allErrs = append( + allErrs, + field.Invalid( + pathPrefix.Child("machineTemplate", "infrastructure", "name"), + s.MachineTemplate.Spec.InfrastructureRef.Name, + "cannot be empty", + ), + ) + } + + // Validate the metadata of the MachineTemplate + allErrs = append(allErrs, s.MachineTemplate.ObjectMeta.Validate(pathPrefix.Child("machineTemplate", "metadata"))...) + + if !version.KubeSemver.MatchString(s.Version) { + allErrs = append(allErrs, field.Invalid(pathPrefix.Child("version"), s.Version, "must be a valid semantic version")) + } + + allErrs = append(allErrs, validateRolloutAndCertValidityFields(s.Rollout, s.KubeadmConfigSpec.ClusterConfiguration, s.Replicas, pathPrefix)...) + allErrs = append(allErrs, validateNaming(s.MachineNaming, pathPrefix.Child("machineNaming"))...) 
+ return allErrs +} + +func validateRolloutAndCertValidityFields(rolloutSpec controlplanev1.KubeadmControlPlaneRolloutSpec, clusterConfiguration bootstrapv1.ClusterConfiguration, replicas *int32, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + rolloutStrategy := rolloutSpec.Strategy + + if reflect.DeepEqual(rolloutStrategy, controlplanev1.KubeadmControlPlaneRolloutStrategy{}) { + return nil + } + + if rolloutStrategy.Type != controlplanev1.RollingUpdateStrategyType { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("rollout", "strategy", "type"), + "only RollingUpdate is supported", + ), + ) + } + + if rolloutStrategy.RollingUpdate.MaxSurge != nil { + ios1 := intstr.FromInt32(1) + ios0 := intstr.FromInt32(0) + if rolloutStrategy.RollingUpdate.MaxSurge.IntValue() == ios0.IntValue() && (replicas != nil && *replicas < int32(3)) { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("rollout", "strategy", "rollingUpdate"), + "when KubeadmControlPlane is configured to scale-in, replica count needs to be at least 3", + ), + ) + } + if rolloutStrategy.RollingUpdate.MaxSurge.IntValue() != ios1.IntValue() && rolloutStrategy.RollingUpdate.MaxSurge.IntValue() != ios0.IntValue() { + allErrs = append( + allErrs, + field.Required( + pathPrefix.Child("rollout", "strategy", "rollingUpdate", "maxSurge"), + "value must be 1 or 0", + ), + ) + } + } + + if clusterConfiguration.CertificateValidityPeriodDays != 0 { + if clusterConfiguration.CACertificateValidityPeriodDays != 0 { + if clusterConfiguration.CertificateValidityPeriodDays > clusterConfiguration.CACertificateValidityPeriodDays { + allErrs = append(allErrs, + field.Invalid( + pathPrefix.Child("kubeadmConfigSpec", "clusterConfiguration", "certificateValidityPeriodDays"), + clusterConfiguration.CertificateValidityPeriodDays, + fmt.Sprintf("must be less than or equal to caCertificateValidityPeriodDays (%d)", clusterConfiguration.CACertificateValidityPeriodDays), + ), + ) + } + } else if clusterConfiguration.CertificateValidityPeriodDays > secret.DefaultCACertificatesExpiryDays { + allErrs = append(allErrs, + field.Invalid( + pathPrefix.Child("kubeadmConfigSpec", "clusterConfiguration", "certificateValidityPeriodDays"), + clusterConfiguration.CertificateValidityPeriodDays, + fmt.Sprintf("must be less than or equal to the default value of caCertificateValidityPeriodDays (%d)", secret.DefaultCACertificatesExpiryDays), + ), + ) + } + + if rolloutSpec.Before.CertificatesExpiryDays != 0 { + if rolloutSpec.Before.CertificatesExpiryDays >= clusterConfiguration.CertificateValidityPeriodDays { + allErrs = append(allErrs, + field.Invalid( + pathPrefix.Child("rollout", "before", "certificatesExpiryDays"), + rolloutSpec.Before.CertificatesExpiryDays, + fmt.Sprintf("must be less than certificateValidityPeriodDays (%d)", clusterConfiguration.CertificateValidityPeriodDays))) + } + } + } + + return allErrs +} + +func validateNaming(machineNaming controlplanev1.MachineNamingSpec, pathPrefix *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if machineNaming.Template != "" { + if !strings.Contains(machineNaming.Template, "{{ .random }}") { + allErrs = append(allErrs, + field.Invalid( + pathPrefix.Child("template"), + machineNaming.Template, + "invalid template, {{ .random }} is missing", + )) + } + name, err := topologynames.KCPMachineNameGenerator(machineNaming.Template, "cluster", "kubeadmcontrolplane").GenerateName() + if err != nil { + allErrs = append(allErrs, + field.Invalid( + 
pathPrefix.Child("template"), + machineNaming.Template, + fmt.Sprintf("invalid template: %v", err), + )) + } else { + for _, err := range validation.IsDNS1123Subdomain(name) { + allErrs = append(allErrs, + field.Invalid( + pathPrefix.Child("template"), + machineNaming.Template, + fmt.Sprintf("invalid template, generated names would not be valid Kubernetes object names: %v", err), + )) + } + } + } + + return allErrs +} + +func validateClusterConfiguration(oldClusterConfiguration, newClusterConfiguration *bootstrapv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if newClusterConfiguration == nil { + return allErrs + } + + // TODO: Remove when kubeadm types include OpenAPI validation + if !container.ImageTagIsValid(newClusterConfiguration.DNS.ImageTag) { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("dns", "imageTag"), + fmt.Sprintf("tag %s is invalid", newClusterConfiguration.DNS.ImageTag), + ), + ) + } + + if newClusterConfiguration.DNS.ImageTag != "" { + if _, err := version.ParseTolerantImageTag(newClusterConfiguration.DNS.ImageTag); err != nil { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("dns", "imageTag"), + newClusterConfiguration.DNS.ImageTag, + fmt.Sprintf("failed to parse CoreDNS version: %v", err), + ), + ) + } + } + + // TODO: Remove when kubeadm types include OpenAPI validation + if newClusterConfiguration.Etcd.Local.IsDefined() && !container.ImageTagIsValid(newClusterConfiguration.Etcd.Local.ImageTag) { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("etcd", "local", "imageTag"), + fmt.Sprintf("tag %s is invalid", newClusterConfiguration.Etcd.Local.ImageTag), + ), + ) + } + + if newClusterConfiguration.Etcd.Local.IsDefined() && newClusterConfiguration.Etcd.External.IsDefined() { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("etcd", "local"), + "cannot have both external and local etcd", + ), + ) + } + + // update validations + if oldClusterConfiguration != nil { + if (newClusterConfiguration.Etcd.External.IsDefined() && !oldClusterConfiguration.Etcd.External.IsDefined()) || + (!newClusterConfiguration.Etcd.External.IsDefined() && oldClusterConfiguration.Etcd.External.IsDefined()) { + allErrs = append( + allErrs, + field.Forbidden( + pathPrefix.Child("etcd", "external"), + "cannot change between external and local etcd", + ), + ) + } + } + + return allErrs +} + +func allowed(allowList [][]string, path []string) bool { + for _, allowed := range allowList { + if pathsMatch(allowed, path) { + return true + } + } + return false +} + +func pathsMatch(allowed, path []string) bool { + // if either are empty then no match can be made + if len(allowed) == 0 || len(path) == 0 { + return false + } + i := 0 + for i = range path { + // reached the end of the allowed path and no match was found + if i > len(allowed)-1 { + return false + } + if allowed[i] == "*" { + return true + } + if path[i] != allowed[i] { + return false + } + } + // path has been completely iterated and has not matched the end of the path. + // e.g. allowed: []string{"a","b","c"}, path: []string{"a"} + return i >= len(allowed)-1 +} + +// paths builds a slice of paths that are being modified. 
+func paths(path []string, diff map[string]interface{}) [][]string { + allPaths := [][]string{} + for key, m := range diff { + nested, ok := m.(map[string]interface{}) + if !ok { + // We have to use a copy of path, because otherwise the slice we append to + // allPaths would be overwritten in another iteration. + tmp := make([]string, len(path)) + copy(tmp, path) + allPaths = append(allPaths, append(tmp, key)) + continue + } + allPaths = append(allPaths, paths(append(path, key), nested)...) + } + return allPaths +} + +func (webhook *KubeadmControlPlane) validateCoreDNSVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { + // return if either current or target versions is empty + if newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" || oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag == "" { + return allErrs + } + targetDNS := &newK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS + + fromVersion, err := version.ParseTolerantImageTag(oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag) + if err != nil { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), + oldK.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag, + fmt.Sprintf("failed to parse current CoreDNS version: %v", err), + ), + ) + return allErrs + } + + toVersion, err := version.ParseTolerantImageTag(targetDNS.ImageTag) + if err != nil { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), + targetDNS.ImageTag, + fmt.Sprintf("failed to parse target CoreDNS version: %v", err), + ), + ) + return allErrs + } + // If the versions are equal return here without error. + // This allows an upgrade where the version of CoreDNS in use is not supported by the migration tool. + if version.Compare(toVersion, fromVersion, version.WithoutPreReleases()) == 0 { + return allErrs + } + + // Skip validating if the skip CoreDNS annotation is set. If set, KCP doesn't use the migration library. + if _, ok := newK.Annotations[controlplanev1.SkipCoreDNSAnnotation]; ok { + return allErrs + } + + if err := migration.ValidUpMigration(version.MajorMinorPatch(fromVersion).String(), version.MajorMinorPatch(toVersion).String()); err != nil { + allErrs = append( + allErrs, + field.Forbidden( + field.NewPath("spec", "kubeadmConfigSpec", "clusterConfiguration", "dns", "imageTag"), + fmt.Sprintf("cannot migrate CoreDNS up to '%v' from '%v': %v", toVersion, fromVersion, err), + ), + ) + } + + return allErrs +} + +func (webhook *KubeadmControlPlane) validateVersion(oldK, newK *controlplanev1.KubeadmControlPlane) (allErrs field.ErrorList) { + previousVersion := oldK.Spec.Version + fromVersion, err := semver.ParseTolerant(previousVersion) + if err != nil { + allErrs = append(allErrs, + field.InternalError( + field.NewPath("spec", "version"), + errors.Wrapf(err, "failed to parse current kubeadmcontrolplane version: %s", previousVersion), + ), + ) + return allErrs + } + + toVersion, err := semver.ParseTolerant(newK.Spec.Version) + if err != nil { + allErrs = append(allErrs, + field.InternalError( + field.NewPath("spec", "version"), + errors.Wrapf(err, "failed to parse updated kubeadmcontrolplane version: %s", newK.Spec.Version), + ), + ) + return allErrs + } + + // Validate that the update is upgrading at most one minor version. + // Note: Skipping a minor version is not allowed. 
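+ // For example, upgrading from v1.22.3 to any v1.23.x patch release is accepted,
+ // while upgrading from v1.22.3 to v1.24.0 (or newer) is rejected.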
+ // Note: Checking against this ceilVersion allows upgrading to the next minor + // version irrespective of the patch version. + ceilVersion := semver.Version{ + Major: fromVersion.Major, + Minor: fromVersion.Minor + 2, + Patch: 0, + } + if version.Compare(toVersion, ceilVersion, version.WithoutPreReleases()) >= 0 { + allErrs = append(allErrs, + field.Forbidden( + field.NewPath("spec", "version"), + fmt.Sprintf("cannot update Kubernetes version from %s to %s", previousVersion, newK.Spec.Version), + ), + ) + } + + return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (webhook *KubeadmControlPlane) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} diff --git a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane_test.go b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane_test.go new file mode 100644 index 000000000000..27bec1e462e8 --- /dev/null +++ b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplane_test.go @@ -0,0 +1,1476 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "strings" + "testing" + "time" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/webhooks/util" +) + +var ( + ctx = ctrl.SetupSignalHandler() +) + +func TestKubeadmControlPlaneDefault(t *testing.T) { + g := NewWithT(t) + + kcp := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.18.3", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: "test", + Kind: "UnknownInfraMachine", + Name: "foo", + }, + }, + }, + }, + } + updateDefaultingValidationKCP := kcp.DeepCopy() + updateDefaultingValidationKCP.Spec.Version = "v1.18.3" + updateDefaultingValidationKCP.Spec.MachineTemplate.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ + APIGroup: "test", + Kind: "UnknownInfraMachine", + Name: "foo", + } + webhook := &KubeadmControlPlane{} + t.Run("for KubeadmControlPlane", util.CustomDefaultValidateTest(ctx, updateDefaultingValidationKCP, webhook)) + g.Expect(webhook.Default(ctx, kcp)).To(Succeed()) + + g.Expect(kcp.Spec.Version).To(Equal("v1.18.3")) + g.Expect(kcp.Spec.Rollout.Strategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) + 
g.Expect(kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) +} + +func TestKubeadmControlPlaneValidateCreate(t *testing.T) { + valid := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "foo", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: "test", + Kind: "UnknownInfraMachine", + Name: "infraTemplate", + }, + }, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, + }, + Replicas: ptr.To[int32](1), + Version: "v1.19.0", + Rollout: controlplanev1.KubeadmControlPlaneRolloutSpec{ + Strategy: controlplanev1.KubeadmControlPlaneRolloutStrategy{ + Type: controlplanev1.RollingUpdateStrategyType, + RollingUpdate: controlplanev1.KubeadmControlPlaneRolloutStrategyRollingUpdate{ + MaxSurge: &intstr.IntOrString{ + IntVal: 1, + }, + }, + }, + }, + }, + } + + invalidMaxSurge := valid.DeepCopy() + invalidMaxSurge.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(3) + + stringMaxSurge := valid.DeepCopy() + val := intstr.FromString("1") + stringMaxSurge.Spec.Rollout.Strategy.RollingUpdate.MaxSurge = &val + + missingReplicas := valid.DeepCopy() + missingReplicas.Spec.Replicas = nil + + zeroReplicas := valid.DeepCopy() + zeroReplicas.Spec.Replicas = ptr.To[int32](0) + + evenReplicas := valid.DeepCopy() + evenReplicas.Spec.Replicas = ptr.To[int32](2) + + evenReplicasExternalEtcd := evenReplicas.DeepCopy() + evenReplicasExternalEtcd.Spec.KubeadmConfigSpec = bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + External: bootstrapv1.ExternalEtcd{ + Endpoints: []string{"1.2.3.4"}, + }, + }, + }, + } + + validVersion := valid.DeepCopy() + validVersion.Spec.Version = "v1.16.6" + + invalidVersion1 := valid.DeepCopy() + invalidVersion1.Spec.Version = "vv1.16.6" + + invalidVersion2 := valid.DeepCopy() + invalidVersion2.Spec.Version = "1.16.6" + + invalidCoreDNSVersion := valid.DeepCopy() + invalidCoreDNSVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag = "1-7" // not a valid semantic version + + invalidIgnitionConfiguration := valid.DeepCopy() // Format is not set to ignition. 
+ invalidIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ + ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ + AdditionalConfig: "config", + }, + } + + validIgnitionConfiguration := valid.DeepCopy() + validIgnitionConfiguration.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition + validIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{} + + invalidMetadata := valid.DeepCopy() + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + } + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ + "/invalid-key": "foo", + } + + invalidControlPlaneComponentHealthCheckSeconds := valid.DeepCopy() + invalidControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.InitConfiguration = bootstrapv1.InitConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} + + validControlPlaneComponentHealthCheckSeconds := valid.DeepCopy() + validControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.InitConfiguration = bootstrapv1.InitConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} + validControlPlaneComponentHealthCheckSeconds.Spec.KubeadmConfigSpec.JoinConfiguration = bootstrapv1.JoinConfiguration{Timeouts: bootstrapv1.Timeouts{ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10)}} + + invalidCertificateValidityPeriodDaysGreaterCA := valid.DeepCopy() + invalidCertificateValidityPeriodDaysGreaterCA.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 350 + invalidCertificateValidityPeriodDaysGreaterCA.Spec.KubeadmConfigSpec.ClusterConfiguration.CACertificateValidityPeriodDays = 300 + + invalidCertificateValidityPeriodDaysGreaterDefault := valid.DeepCopy() + invalidCertificateValidityPeriodDaysGreaterDefault.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 3651 + invalidCertificateValidityPeriodDaysGreaterDefault.Spec.KubeadmConfigSpec.ClusterConfiguration.CACertificateValidityPeriodDays = 0 // default is 3650 + + invalidRolloutBeforeCertificatesExpiryDays := valid.DeepCopy() + invalidRolloutBeforeCertificatesExpiryDays.Spec.Rollout.Before.CertificatesExpiryDays = 8 + invalidRolloutBeforeCertificatesExpiryDays.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 7 + + tests := []struct { + name string + enableIgnitionFeature bool + expectErr bool + kcp *controlplanev1.KubeadmControlPlane + }{ + { + name: "should succeed when given a valid config", + expectErr: false, + kcp: valid, + }, + { + name: "should return error when replicas is nil", + expectErr: true, + kcp: missingReplicas, + }, + { + name: "should return error when replicas is zero", + expectErr: true, + kcp: zeroReplicas, + }, + { + name: "should return error when replicas is even", + expectErr: true, + kcp: evenReplicas, + }, + { + name: "should allow even replicas when using external etcd", + expectErr: false, + kcp: evenReplicasExternalEtcd, + }, + { + name: "should succeed when given a valid semantic version with prepended 'v'", + expectErr: false, + kcp: validVersion, + }, + { + name: "should error when given a valid semantic version without 'v'", + expectErr: true, + kcp: invalidVersion2, + }, + { + name: "should return error when given an invalid semantic version", + expectErr: true, + kcp: invalidVersion1, + }, + { + name: "should 
return error when given an invalid semantic CoreDNS version", + expectErr: true, + kcp: invalidCoreDNSVersion, + }, + { + name: "should return error when maxSurge is not 1", + expectErr: true, + kcp: invalidMaxSurge, + }, + { + name: "should succeed when maxSurge is a string", + expectErr: false, + kcp: stringMaxSurge, + }, + { + name: "should return error when Ignition configuration is invalid", + enableIgnitionFeature: true, + expectErr: true, + kcp: invalidIgnitionConfiguration, + }, + { + name: "should succeed when Ignition configuration is valid", + enableIgnitionFeature: true, + expectErr: false, + kcp: validIgnitionConfiguration, + }, + { + name: "should return error for invalid metadata", + enableIgnitionFeature: true, + expectErr: true, + kcp: invalidMetadata, + }, + { + name: "should return error for invalid Timeouts.ControlPlaneComponentHealthCheckSeconds", + expectErr: true, + kcp: invalidControlPlaneComponentHealthCheckSeconds, + }, + { + name: "should pass for valid Timeouts.ControlPlaneComponentHealthCheckSeconds", + kcp: validControlPlaneComponentHealthCheckSeconds, + }, + { + name: "should return error when CertificateValidityPeriodDays greater than CACertificateValidityPeriodDays", + expectErr: true, + kcp: invalidCertificateValidityPeriodDaysGreaterCA, + }, + { + name: "should return error when CertificateValidityPeriodDays greater than CACertificateValidityPeriodDays default", + expectErr: true, + kcp: invalidCertificateValidityPeriodDaysGreaterDefault, + }, + { + name: "should return error when rolloutBefore CertificatesExpiryDays greater than cluster CertificateValidityPeriodDays", + expectErr: true, + kcp: invalidRolloutBeforeCertificatesExpiryDays, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.enableIgnitionFeature { + // NOTE: KubeadmBootstrapFormatIgnition feature flag is disabled by default. + // Enabling the feature flag temporarily for this test. 
+ utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.KubeadmBootstrapFormatIgnition, true) + } + + g := NewWithT(t) + + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateCreate(ctx, tt.kcp) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(warnings).To(BeEmpty()) + }) + } +} + +func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { + before := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "foo", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: "test", + Kind: "UnknownInfraMachine", + Name: "infraTemplate", + }, + Deletion: controlplanev1.KubeadmControlPlaneMachineTemplateDeletionSpec{ + NodeDrainTimeoutSeconds: ptr.To(int32(1)), + NodeVolumeDetachTimeoutSeconds: ptr.To(int32(1)), + NodeDeletionTimeoutSeconds: ptr.To(int32(1)), + }, + }, + }, + Replicas: ptr.To[int32](1), + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + InitConfiguration: bootstrapv1.InitConfiguration{ + LocalAPIEndpoint: bootstrapv1.APIEndpoint{ + AdvertiseAddress: "127.0.0.1", + BindPort: int32(443), + }, + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "test", + }, + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10), + KubeletHealthCheckSeconds: ptr.To[int32](40), + }, + }, + ClusterConfiguration: bootstrapv1.ClusterConfiguration{ + DNS: bootstrapv1.DNS{ + ImageRepository: "registry.k8s.io/coredns", + ImageTag: "1.6.5", + }, + CertificateValidityPeriodDays: 100, + CACertificateValidityPeriodDays: 365, + EncryptionAlgorithm: bootstrapv1.EncryptionAlgorithmRSA2048, + }, + JoinConfiguration: bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{ + Name: "test", + }, + Timeouts: bootstrapv1.Timeouts{ + ControlPlaneComponentHealthCheckSeconds: ptr.To[int32](10), + KubeletHealthCheckSeconds: ptr.To[int32](40), + }, + }, + PreKubeadmCommands: []string{ + "test", "foo", + }, + PostKubeadmCommands: []string{ + "test", "foo", + }, + Files: []bootstrapv1.File{ + { + Path: "test", + }, + }, + Users: []bootstrapv1.User{ + { + Name: "user", + SSHAuthorizedKeys: []string{ + "ssh-rsa foo", + }, + }, + }, + NTP: bootstrapv1.NTP{ + Servers: []string{"test-server-1", "test-server-2"}, + Enabled: ptr.To(true), + }, + }, + Version: "v1.16.6", + Rollout: controlplanev1.KubeadmControlPlaneRolloutSpec{ + Before: controlplanev1.KubeadmControlPlaneRolloutBeforeSpec{ + CertificatesExpiryDays: 7, + }, + Strategy: controlplanev1.KubeadmControlPlaneRolloutStrategy{ + Type: controlplanev1.RollingUpdateStrategyType, + RollingUpdate: controlplanev1.KubeadmControlPlaneRolloutStrategyRollingUpdate{ + MaxSurge: &intstr.IntOrString{ + IntVal: 1, + }, + }, + }, + }, + }, + } + + updateMaxSurgeVal := before.DeepCopy() + updateMaxSurgeVal.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(0) + updateMaxSurgeVal.Spec.Replicas = ptr.To[int32](3) + + wrongReplicaCountForScaleIn := before.DeepCopy() + wrongReplicaCountForScaleIn.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal = int32(0) + + validUpdateKubeadmConfigInit := before.DeepCopy() + validUpdateKubeadmConfigInit.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} + + 
invalidUpdateKubeadmConfigCluster := before.DeepCopy() + invalidUpdateKubeadmConfigCluster.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ + CertificatesDir: "some-other-value", + } + + validUpdateKubeadmConfigJoin := before.DeepCopy() + validUpdateKubeadmConfigJoin.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration = bootstrapv1.NodeRegistrationOptions{} + + beforeKubeadmConfigFormatSet := before.DeepCopy() + beforeKubeadmConfigFormatSet.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig + invalidUpdateKubeadmConfigFormat := beforeKubeadmConfigFormatSet.DeepCopy() + invalidUpdateKubeadmConfigFormat.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition + + validUpdate := before.DeepCopy() + validUpdate.Labels = map[string]string{"blue": "green"} + validUpdate.Spec.KubeadmConfigSpec.BootCommands = []string{"ab", "abc"} + validUpdate.Spec.KubeadmConfigSpec.PreKubeadmCommands = []string{"ab", "abc"} + validUpdate.Spec.KubeadmConfigSpec.PostKubeadmCommands = []string{"ab", "abc"} + validUpdate.Spec.KubeadmConfigSpec.Files = []bootstrapv1.File{ + { + Path: "ab", + }, + { + Path: "abc", + }, + } + validUpdate.Spec.Version = "v1.17.1" + validUpdate.Spec.KubeadmConfigSpec.Users = []bootstrapv1.User{ + { + Name: "bar", + SSHAuthorizedKeys: []string{ + "ssh-rsa bar", + "ssh-rsa foo", + }, + }, + } + validUpdate.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ + "label": "labelValue", + } + validUpdate.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ + "annotation": "labelAnnotation", + } + validUpdate.Spec.MachineTemplate.Spec.InfrastructureRef.APIGroup = "test-changed" + validUpdate.Spec.MachineTemplate.Spec.InfrastructureRef.Name = "orange" + validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeDrainTimeoutSeconds = ptr.To(int32(10)) + validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(10)) + validUpdate.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds = ptr.To(int32(10)) + validUpdate.Spec.Replicas = ptr.To[int32](5) + now := metav1.NewTime(time.Now()) + validUpdate.Spec.Rollout.After = now + validUpdate.Spec.Rollout.Before.CertificatesExpiryDays = 14 + validUpdate.Spec.Remediation = controlplanev1.KubeadmControlPlaneRemediationSpec{ + MaxRetry: ptr.To[int32](50), + MinHealthyPeriodSeconds: ptr.To(int32(10 * 60 * 60)), + RetryPeriodSeconds: ptr.To[int32](10 * 60), + } + validUpdate.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig + + scaleToZero := before.DeepCopy() + scaleToZero.Spec.Replicas = ptr.To[int32](0) + + scaleToEven := before.DeepCopy() + scaleToEven.Spec.Replicas = ptr.To[int32](2) + + missingReplicas := before.DeepCopy() + missingReplicas.Spec.Replicas = nil + + etcdLocalImageTag := before.DeepCopy() + etcdLocalImageTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + ImageTag: "v9.1.1", + } + etcdLocalImageTagAndDataDir := etcdLocalImageTag.DeepCopy() + etcdLocalImageTagAndDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.DataDir = "/foo" + + etcdLocalImageBuildTag := before.DeepCopy() + etcdLocalImageBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + ImageTag: "v9.1.1_validBuild1", + } + + etcdLocalImageInvalidTag := before.DeepCopy() + etcdLocalImageInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + ImageTag: "v9.1.1+invalidBuild1", + } + + unsetEtcdLocal := etcdLocalImageTag.DeepCopy() + 
unsetEtcdLocal.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{} + + controlPlaneEndpoint := before.DeepCopy() + controlPlaneEndpoint.Spec.KubeadmConfigSpec.ClusterConfiguration.ControlPlaneEndpoint = "some control plane endpoint" + + apiServer := before.DeepCopy() + apiServer.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer = bootstrapv1.APIServer{ + ExtraArgs: []bootstrapv1.Arg{ + { + Name: "foo", + Value: ptr.To("bar"), + }, + }, + ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount1"}}, + CertSANs: []string{"foo", "bar"}, + } + + controllerManager := before.DeepCopy() + controllerManager.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager = bootstrapv1.ControllerManager{ + ExtraArgs: []bootstrapv1.Arg{ + { + Name: "controller manager field", + Value: ptr.To("controller manager value"), + }, + }, + ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount", HostPath: "/foo", MountPath: "bar", ReadOnly: ptr.To(true), PathType: "File"}}, + } + + scheduler := before.DeepCopy() + scheduler.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler = bootstrapv1.Scheduler{ + ExtraArgs: []bootstrapv1.Arg{ + { + Name: "scheduler field", + Value: ptr.To("scheduler value"), + }, + }, + ExtraVolumes: []bootstrapv1.HostPathMount{{Name: "mount", HostPath: "/foo", MountPath: "bar", ReadOnly: ptr.To(true), PathType: "File"}}, + } + + dns := before.DeepCopy() + dns.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "v1.6.6_foobar.1", + } + + dnsBuildTag := before.DeepCopy() + dnsBuildTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "1.6.7", + } + + dnsInvalidTag := before.DeepCopy() + dnsInvalidTag.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "v0.20.0+invalidBuild1", + } + + dnsInvalidCoreDNSToVersion := dns.DeepCopy() + dnsInvalidCoreDNSToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "1.6.5", + } + + validCoreDNSCustomToVersion := dns.DeepCopy() + validCoreDNSCustomToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "v1.6.6_foobar.2", + } + validUnsupportedCoreDNSVersion := dns.DeepCopy() + validUnsupportedCoreDNSVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "v99.99.99", + } + + validUnsupportedCoreDNSVersionWithSkipAnnotation := dns.DeepCopy() + validUnsupportedCoreDNSVersionWithSkipAnnotation.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "gcr.io/capi-test", + ImageTag: "v99.99.99", + } + validUnsupportedCoreDNSVersionWithSkipAnnotation.Annotations = map[string]string{ + controlplanev1.SkipCoreDNSAnnotation: "", + } + + unsetCoreDNSToVersion := dns.DeepCopy() + unsetCoreDNSToVersion.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS = bootstrapv1.DNS{ + ImageRepository: "", + ImageTag: "", + } + + certificatesDir := before.DeepCopy() + certificatesDir.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "a new certificates directory" + + imageRepository := before.DeepCopy() + imageRepository.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository = "a new image repository" + + featureGates := before.DeepCopy() + 
featureGates.Spec.KubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{"a feature gate": true} + + externalEtcd := before.DeepCopy() + externalEtcd.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = bootstrapv1.ExternalEtcd{ + KeyFile: "some key file", + } + externalEtcdChanged := before.DeepCopy() + externalEtcdChanged.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = bootstrapv1.ExternalEtcd{ + KeyFile: "another key file", + } + + localDataDir := before.DeepCopy() + localDataDir.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + DataDir: "some local data dir", + } + + localPeerCertSANs := before.DeepCopy() + localPeerCertSANs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + PeerCertSANs: []string{"a cert"}, + } + + localServerCertSANs := before.DeepCopy() + localServerCertSANs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + ServerCertSANs: []string{"a cert"}, + } + + localExtraArgs := before.DeepCopy() + localExtraArgs.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = bootstrapv1.LocalEtcd{ + ExtraArgs: []bootstrapv1.Arg{ + { + Name: "an arg", + Value: ptr.To("a value"), + }, + }, + } + + beforeExternalEtcdCluster := before.DeepCopy() + beforeExternalEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ + Etcd: bootstrapv1.Etcd{ + External: bootstrapv1.ExternalEtcd{ + Endpoints: []string{"127.0.0.1"}, + }, + }, + } + scaleToEvenExternalEtcdCluster := beforeExternalEtcdCluster.DeepCopy() + scaleToEvenExternalEtcdCluster.Spec.Replicas = ptr.To[int32](2) + + beforeInvalidEtcdCluster := before.DeepCopy() + beforeInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ + Local: bootstrapv1.LocalEtcd{ + ImageRepository: "image-repository", + ImageTag: "latest", + }, + } + + afterInvalidEtcdCluster := beforeInvalidEtcdCluster.DeepCopy() + afterInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ + External: bootstrapv1.ExternalEtcd{ + Endpoints: []string{"127.0.0.1"}, + }, + } + + withoutClusterConfiguration := before.DeepCopy() + withoutClusterConfiguration.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{} + + updateNTPServers := before.DeepCopy() + updateNTPServers.Spec.KubeadmConfigSpec.NTP.Servers = []string{"new-server"} + + disableNTPServers := before.DeepCopy() + disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = ptr.To(false) + + unsetRolloutBefore := before.DeepCopy() + unsetRolloutBefore.Spec.Rollout.Before = controlplanev1.KubeadmControlPlaneRolloutBeforeSpec{} + + invalidIgnitionConfiguration := before.DeepCopy() + invalidIgnitionConfiguration.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ // Format is not set + ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ + AdditionalConfig: "config", + }, + } + + validIgnitionConfigurationBefore := before.DeepCopy() + validIgnitionConfigurationBefore.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition + validIgnitionConfigurationBefore.Spec.KubeadmConfigSpec.Ignition = bootstrapv1.IgnitionSpec{ + ContainerLinuxConfig: bootstrapv1.ContainerLinuxConfig{ + AdditionalConfig: "config-before", + }, + } + + validIgnitionConfigurationAfter := validIgnitionConfigurationBefore.DeepCopy() + validIgnitionConfigurationAfter.Spec.KubeadmConfigSpec.Ignition.ContainerLinuxConfig.AdditionalConfig = "foo: bar" + + updateInitConfigurationPatches := before.DeepCopy() 
+ updateInitConfigurationPatches.Spec.KubeadmConfigSpec.InitConfiguration.Patches = bootstrapv1.Patches{ + Directory: "/tmp/patches", + } + + updateJoinConfigurationPatches := before.DeepCopy() + updateJoinConfigurationPatches.Spec.KubeadmConfigSpec.JoinConfiguration.Patches = bootstrapv1.Patches{ + Directory: "/tmp/patches", + } + + updateInitConfigurationSkipPhases := before.DeepCopy() + updateInitConfigurationSkipPhases.Spec.KubeadmConfigSpec.InitConfiguration.SkipPhases = []string{"addon/kube-proxy"} + + updateJoinConfigurationSkipPhases := before.DeepCopy() + updateJoinConfigurationSkipPhases.Spec.KubeadmConfigSpec.JoinConfiguration.SkipPhases = []string{"addon/kube-proxy"} + + updateDiskSetup := before.DeepCopy() + updateDiskSetup.Spec.KubeadmConfigSpec.DiskSetup = bootstrapv1.DiskSetup{ + Filesystems: []bootstrapv1.Filesystem{ + { + Device: "/dev/sda", + Filesystem: "ext4", + }, + }, + } + + switchFromCloudInitToIgnition := before.DeepCopy() + switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Format = bootstrapv1.Ignition + switchFromCloudInitToIgnition.Spec.KubeadmConfigSpec.Mounts = []bootstrapv1.MountPoints{ + {"/var/lib/testdir", "/var/lib/etcd/data"}, + } + + invalidMetadata := before.DeepCopy() + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{ + "foo": "$invalid-key", + "bar": strings.Repeat("a", 64) + "too-long-value", + "/invalid-key": "foo", + } + invalidMetadata.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{ + "/invalid-key": "foo", + } + + changeTimeouts := before.DeepCopy() + changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = ptr.To[int32](20) // before 10 + changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.KubeletHealthCheckSeconds = nil // before set + changeTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts.EtcdAPICallSeconds = ptr.To[int32](20) // before not set + changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = ptr.To[int32](20) // before 10 + changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.KubeletHealthCheckSeconds = nil // before set + changeTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts.EtcdAPICallSeconds = ptr.To[int32](20) // before not set + + unsetTimeouts := before.DeepCopy() + unsetTimeouts.Spec.KubeadmConfigSpec.InitConfiguration.Timeouts = bootstrapv1.Timeouts{} + unsetTimeouts.Spec.KubeadmConfigSpec.JoinConfiguration.Timeouts = bootstrapv1.Timeouts{} + + validUpdateCertificateValidityPeriod := before.DeepCopy() + validUpdateCertificateValidityPeriod.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificateValidityPeriodDays = 150 + + invalidUpdateCACertificateValidityPeriodDays := before.DeepCopy() + invalidUpdateCACertificateValidityPeriodDays.Spec.KubeadmConfigSpec.ClusterConfiguration = bootstrapv1.ClusterConfiguration{ + CACertificateValidityPeriodDays: 730, + } + + validEncryptionAlgorithm := before.DeepCopy() + validEncryptionAlgorithm.Spec.KubeadmConfigSpec.ClusterConfiguration.EncryptionAlgorithm = bootstrapv1.EncryptionAlgorithmRSA3072 + + tests := []struct { + name string + enableIgnitionFeature bool + expectErr bool + before *controlplanev1.KubeadmControlPlane + kcp *controlplanev1.KubeadmControlPlane + }{ + { + name: "should succeed when given a valid config", + expectErr: false, + before: before, + kcp: validUpdate, + }, + { + name: "should not return an error when trying to mutate the kubeadmconfigspec initconfiguration 
noderegistration", + expectErr: false, + before: before, + kcp: validUpdateKubeadmConfigInit, + }, + { + name: "should return error when trying to mutate the kubeadmconfigspec clusterconfiguration", + expectErr: true, + before: before, + kcp: invalidUpdateKubeadmConfigCluster, + }, + { + name: "should not return an error when trying to mutate the kubeadmconfigspec joinconfiguration noderegistration", + expectErr: false, + before: before, + kcp: validUpdateKubeadmConfigJoin, + }, + { + name: "should return error when trying to mutate the kubeadmconfigspec format from cloud-config to ignition", + expectErr: true, + before: beforeKubeadmConfigFormatSet, + kcp: invalidUpdateKubeadmConfigFormat, + }, + { + name: "should return error when trying to scale to zero", + expectErr: true, + before: before, + kcp: scaleToZero, + }, + { + name: "should return error when trying to scale to an even number", + expectErr: true, + before: before, + kcp: scaleToEven, + }, + { + name: "should return error when trying to scale to nil", + expectErr: true, + before: before, + kcp: missingReplicas, + }, + { + name: "should succeed when trying to scale to an even number with external etcd defined in ClusterConfiguration", + expectErr: false, + before: beforeExternalEtcdCluster, + kcp: scaleToEvenExternalEtcdCluster, + }, + { + name: "should succeed when making a change to the local etcd image tag", + expectErr: false, + before: before, + kcp: etcdLocalImageTag, + }, + { + name: "should succeed when making a change to the local etcd image tag using a build tag", + expectErr: false, + before: before, + kcp: etcdLocalImageBuildTag, + }, + { + name: "should fail when using an invalid etcd image tag", + expectErr: true, + before: before, + kcp: etcdLocalImageInvalidTag, + }, + { + name: "should fail when making a change to the cluster config's controlPlaneEndpoint", + expectErr: true, + before: before, + kcp: controlPlaneEndpoint, + }, + { + name: "should allow changes to the cluster config's apiServer", + expectErr: false, + before: before, + kcp: apiServer, + }, + { + name: "should allow changes to the cluster config's controllerManager", + expectErr: false, + before: before, + kcp: controllerManager, + }, + { + name: "should allow changes to the cluster config's scheduler", + expectErr: false, + before: before, + kcp: scheduler, + }, + { + name: "should succeed when making a change to the cluster config's dns", + expectErr: false, + before: before, + kcp: dns, + }, + { + name: "should succeed when changing to a valid custom CoreDNS version", + expectErr: false, + before: dns, + kcp: validCoreDNSCustomToVersion, + }, + { + name: "should succeed when CoreDNS ImageTag is unset", + expectErr: false, + before: dns, + kcp: unsetCoreDNSToVersion, + }, + { + name: "should succeed when DNS is unset", + expectErr: false, + before: dns, + kcp: unsetCoreDNSToVersion, + }, + { + name: "should succeed when using a valid DNS build", + expectErr: false, + before: before, + kcp: dnsBuildTag, + }, + { + name: "should succeed when using the same CoreDNS version", + before: dns, + kcp: dns.DeepCopy(), + }, + { + name: "should succeed when using the same CoreDNS version - not supported", + before: validUnsupportedCoreDNSVersion, + kcp: validUnsupportedCoreDNSVersion, + }, + { + name: "should fail when upgrading to an unsupported version", + before: dns, + kcp: validUnsupportedCoreDNSVersion, + expectErr: true, + }, + { + name: "should succeed when upgrading to an unsupported version and KCP has skip annotation set", + before: dns, + kcp: 
validUnsupportedCoreDNSVersionWithSkipAnnotation, + }, + { + name: "should fail when using an invalid DNS build", + expectErr: true, + before: before, + kcp: dnsInvalidTag, + }, + { + name: "should fail when using an invalid CoreDNS version", + expectErr: true, + before: dns, + kcp: dnsInvalidCoreDNSToVersion, + }, + + { + name: "should fail when making a change to the cluster config's certificatesDir", + expectErr: true, + before: before, + kcp: certificatesDir, + }, + { + name: "should succeed when making a change to the cluster config's imageRepository", + expectErr: false, + before: before, + kcp: imageRepository, + }, + { + name: "should succeed when making a change to the cluster config's featureGates", + expectErr: false, + before: before, + kcp: featureGates, + }, + { + name: "should succeed when making a change to the cluster config's local etcd's configuration localDataDir field", + expectErr: false, + before: before, + kcp: localDataDir, + }, + { + name: "should succeed when making a change to the cluster config's local etcd's configuration localPeerCertSANs field", + expectErr: false, + before: before, + kcp: localPeerCertSANs, + }, + { + name: "should succeed when making a change to the cluster config's local etcd's configuration localServerCertSANs field", + expectErr: false, + before: before, + kcp: localServerCertSANs, + }, + { + name: "should succeed when making a change to the cluster config's local etcd's configuration localExtraArgs field", + expectErr: false, + before: before, + kcp: localExtraArgs, + }, + { + name: "should succeed when making a change to the cluster config's external etcd's configuration", + expectErr: false, + before: externalEtcd, + kcp: externalEtcdChanged, + }, + { + name: "should succeed when adding the cluster config's local etcd's configuration", + expectErr: false, + before: unsetEtcdLocal, + kcp: etcdLocalImageTag, + }, + { + name: "should succeed when making a change to the cluster config's local etcd's configuration", + expectErr: false, + before: etcdLocalImageTag, + kcp: etcdLocalImageTagAndDataDir, + }, + { + name: "should succeed when attempting to unset the etcd local object to fallback to the default", + expectErr: false, + before: etcdLocalImageTag, + kcp: unsetEtcdLocal, + }, + { + name: "should fail if both local and external etcd are set", + expectErr: true, + before: beforeInvalidEtcdCluster, + kcp: afterInvalidEtcdCluster, + }, + { + name: "should pass if ClusterConfiguration is nil", + expectErr: false, + before: withoutClusterConfiguration, + kcp: withoutClusterConfiguration, + }, + { + name: "should not return an error when maxSurge value is updated to 0", + expectErr: false, + before: before, + kcp: updateMaxSurgeVal, + }, + { + name: "should return an error when maxSurge value is updated to 0, but replica count is < 3", + expectErr: true, + before: before, + kcp: wrongReplicaCountForScaleIn, + }, + { + name: "should pass if NTP servers are updated", + expectErr: false, + before: before, + kcp: updateNTPServers, + }, + { + name: "should pass if NTP is disabled during update", + expectErr: false, + before: before, + kcp: disableNTPServers, + }, + { + name: "should allow changes to initConfiguration.patches", + expectErr: false, + before: before, + kcp: updateInitConfigurationPatches, + }, + { + name: "should allow changes to joinConfiguration.patches", + expectErr: false, + before: before, + kcp: updateJoinConfigurationPatches, + }, + { + name: "should allow changes to initConfiguration.skipPhases", + expectErr: false, + 
before: before, + kcp: updateInitConfigurationSkipPhases, + }, + { + name: "should allow changes to joinConfiguration.skipPhases", + expectErr: false, + before: before, + kcp: updateJoinConfigurationSkipPhases, + }, + { + name: "should allow changes to diskSetup", + expectErr: false, + before: before, + kcp: updateDiskSetup, + }, + { + name: "should allow unsetting rolloutBefore", + expectErr: false, + before: before, + kcp: unsetRolloutBefore, + }, + { + name: "should return error when Ignition configuration is invalid", + enableIgnitionFeature: true, + expectErr: true, + before: invalidIgnitionConfiguration, + kcp: invalidIgnitionConfiguration, + }, + { + name: "should succeed when Ignition configuration is modified", + enableIgnitionFeature: true, + expectErr: false, + before: validIgnitionConfigurationBefore, + kcp: validIgnitionConfigurationAfter, + }, + { + name: "should succeed when CloudInit was used before", + enableIgnitionFeature: true, + expectErr: false, + before: before, + kcp: switchFromCloudInitToIgnition, + }, + { + name: "should return error for invalid metadata", + enableIgnitionFeature: true, + expectErr: true, + before: before, + kcp: invalidMetadata, + }, + { + name: "should succeed when changing timeouts", + expectErr: false, + before: before, + kcp: changeTimeouts, + }, + { + name: "should succeed when unsetting timeouts", + expectErr: false, + before: before, + kcp: unsetTimeouts, + }, + { + name: "should succeed when setting timeouts", + expectErr: false, + before: unsetTimeouts, + kcp: changeTimeouts, + }, + { + name: "should succeed when making a change to the cluster config's certificateValidityPeriod", + expectErr: false, + before: before, + kcp: validUpdateCertificateValidityPeriod, + }, + { + name: "should return error when trying to mutate the cluster config's caCertificateValidityPeriodDays", + expectErr: true, + before: before, + kcp: invalidUpdateCACertificateValidityPeriodDays, + }, + { + name: "should allow to update encryptionAlgorithm", + before: before, + kcp: validEncryptionAlgorithm, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.enableIgnitionFeature { + // NOTE: KubeadmBootstrapFormatIgnition feature flag is disabled by default. + // Enabling the feature flag temporarily for this test. + utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.KubeadmBootstrapFormatIgnition, true) + } + + g := NewWithT(t) + + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).To(Succeed()) + } + g.Expect(warnings).To(BeEmpty()) + }) + } +} + +func TestValidateVersion(t *testing.T) { + tests := []struct { + name string + clusterConfiguration bootstrapv1.ClusterConfiguration + oldVersion string + newVersion string + expectErr bool + }{ + // Basic validation of old and new version. 
+ { + name: "error when old version is empty", + oldVersion: "", + newVersion: "v1.16.6", + expectErr: true, + }, + { + name: "error when old version is invalid", + oldVersion: "invalid-version", + newVersion: "v1.18.1", + expectErr: true, + }, + { + name: "error when new version is empty", + oldVersion: "v1.16.6", + newVersion: "", + expectErr: true, + }, + { + name: "error when new version is invalid", + oldVersion: "v1.18.1", + newVersion: "invalid-version", + expectErr: true, + }, + { + name: "pass when both versions are v1.19.0", + oldVersion: "v1.19.0", + newVersion: "v1.19.0", + expectErr: false, + }, + // Validation for skip-level upgrades. + { + name: "error when upgrading two minor versions", + oldVersion: "v1.18.8", + newVersion: "v1.20.0-alpha.0.734_ba502ee555924a", + expectErr: true, + }, + { + name: "pass when upgrading one minor version", + oldVersion: "v1.20.1", + newVersion: "v1.21.18", + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + kcpNew := controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: tt.clusterConfiguration, + }, + Version: tt.newVersion, + }, + } + + kcpOld := controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: tt.clusterConfiguration, + }, + Version: tt.oldVersion, + }, + } + + webhook := &KubeadmControlPlane{} + + allErrs := webhook.validateVersion(&kcpOld, &kcpNew) + if tt.expectErr { + g.Expect(allErrs).ToNot(BeEmpty()) + } else { + g.Expect(allErrs).To(BeEmpty()) + } + }) + } +} +func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { + g := NewWithT(t) + + before := &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "foo", + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.19.0", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneMachineTemplateSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: "test", + Kind: "UnknownInfraMachine", + Name: "infraTemplate", + }, + }, + }, + }, + } + + afterDefault := before.DeepCopy() + webhook := &KubeadmControlPlane{} + g.Expect(webhook.Default(ctx, afterDefault)).To(Succeed()) + + tests := []struct { + name string + expectErr bool + before *controlplanev1.KubeadmControlPlane + kcp *controlplanev1.KubeadmControlPlane + }{ + { + name: "update should succeed after defaulting", + expectErr: false, + before: before, + kcp: afterDefault, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + webhook := &KubeadmControlPlane{} + + warnings, err := webhook.ValidateUpdate(ctx, tt.before.DeepCopy(), tt.kcp) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).To(Succeed()) + g.Expect(tt.kcp.Spec.Version).To(Equal("v1.19.0")) + g.Expect(tt.kcp.Spec.Rollout.Strategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) + g.Expect(tt.kcp.Spec.Rollout.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) + g.Expect(tt.kcp.Spec.Replicas).To(Equal(ptr.To[int32](1))) + } + g.Expect(warnings).To(BeEmpty()) + }) + } +} + +func TestPathsMatch(t *testing.T) { + tests := []struct { + name string + allowed, path []string + match bool + }{ + { + name: "a simple match case", + allowed: []string{"a", "b", 
"c"}, + path: []string{"a", "b", "c"}, + match: true, + }, + { + name: "a case can't match", + allowed: []string{"a", "b", "c"}, + path: []string{"a"}, + match: false, + }, + { + name: "an empty path for whatever reason", + allowed: []string{"a"}, + path: []string{""}, + match: false, + }, + { + name: "empty allowed matches nothing", + allowed: []string{}, + path: []string{"a"}, + match: false, + }, + { + name: "wildcard match", + allowed: []string{"a", "b", "c", "d", "*"}, + path: []string{"a", "b", "c", "d", "e", "f", "g"}, + match: true, + }, + { + name: "long path", + allowed: []string{"a"}, + path: []string{"a", "b", "c", "d", "e", "f", "g"}, + match: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(pathsMatch(tt.allowed, tt.path)).To(Equal(tt.match)) + }) + } +} + +func TestAllowed(t *testing.T) { + tests := []struct { + name string + allowList [][]string + path []string + match bool + }{ + { + name: "matches the first and none of the others", + allowList: [][]string{ + {"a", "b", "c"}, + {"b", "d", "x"}, + }, + path: []string{"a", "b", "c"}, + match: true, + }, + { + name: "matches none in the allow list", + allowList: [][]string{ + {"a", "b", "c"}, + {"b", "c", "d"}, + {"e", "*"}, + }, + path: []string{"a"}, + match: false, + }, + { + name: "an empty path matches nothing", + allowList: [][]string{ + {"a", "b", "c"}, + {"*"}, + {"b", "c"}, + }, + path: []string{}, + match: false, + }, + { + name: "empty allowList matches nothing", + allowList: [][]string{}, + path: []string{"a"}, + match: false, + }, + { + name: "length test check", + allowList: [][]string{ + {"a", "b", "c", "d", "e", "f"}, + {"a", "b", "c", "d", "e", "f", "g", "h"}, + }, + path: []string{"a", "b", "c", "d", "e", "f", "g"}, + match: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(allowed(tt.allowList, tt.path)).To(Equal(tt.match)) + }) + } +} + +func TestPaths(t *testing.T) { + tests := []struct { + name string + path []string + diff map[string]interface{} + expected [][]string + }{ + { + name: "basic check", + diff: map[string]interface{}{ + "spec": map[string]interface{}{ + "replicas": 4, + "version": "1.17.3", + "kubeadmConfigSpec": map[string]interface{}{ + "clusterConfiguration": map[string]interface{}{ + "version": "v2.0.1", + }, + "initConfiguration": map[string]interface{}{ + "bootstrapToken": []string{"abcd", "defg"}, + }, + "joinConfiguration": nil, + }, + }, + }, + expected: [][]string{ + {"spec", "replicas"}, + {"spec", "version"}, + {"spec", "kubeadmConfigSpec", "joinConfiguration"}, + {"spec", "kubeadmConfigSpec", "clusterConfiguration", "version"}, + {"spec", "kubeadmConfigSpec", "initConfiguration", "bootstrapToken"}, + }, + }, + { + name: "empty input makes for empty output", + path: []string{"a"}, + diff: map[string]interface{}{}, + expected: [][]string{}, + }, + { + name: "long recursive check with two keys", + diff: map[string]interface{}{ + "spec": map[string]interface{}{ + "kubeadmConfigSpec": map[string]interface{}{ + "clusterConfiguration": map[string]interface{}{ + "version": "v2.0.1", + "abc": "d", + }, + }, + }, + }, + expected: [][]string{ + {"spec", "kubeadmConfigSpec", "clusterConfiguration", "version"}, + {"spec", "kubeadmConfigSpec", "clusterConfiguration", "abc"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(paths(tt.path, tt.diff)).To(ConsistOf(tt.expected)) + }) + } +} diff --git 
a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go index 80d497b90c9c..d4f6401664b9 100644 --- a/controlplane/kubeadm/internal/workload_cluster.go +++ b/controlplane/kubeadm/internal/workload_cluster.go @@ -20,7 +20,6 @@ import ( "context" "crypto" "crypto/rand" - "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" @@ -45,13 +44,13 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/proxy" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" containerutil "sigs.k8s.io/cluster-api/util/container" "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/version" ) const ( @@ -63,13 +62,6 @@ const ( ) var ( - // minKubernetesVersionControlPlaneKubeletLocalMode is the min version from which - // we will enable the ControlPlaneKubeletLocalMode kubeadm feature gate. - // Note: We have to do this with Kubernetes 1.31. Because with that version we encountered - // a case where it's not okay anymore to ignore the Kubernetes version skew (kubelet 1.31 uses - // the spec.clusterIP field selector that is only implemented in kube-apiserver >= 1.31.0). - minKubernetesVersionControlPlaneKubeletLocalMode = semver.MustParse("1.31.0") - // ErrControlPlaneMinNodes signals that a cluster doesn't meet the minimum required nodes // to remove an etcd member. ErrControlPlaneMinNodes = errors.New("cluster has fewer than 2 control plane nodes; removing an etcd member is not supported") @@ -95,6 +87,7 @@ type WorkloadCluster interface { UpdateControllerManagerInKubeadmConfigMap(controllerManager bootstrapv1.ControllerManager) func(*bootstrapv1.ClusterConfiguration) UpdateSchedulerInKubeadmConfigMap(scheduler bootstrapv1.Scheduler) func(*bootstrapv1.ClusterConfiguration) UpdateCertificateValidityPeriodDays(certificateValidityPeriodDays int32) func(*bootstrapv1.ClusterConfiguration) + UpdateEncryptionAlgorithm(encryptionAlgorithm bootstrapv1.EncryptionAlgorithmType) func(*bootstrapv1.ClusterConfiguration) UpdateKubeProxyImageInfo(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane) error RemoveEtcdMemberForMachine(ctx context.Context, machine *clusterv1.Machine) error @@ -166,7 +159,7 @@ func (w *Workload) UpdateFeatureGatesInKubeadmConfigMap(kubeadmConfigSpec bootst return func(c *bootstrapv1.ClusterConfiguration) { // We use DeepCopy here to avoid modifying the KCP object in the apiserver. kubeadmConfigSpec := kubeadmConfigSpec.DeepCopy() - DefaultFeatureGates(kubeadmConfigSpec, kubernetesVersion) + desiredstate.DefaultFeatureGates(kubeadmConfigSpec, kubernetesVersion) // Even if featureGates is nil, reset it to ClusterConfiguration // to override any previously set feature gates. @@ -174,27 +167,6 @@ func (w *Workload) UpdateFeatureGatesInKubeadmConfigMap(kubeadmConfigSpec bootst } } -const ( - // ControlPlaneKubeletLocalMode is a feature gate of kubeadm that ensures - // kubelets only communicate with the local apiserver. - ControlPlaneKubeletLocalMode = "ControlPlaneKubeletLocalMode" -) - -// DefaultFeatureGates defaults the feature gates field. 
-func DefaultFeatureGates(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, kubernetesVersion semver.Version) { - if version.Compare(kubernetesVersion, minKubernetesVersionControlPlaneKubeletLocalMode, version.WithoutPreReleases()) < 0 { - return - } - - if kubeadmConfigSpec.ClusterConfiguration.FeatureGates == nil { - kubeadmConfigSpec.ClusterConfiguration.FeatureGates = map[string]bool{} - } - - if _, ok := kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode]; !ok { - kubeadmConfigSpec.ClusterConfiguration.FeatureGates[ControlPlaneKubeletLocalMode] = true - } -} - // UpdateAPIServerInKubeadmConfigMap updates api server configuration in kubeadm config map. func (w *Workload) UpdateAPIServerInKubeadmConfigMap(apiServer bootstrapv1.APIServer) func(*bootstrapv1.ClusterConfiguration) { return func(c *bootstrapv1.ClusterConfiguration) { @@ -223,6 +195,13 @@ func (w *Workload) UpdateCertificateValidityPeriodDays(certificateValidityPeriod } } +// UpdateEncryptionAlgorithm updates EncryptionAlgorithmType in kubeadm config map. +func (w *Workload) UpdateEncryptionAlgorithm(encryptionAlgorithm bootstrapv1.EncryptionAlgorithmType) func(*bootstrapv1.ClusterConfiguration) { + return func(c *bootstrapv1.ClusterConfiguration) { + c.EncryptionAlgorithm = encryptionAlgorithm + } +} + // UpdateClusterConfiguration gets the ClusterConfiguration kubeadm-config ConfigMap, converts it to the // Cluster API representation, and then applies a mutation func; if changes are detected, the // data are converted back into the Kubeadm API version in use for the target Kubernetes version and the @@ -375,7 +354,7 @@ func calculateAPIServerPort(config *bootstrapv1.KubeadmConfig) int32 { return 6443 } -func generateClientCert(caCertEncoded, caKeyEncoded []byte, clientKey *rsa.PrivateKey) (tls.Certificate, error) { +func generateClientCert(caCertEncoded, caKeyEncoded []byte, keyEncryptionAlgorithm bootstrapv1.EncryptionAlgorithmType) (tls.Certificate, error) { caCert, err := certs.DecodeCertPEM(caCertEncoded) if err != nil { return tls.Certificate{}, err @@ -384,14 +363,24 @@ func generateClientCert(caCertEncoded, caKeyEncoded []byte, clientKey *rsa.Priva if err != nil { return tls.Certificate{}, err } + + clientKey, err := certs.NewSigner(keyEncryptionAlgorithm) + if err != nil { + return tls.Certificate{}, err + } x509Cert, err := newClientCert(caCert, clientKey, caKey) if err != nil { return tls.Certificate{}, err } - return tls.X509KeyPair(certs.EncodeCertPEM(x509Cert), certs.EncodePrivateKeyPEM(clientKey)) + encodedClientKey, err := certs.EncodePrivateKeyPEMFromSigner(clientKey) + if err != nil { + return tls.Certificate{}, err + } + + return tls.X509KeyPair(certs.EncodeCertPEM(x509Cert), encodedClientKey) } -func newClientCert(caCert *x509.Certificate, key *rsa.PrivateKey, caKey crypto.Signer) (*x509.Certificate, error) { +func newClientCert(caCert *x509.Certificate, key crypto.Signer, caKey crypto.Signer) (*x509.Certificate, error) { cfg := certs.Config{ CommonName: "cluster-api.x-k8s.io", } diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index 0f0be800a820..57396f9e0f9e 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -64,10 +64,6 @@ func TestUpdateCoreDNS(t *testing.T) { } depl := &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, ObjectMeta: 
metav1.ObjectMeta{ Name: coreDNSKey, Namespace: metav1.NamespaceSystem, @@ -1139,10 +1135,6 @@ func (m *fakeMigrator) Migrate(_, _, _ string, _ bool) (string, error) { func newCoreDNSInfoDeploymentWithimage(image string) *appsv1.Deployment { return &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, ObjectMeta: metav1.ObjectMeta{ Name: coreDNSKey, Namespace: metav1.NamespaceSystem, diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index 5390ac42faf6..4d4bfdae50f2 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -35,6 +35,7 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/desiredstate" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) @@ -947,7 +948,7 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, + desiredstate.ControlPlaneKubeletLocalMode: true, }, }, }, @@ -964,8 +965,8 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: true, + "EtcdLearnerMode": true, }, }, }, @@ -977,14 +978,14 @@ func TestUpdateFeatureGatesInKubeadmConfigMap(t *testing.T) { kubernetesVersion: semver.MustParse("1.31.0"), newClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - ControlPlaneKubeletLocalMode: false, + "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: false, }, }, wantClusterConfiguration: bootstrapv1.ClusterConfiguration{ FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - "EtcdLearnerMode": true, + desiredstate.ControlPlaneKubeletLocalMode: false, + "EtcdLearnerMode": true, }, }, }, @@ -1085,126 +1086,6 @@ func TestUpdateCertificateValidityPeriodDaysInKubeadmConfigMap(t *testing.T) { } } -func TestDefaultFeatureGates(t *testing.T) { - tests := []struct { - name string - kubernetesVersion semver.Version - kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec - wantKubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec - }{ - { - name: "don't default ControlPlaneKubeletLocalMode for 1.30", - kubernetesVersion: semver.MustParse("1.30.99"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{}, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", 
- kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: nil, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{}, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - }, - }, - }, - }, - { - name: "default ControlPlaneKubeletLocalMode for 1.31", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - "EtcdLearnerMode": true, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: true, - "EtcdLearnerMode": true, - }, - }, - }, - }, - { - name: "don't default ControlPlaneKubeletLocalMode for 1.31 if already set to false", - kubernetesVersion: semver.MustParse("1.31.0"), - kubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - }, - }, - }, - wantKubeadmConfigSpec: &bootstrapv1.KubeadmConfigSpec{ - ClusterConfiguration: bootstrapv1.ClusterConfiguration{ - FeatureGates: map[string]bool{ - ControlPlaneKubeletLocalMode: false, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - DefaultFeatureGates(tt.kubeadmConfigSpec, tt.kubernetesVersion) - g.Expect(tt.wantKubeadmConfigSpec).Should(BeComparableTo(tt.kubeadmConfigSpec)) - }) - } -} - func getProxyImageInfo(ctx context.Context, c client.Client) (string, error) { ds := &appsv1.DaemonSet{} diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index 2d6c992f2667..ca7c82e6cb38 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -56,22 +56,30 @@ import ( controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" + runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" + "sigs.k8s.io/cluster-api/controllers" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/crdmigrator" "sigs.k8s.io/cluster-api/controllers/remote" kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/controllers" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" kcpwebhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/webhooks" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + runtimeclient "sigs.k8s.io/cluster-api/exp/runtime/client" "sigs.k8s.io/cluster-api/feature" controlplanev1alpha3 "sigs.k8s.io/cluster-api/internal/api/controlplane/kubeadm/v1alpha3" controlplanev1alpha4 
"sigs.k8s.io/cluster-api/internal/api/controlplane/kubeadm/v1alpha4" "sigs.k8s.io/cluster-api/internal/contract" + internalruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client" + runtimeregistry "sigs.k8s.io/cluster-api/internal/runtime/registry" "sigs.k8s.io/cluster-api/util/apiwarnings" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/version" ) var ( + catalog = runtimecatalog.New() scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") controllerName = "cluster-api-kubeadm-control-plane-manager" @@ -94,6 +102,8 @@ var ( webhookCertDir string webhookCertName string webhookKeyName string + runtimeExtensionCertFile string + runtimeExtensionKeyFile string healthAddr string managerOptions = flags.ManagerOptions{} logOptions = logs.NewOptions() @@ -116,6 +126,10 @@ func init() { _ = controlplanev1.AddToScheme(scheme) _ = bootstrapv1.AddToScheme(scheme) _ = apiextensionsv1.AddToScheme(scheme) + _ = runtimev1.AddToScheme(scheme) + + // Register the RuntimeHook types into the catalog. + _ = runtimehooksv1.AddToCatalog(catalog) } // InitFlags initializes the flags. @@ -186,6 +200,12 @@ func InitFlags(fs *pflag.FlagSet) { fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", "Webhook key name.") + fs.StringVar(&runtimeExtensionCertFile, "runtime-extension-client-cert-file", "", + "Path of the PEM-encoded client certificate to be used when calling runtime extensions.") + + fs.StringVar(&runtimeExtensionKeyFile, "runtime-extension-client-key-file", "", + "Path of the PEM-encoded client key to be used when calling runtime extensions.") + fs.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") @@ -209,6 +229,9 @@ func InitFlags(fs *pflag.FlagSet) { // ADD CRD RBAC for CRD Migrator. // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions;customresourcedefinitions/status,verbs=update;patch,resourceNames=kubeadmcontrolplanes.controlplane.cluster.x-k8s.io;kubeadmcontrolplanetemplates.controlplane.cluster.x-k8s.io +// Add RBAC for ExtensionConfig controller and runtime client (intentionally does not include write permissions) +// +kubebuilder:rbac:groups=runtime.cluster.x-k8s.io,resources=extensionconfigs,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch func main() { InitFlags(pflag.CommandLine) @@ -437,8 +460,33 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { setupLog.Error(err, "unable to create etcd logger") os.Exit(1) } + + var runtimeClient runtimeclient.Client + if feature.Gates.Enabled(feature.InPlaceUpdates) { + // This is the creation of the runtimeClient for the controllers, embedding a shared catalog and registry instance. 
+ runtimeClient = internalruntimeclient.New(internalruntimeclient.Options{ + CertFile: runtimeExtensionCertFile, + KeyFile: runtimeExtensionKeyFile, + Catalog: catalog, + Registry: runtimeregistry.New(), + Client: mgr.GetClient(), + }) + + if err = (&controllers.ExtensionConfigReconciler{ + Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), + RuntimeClient: runtimeClient, + ReadOnly: true, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(10)); err != nil { + setupLog.Error(err, "Unable to create controller", "controller", "ExtensionConfig") + os.Exit(1) + } + } + if err := (&kubeadmcontrolplanecontrollers.KubeadmControlPlaneReconciler{ Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), SecretCachingClient: secretCachingClient, ClusterCache: clusterCache, WatchFilterValue: watchFilterValue, @@ -446,7 +494,11 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { EtcdCallTimeout: etcdCallTimeout, EtcdLogger: etcdLogger, RemoteConditionsGracePeriod: remoteConditionsGracePeriod, - }).SetupWithManager(ctx, mgr, concurrency(kubeadmControlPlaneConcurrency)); err != nil { + RuntimeClient: runtimeClient, + }).SetupWithManager(ctx, mgr, controller.Options{ + MaxConcurrentReconciles: kubeadmControlPlaneConcurrency, + ReconciliationTimeout: 3 * time.Minute, // increase reconciliation timeout because the KubeadmControlPlaneReconciler tries to connect with all the etcd member, and times out if those operations might sum up. + }); err != nil { setupLog.Error(err, "unable to create controller", "controller", "KubeadmControlPlane") os.Exit(1) } diff --git a/docs/book/src/SUMMARY.md b/docs/book/src/SUMMARY.md index d2e7feb3f3a7..88d7b4a17f3e 100644 --- a/docs/book/src/SUMMARY.md +++ b/docs/book/src/SUMMARY.md @@ -37,8 +37,10 @@ - [Operating a managed Cluster](./tasks/experimental-features/cluster-class/operate-cluster.md) - [Runtime SDK](tasks/experimental-features/runtime-sdk/index.md) - [Implementing Runtime Extensions](./tasks/experimental-features/runtime-sdk/implement-extensions.md) + - [Implementing In-Place Update Hooks Extensions](./tasks/experimental-features/runtime-sdk/implement-in-place-update-hooks.md) - [Implementing Lifecycle Hook Extensions](./tasks/experimental-features/runtime-sdk/implement-lifecycle-hooks.md) - [Implementing Topology Mutation Hook Extensions](./tasks/experimental-features/runtime-sdk/implement-topology-mutation-hook.md) + - [Implementing Upgrade Plan Runtime Extensions](./tasks/experimental-features/runtime-sdk/implement-upgrade-plan-hooks.md) - [Deploying Runtime Extensions](./tasks/experimental-features/runtime-sdk/deploy-runtime-extension.md) - [Ignition Bootstrap configuration](./tasks/experimental-features/ignition.md) - [Running multiple providers](./tasks/multiple-providers.md) @@ -47,6 +49,7 @@ - [ClusterResourceSet](./tasks/cluster-resource-set.md) - [Security Guidelines](./security/index.md) - [Pod Security Standards](./security/pod-security-standards.md) + - [Security Guidelines for Cluster API Users](./security/security-guidelines.md) - [clusterctl CLI](./clusterctl/overview.md) - [clusterctl Commands](clusterctl/commands/commands.md) - [init](clusterctl/commands/init.md) @@ -94,6 +97,7 @@ - [Provider contracts](developer/providers/contracts/overview.md) - [InfraCluster](./developer/providers/contracts/infra-cluster.md) - [InfraMachine](developer/providers/contracts/infra-machine.md) + - [InfraMachinePool](developer/providers/contracts/infra-machinepool.md) - 
[BootstrapConfig](developer/providers/contracts/bootstrap-config.md) - [ControlPlane](developer/providers/contracts/control-plane.md) - [clusterctl](developer/providers/contracts/clusterctl.md) @@ -101,9 +105,9 @@ - [Best practices](./developer/providers/best-practices.md) - [Security guidelines](./developer/providers/security-guidelines.md) - [Version migration](developer/providers/migrations/overview.md) - - [v1.8 to v1.9](./developer/providers/migrations/v1.8-to-v1.9.md) - [v1.9 to v1.10](./developer/providers/migrations/v1.9-to-v1.10.md) - [v1.10 to v1.11](./developer/providers/migrations/v1.10-to-v1.11.md) + - [v1.11 to v1.12](./developer/providers/migrations/v1.11-to-v1.12.md) - [Troubleshooting](./user/troubleshooting.md) - [Reference](./reference/reference.md) - [API Reference](./reference/api/reference.md)
diff --git a/docs/book/src/developer/core/controllers/machine-pool.md b/docs/book/src/developer/core/controllers/machine-pool.md index 0ce38cb05d20..93e7798a7745 100644 --- a/docs/book/src/developer/core/controllers/machine-pool.md +++ b/docs/book/src/developer/core/controllers/machine-pool.md @@ -2,6 +2,8 @@ ![](../../../images/cluster-admission-machinepool-controller.png) +📖 **For conceptual information about MachinePools, when to use them, and how they compare to MachineDeployments**, see the [MachinePool Guide](../../../tasks/experimental-features/machine-pools.md). + The MachinePool controller's main responsibilities are: * Setting an OwnerReference on each MachinePool object to:
diff --git a/docs/book/src/developer/core/logging.md b/docs/book/src/developer/core/logging.md index 06e43eace1a7..66654115597c 100644 --- a/docs/book/src/developer/core/logging.md +++ b/docs/book/src/developer/core/logging.md @@ -19,6 +19,8 @@ In Cluster API we strive to follow three principles while implementing logging: Kubernetes defines a set of [logging conventions](https://git.k8s.io/community/contributors/devel/sig-instrumentation/logging.md), as well as tools and libraries for logging. +Cluster API should align with those guidelines and use those tools as much as possible. + ## Continuous improvement The foundational items of Cluster API logging are: @@ -100,6 +102,11 @@ key value pairs (in order of importance): creates a MachineSet. - Other Key value pairs. +Notably, over time in CAPI we are also standardizing the usage of other key value pairs to improve consistency when reading +logs, e.g. +- key `reason` MUST be used when adding details about WHY a change happened. +- key `diff` MUST be used when documenting the diff in an object that either led to a change, or that results from a change. + ## Log Messages - A Message MUST always start with a capital letter. @@ -108,7 +115,13 @@ key value pairs (in order of importance): the action log and the corresponding error log; While logging before the action, log verbs should use the -ing form. - Ideally log messages should surface a different level of detail according to the target log level (see [log levels](#log-levels) for more details). -- If Kubernetes resource name is used in log messages, it should be used as is, For example `Reconciling DockerMachineTemplate` +- If a Kubernetes resource name is used in log messages, it MUST be used as is, for example `Reconciling DockerMachineTemplate` +- If an API field name is used in log messages, the entire path MUST be used and field names MUST be capitalized like in the + API (not as in the golang type).
For example `Waiting for spec.providerID to be set` +- If a log message is about a controlled or a referenced object, e.g. Machine controller performing an action on MachineSet, + the message MUST contain the Kind of the controlled/referenced object and its name, for example `Created MachineSet foo-bar` + - If the controlled/referenced object is in another namespace, use namespace/name instead of name + - The controlled/referenced object MUST also be added as a key value pair (see guidelines above) ## Log Levels @@ -136,19 +149,19 @@ thorny parts of code. Over time, based on feedback from SRE/developers, more log ## Developing and testing logs -Our [Tilt](tilt.md) setup offers a batteries-included log suite based on [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/), [Loki](https://grafana.com/docs/loki/latest/fundamentals/overview/) and [Grafana](https://grafana.com/docs/grafana/latest/explore/logs-integration/). +Our [Tilt](tilt.md) setup offers a batteries-included log suite based on [alloy](https://grafana.com/docs/loki/latest/send-data/alloy/), [Loki](https://grafana.com/docs/loki/latest/fundamentals/overview/) and [Grafana](https://grafana.com/docs/grafana/latest/explore/logs-integration/). We are working to continuously improving this experience, allowing Cluster API developers to use logs and improve them as part of their development process. For the best experience exploring the logs using Tilt: 1. Set `--logging-format=json`. 2. Set a high log verbosity, e.g. `v=5`. -3. Enable Promtail, Loki, and Grafana under `deploy_observability`. +3. Enable alloy, Loki, and Grafana under `deploy_observability`. A minimal example of a tilt-settings.yaml file that deploys a ready-to-use logging suite looks like: ```yaml deploy_observability: - - promtail + - alloy - loki - grafana enable_providers: @@ -209,7 +222,6 @@ Will return logs from the `capi-controller-manager`, associated with the Cluster Will return the logs from four CAPI providers - the Core provider, Kubeadm Control Plane provider, Kubeadm Bootstrap provider and the Docker infrastructure provider. It filters by the cluster name and the machine name and then formats the log lines to show just the source controller and the message. This allows us to correlate logs and see actions taken by each of these four providers related to the machine `my-cluster-linux-worker-1`. For more information on formatting and filtering logs using Grafana and Loki see: -- [json parsing](https://grafana.com/docs/loki/latest/clients/promtail/stages/json/) - [log queries](https://grafana.com/docs/loki/latest/logql/log_queries/) ## What about providers diff --git a/docs/book/src/developer/core/testing.md b/docs/book/src/developer/core/testing.md index b0dff071a918..82d5fe8d23c2 100644 --- a/docs/book/src/developer/core/testing.md +++ b/docs/book/src/developer/core/testing.md @@ -308,7 +308,7 @@ analyzing them via Grafana. 1. Start the development environment as described in [Developing Cluster API with Tilt](tilt.md). * Make sure to deploy Loki and Grafana via `deploy_observability`. - * If you only want to see imported logs, don't deploy promtail (via `deploy_observability`). + * If you only want to see imported logs, don't deploy alloy (via `deploy_observability`). * If you want to drop all logs from Loki, just delete the Loki Pod in the `observability` namespace. 2. You can then import logs via the `Import Logs` button on the top right of the [Loki resource page](http://localhost:10350/r/loki/overview). 
Just click on the downwards arrow, enter either a ProwJob URL, a GCS path or a local folder and click on `Import Logs`. diff --git a/docs/book/src/developer/core/tilt.md b/docs/book/src/developer/core/tilt.md index 85d33eb96907..8d456fd71069 100644 --- a/docs/book/src/developer/core/tilt.md +++ b/docs/book/src/developer/core/tilt.md @@ -297,7 +297,7 @@ Supported values are: * `loki`: To receive and store logs. * `metrics-server`: To enable `kubectl top node/pod`. * `prometheus`*: For collecting metrics from Kubernetes. -* `promtail`: For providing pod logs to `loki`. +* `alloy`: For providing pod logs to `loki`. * `parca`*: For visualizing profiling data. * `tempo`: To store traces. * `visualizer`*: Visualize Cluster API resources for each cluster, provide quick access to the specs and status of any resource. diff --git a/docs/book/src/developer/core/tuning.md b/docs/book/src/developer/core/tuning.md index 19fb5fd018ff..964fb32e4b7f 100644 --- a/docs/book/src/developer/core/tuning.md +++ b/docs/book/src/developer/core/tuning.md @@ -9,7 +9,7 @@ When tuning controllers, both for scalability, performance or for reducing their Cluster API provides a full stack of tools for tuning its own controllers as well as controllers for all providers if developed using controller runtime. As a bonus, most of this tooling can be used with any other controller runtime based controllers. -With tilt, you can easily deploy a full observability stack with Grafana, Loki, promtail, Prometheus, kube-state-metrics, Parca and Tempo. +With tilt, you can easily deploy a full observability stack with Grafana, Loki, alloy, Prometheus, kube-state-metrics, Parca and Tempo. All tools are preconfigured, and most notably kube-state-metrics already collects CAPI metrics and Grafana is configured with a set of dashboards that we used in previous rounds of CAPI tuning. Overall, the CAPI dev environment offers a considerable amount of expertise, free to use and to improve for the entire community. We highly recommend to invest time in looking into those tools, learn and provide feedback. diff --git a/docs/book/src/developer/providers/contracts/bootstrap-config.md b/docs/book/src/developer/providers/contracts/bootstrap-config.md index b3cf28a8bc5e..ec940276c358 100644 --- a/docs/book/src/developer/providers/contracts/bootstrap-config.md +++ b/docs/book/src/developer/providers/contracts/bootstrap-config.md @@ -1,6 +1,6 @@ # Contract rules for BootstrapConfig -Bootstrap providers SHOULD implement a BootstrapConfig resource. +Bootstrap providers SHOULD implement a BootstrapConfig resource using Kubernetes' CustomResourceDefinition (CRD). The goal of a BootstrapConfig resource is to generates bootstrap data that is used to bootstrap a Kubernetes node. These may be e.g. [cloud-init] scripts. 
@@ -501,6 +501,6 @@ The following diagram shows the typical logic for a bootstrap provider: [clusterctl provider contract]: clusterctl.md [implementation best practices]: ../best-practices.md [Server Side Apply]: https://kubernetes.io/docs/reference/using-api/server-side-apply/ -[the DockerMachineTemplate webhook]: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook.go +[the DockerMachineTemplate webhook]: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/infrastructure/docker/internal/webhooks/dockermachinetemplate.go [BootstrapConfig: pausing]: #bootstrapconfig-pausing [Cluster API v1.11 migration notes]: ../migrations/v1.10-to-v1.11.md diff --git a/docs/book/src/developer/providers/contracts/clusterctl.md b/docs/book/src/developer/providers/contracts/clusterctl.md index e6f0a1dee08a..f2fd9de61b56 100644 --- a/docs/book/src/developer/providers/contracts/clusterctl.md +++ b/docs/book/src/developer/providers/contracts/clusterctl.md @@ -131,7 +131,7 @@ A provider url should be in the form * The components YAML, the metadata YAML and eventually the workload cluster templates are included into the same package version See the [GitLab docs](https://docs.gitlab.com/ee/user/packages/generic_packages/) for more information -about how to create a generic package. +about how to create a generic package. If you are hosting a private Gitlab repository, you can use a [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) or [project access token](https://docs.gitlab.com/user/project/settings/project_access_tokens.html) to access the provider artifacts by adding the `gitlab-access-token` variable to the `clusterctl` configuration in order to authenticate against the GitLab API. @@ -350,6 +350,7 @@ providers. | CAPKK | cluster.x-k8s.io/provider=infrastructure-kubekey | | CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt | | CAPM3 | cluster.x-k8s.io/provider=infrastructure-metal3 | +| CAPMS | cluster.x-k8s.io/provider=infrastructure-metal-stack | | CAPN | cluster.x-k8s.io/provider=infrastructure-nested | | CAPONE | cluster.x-k8s.io/provider=infrastructure-opennebula | | CAPO | cluster.x-k8s.io/provider=infrastructure-openstack | diff --git a/docs/book/src/developer/providers/contracts/control-plane.md b/docs/book/src/developer/providers/contracts/control-plane.md index 14448608d25e..aab4a600e96b 100644 --- a/docs/book/src/developer/providers/contracts/control-plane.md +++ b/docs/book/src/developer/providers/contracts/control-plane.md @@ -1,6 +1,6 @@ # Contract rules for ControlPlane -Control plane providers MUST implement a ControlPlane resource. +Control plane providers MUST implement a ControlPlane resource using Kubernetes' CustomResourceDefinition (CRD). The goal of a ControlPlane resource is to instantiate a Kubernetes control plane; a Kubernetes control plane at least contains the following components: @@ -68,6 +68,7 @@ repo or add an item to the agenda in the [Cluster API community meeting](https:/ | [ControlPlane: version] | No | Mandatory if control plane allows direct management of the Kubernetes version in use; Mandatory for cluster class support. | | [ControlPlane: machines] | No | Mandatory if control plane instances are represented with a set of Cluster API Machines. 
| | [ControlPlane: initialization completed] | Yes | | +| [ControlPlane: in-place updates] | No | Only supported for control plane providers with control plane machines | | [ControlPlane: conditions] | No | | | [ControlPlane: terminal failures] | No | | | [ControlPlaneTemplate, ControlPlaneTemplateList resource definition] | No | Mandatory for ClusterClasses support | @@ -616,8 +617,34 @@ the ControlPlane resource will be ignored. -### ControlPlane: conditions +### ControlPlane: in-place updates + +If a control plane provider would like to support in-place updates, please check the [proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/20240807-in-place-updates.md). + +Supporting in-place updates requires: +- calling the registered `CanUpdateMachine` hook when making the "can update in-place" decision. +- when it is decided to perform the update in-place: + - the machine spec must be updated to the desired state, as well as the spec of the corresponding infrastructure machine and bootstrap config + - while updating those objects, the `in-place-updates.internal.cluster.x-k8s.io/update-in-progress` annotation must also be set + - once all objects are updated, the `UpdateMachine` hook must be set as pending on the machine object + +After the above steps are completed, the machine controller takes over and completes the in-place update. + + + + +### ControlPlane: conditions According to [Kubernetes API Conventions], Conditions provide a standard mechanism for higher-level status reporting from a controller. @@ -873,7 +900,8 @@ is implemented in ControlPlane controllers: [ControlPlane: machines]: #controlplane-machines [In place propagation of changes affecting Kubernetes objects only]: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20221003-In-place-propagation-of-Kubernetes-objects-only-changes.md [ControlPlane: version]: #controlplane-version -[ControlPlane: initialization completed]: #controlplane-initialization-completed +[ControlPlane: initialization completed]: #controlplane-initialization-completed +[ControlPlane: in-place updates]: #controlplane-in-place-updates [ControlPlane: conditions]: #controlplane-conditions [Kubernetes API Conventions]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties [Improving status in CAPI resources]: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md
diff --git a/docs/book/src/developer/providers/contracts/infra-cluster.md b/docs/book/src/developer/providers/contracts/infra-cluster.md index f6d19fdb8b04..12cd9ae040d2 100644 --- a/docs/book/src/developer/providers/contracts/infra-cluster.md +++ b/docs/book/src/developer/providers/contracts/infra-cluster.md @@ -1,6 +1,6 @@ # Contract rules for InfraCluster -Infrastructure providers SHOULD implement an InfraCluster resource. +Infrastructure providers SHOULD implement an InfraCluster resource using Kubernetes' CustomResourceDefinition (CRD). The goal of an InfraCluster resource is to supply whatever prerequisites (in term of infrastructure) are necessary for running machines. Examples might include networking, load balancers, firewall rules, and so on.
diff --git a/docs/book/src/developer/providers/contracts/infra-machine.md b/docs/book/src/developer/providers/contracts/infra-machine.md index 44a8d1b94c1c..21463c077059 100644 --- a/docs/book/src/developer/providers/contracts/infra-machine.md +++ b/docs/book/src/developer/providers/contracts/infra-machine.md @@ -1,6 +1,6 @@ # Contract rules for InfraMachine -Infrastructure providers SHOULD implement an InfraMachine resource. +Infrastructure providers SHOULD implement an InfraMachine resource using Kubernetes' CustomResourceDefinition (CRD). The goal of an InfraMachine resource is to manage the lifecycle of a provider-specific machine instances. These may be physical or virtual instances, and they represent the infrastructure for Kubernetes nodes. @@ -641,7 +641,7 @@ is implemented in InfraMachine controllers: [implementation best practices]: ../best-practices.md [infrastructure Provider Security Guidance]: ../security-guidelines.md [Server Side Apply]: https://kubernetes.io/docs/reference/using-api/server-side-apply/ -[the DockerMachineTemplate webhook]: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook.go +[the DockerMachineTemplate webhook]: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/infrastructure/docker/internal/webhooks/dockermachinetemplate.go [Cluster API v1.11 migration notes]: ../migrations/v1.10-to-v1.11.md [Opt-in Autoscaling from Zero]: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md [InfraMachine: pausing]: #inframachine-pausing
diff --git a/docs/book/src/developer/providers/contracts/infra-machinepool.md b/docs/book/src/developer/providers/contracts/infra-machinepool.md new file mode 100644 index 000000000000..d8adbea7e426 --- /dev/null +++ b/docs/book/src/developer/providers/contracts/infra-machinepool.md @@ -0,0 +1,534 @@ +# Contract rules for InfraMachinePool + +Infrastructure providers CAN OPTIONALLY implement an InfraMachinePool resource using Kubernetes' CustomResourceDefinition (CRD). + +The goal of an InfraMachinePool is to manage the lifecycle of a provider-specific pool of machines using a provider-specific service (like Auto Scaling groups in AWS & Virtual Machine Scale Sets in Azure). + +The machines in the pool may be physical or virtual instances (although most likely virtual), and they represent the infrastructure for Kubernetes nodes. + +The InfraMachinePool resource will be referenced by one of the Cluster API core resources, MachinePool. + +The [core MachinePool's controller](../../core/controllers/machine-pool.md) is responsible for coordinating operations of the MachinePool with the InfraMachinePool. The operations are coordinated via the contract rules defined in this page. + +Once contract rules are satisfied by an InfraMachinePool implementation, other implementation details +can be addressed according to specific needs (Cluster API is not prescriptive). + +Nevertheless, it is always recommended to take a look at Cluster API controllers, +in-tree providers, other providers, and use them as a reference implementation (unless custom solutions are required +in order to address very specific needs).
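+For orientation, the sketch below shows how a MachinePool might reference an InfraMachinePool. This is purely illustrative and hedged: `FooMachinePool`, the `infrastructure.foo.com` API group and all object names are placeholders, and the exact `apiVersion` and reference format depend on the Cluster API and contract versions in use.
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta2
+kind: MachinePool
+metadata:
+  name: foo-pool
+  namespace: default
+spec:
+  clusterName: my-cluster
+  replicas: 3
+  template:
+    spec:
+      clusterName: my-cluster
+      version: v1.31.0
+      bootstrap:
+        configRef:
+          apiGroup: bootstrap.cluster.x-k8s.io
+          kind: KubeadmConfig
+          name: foo-pool
+      infrastructureRef:
+        apiGroup: infrastructure.foo.com
+        kind: FooMachinePool
+        name: foo-pool
+```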
+ + + +## Rules (contract version v1beta2) + +| Rule | Mandatory | Note | +|----------------------------------------------------------------------|-----------|--------------------------------------| +| [All resources: scope] | Yes | | +| [All resources: `TypeMeta` and `ObjectMeta`field] | Yes | | +| [All resources: `APIVersion` field value] | Yes | | +| [InfraMachinePool, InfraMachinePoolList resource definition] | Yes | | +| [InfraMachinePool: instances] | No | | +| [MachinePoolMachines support] | No | | +| [InfraMachinePool: providerID] | No | | +| [InfraMachinePool: providerIDList] | Yes | | +| [InfraMachinePool: initialization completed] | Yes | | +| [InfraMachinePool: pausing] | No | | +| [InfraMachinePool: conditions] | No | | +| [InfraMachinePool: replicas] | Yes | | +| [InfraMachinePool: terminal failures] | No | | +| [InfraMachinePoolTemplate, InfraMachineTemplatePoolList resource definition] | No | Mandatory for ClusterClasses support | +| [InfraMachinePoolTemplate: support for SSA dry run] | No | Mandatory for ClusterClasses support | +| [Multi tenancy] | No | Mandatory for clusterctl CLI support | +| [Clusterctl support] | No | Mandatory for clusterctl CLI support | + +Note: + +- `All resources` refers to all the provider's resources "core" Cluster API interacts with; + In the context of this page: `InfraMachinePool`, `InfraMachinePoolTemplate` and corresponding list types + +### All resources: scope + +All resources MUST be namespace-scoped. + +### All resources: `TypeMeta` and `ObjectMeta` field + +All resources MUST have the standard Kubernetes `TypeMeta` and `ObjectMeta` fields. + +### All resources: `APIVersion` field value + +In Kubernetes `APIVersion` is a combination of API group and version. +Special consideration MUST apply to both API group and version for all the resources Cluster API interacts with. + +#### All resources: API group + +The domain for Cluster API resources is `cluster.x-k8s.io`, and infrastructure providers under the Kubernetes SIGS org +generally use `infrastructure.cluster.x-k8s.io` as API group. + +If your provider uses a different API group, you MUST grant full read/write RBAC permissions for resources in your API group +to the Cluster API core controllers. The canonical way to do so is via a `ClusterRole` resource with the [aggregation label] +`cluster.x-k8s.io/aggregate-to-manager: "true"`. + +The following is an example ClusterRole for a `FooMachinePool` resource in the `infrastructure.foo.com` API group: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: capi-foo-clusters + labels: + cluster.x-k8s.io/aggregate-to-manager: "true" +rules: +- apiGroups: + - infrastructure.foo.com + resources: + - foomachinepools + - foomachinepooltemplates + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +``` + +Note: The write permissions are required because Cluster API manages InfraMachinePools generated from InfraMachinePoolTemplates; when using ClusterClass and managed topologies, also InfraMachinePoolTemplates are managed directly by Cluster API. + +#### All resources: version + +The resource Version defines the stability of the API and its backward compatibility guarantees. +Examples include `v1alpha1`, `v1beta1`, `v1`, etc. and are governed by the [Kubernetes API Deprecation Policy]. + +Your provider SHOULD abide by the same policies. + +Note: The version of your provider does not need to be in sync with the version of core Cluster API resources. 
+Instead, prefer choosing a version that matches the stability of the provider API and its backward compatibility guarantees. + +Additionally: + +Providers MUST set `cluster.x-k8s.io/<contract version>` labels on the InfraMachinePool Custom Resource Definitions. + +The label is a map from a Cluster API contract version to your Custom Resource Definition versions. +The value is an underscore-delimited (_) list of versions. Each value MUST point to an available version in your CRD Spec. + +The label allows Cluster API controllers to perform automatic conversions for object references; the controllers will pick +the last available version in the list if multiple versions are found. + +To apply the label to CRDs it’s possible to use labels in your `kustomization.yaml` file, usually in `config/crd`: + +```yaml +labels: +- pairs: + cluster.x-k8s.io/v1beta1: v1beta1 + cluster.x-k8s.io/v1beta2: v1beta2 +``` + +An example of this is in the [AWS infrastructure provider](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/config/crd/kustomization.yaml). + +### InfraMachinePool, InfraMachinePoolList resource definition + +You MUST define an InfraMachinePool resource if your provider supports MachinePools. +The InfraMachinePool CRD name must have the format produced by [`sigs.k8s.io/cluster-api/util/contract.CalculateCRDName(Group, Kind)`](https://github.com/search?q=repo%3Akubernetes-sigs%2Fcluster-api+%22func+CalculateCRDName%22&type=code). + +Note: Cluster API is using such a naming convention to avoid an expensive CRD lookup operation when looking for labels from +the CRD definition of the InfraMachinePool resource. + +It is a generally applied convention to use names in the format `${env}MachinePool`, where ${env} is a, possibly short, name +for the environment in question. For example `AWSMachinePool` is an implementation for Amazon Web Services, and `AzureMachinePool` +is one for Azure. + +```go +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=foomachinepools,shortName=foomp,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of FooMachinePool" + +// FooMachinePool is the Schema for foomachinepools. +type FooMachinePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FooMachinePoolSpec `json:"spec,omitempty"` + Status FooMachinePoolStatus `json:"status,omitempty"` +} + +type FooMachinePoolSpec struct { + // See other rules for more details about mandatory/optional fields in InfraMachinePool spec. + // Other fields SHOULD be added based on the needs of your provider. +} + +type FooMachinePoolStatus struct { + // See other rules for more details about mandatory/optional fields in InfraMachinePool status. + // Other fields SHOULD be added based on the needs of your provider. +} +``` + +For each InfraMachinePool resource, you MUST also add the corresponding list resource. +The list resource MUST be named as `<InfraMachinePool>List`. + +```go +// +kubebuilder:object:root=true + +// FooMachinePoolList contains a list of foomachinepools. +type FooMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FooMachinePool `json:"items"` +} +``` + +### InfraMachinePool: instances + +Each InfraMachinePool MAY specify a status field that is used to report information about each replica within the machine pool.
This field is not used by core CAPI. It is purely informational and is used as a convenient way for a user to get details of the replicas in the machine pool, such as their provider ID and IP addresses. + +If you implement this, then create a `status.instances` field that is a slice of a struct type that contains the information you want to store and be made available to the users. + +```go +type FooMachinePoolStatus struct { + // Instances contains the status for each instance in the pool + // +optional + Instances []FooMachinePoolInstanceStatus `json:"instances,omitempty"` + + // See other rules for more details about mandatory/optional fields in InfraMachinePool status. + // Other fields SHOULD be added based on the needs of your provider. +} + +// FooMachinePoolInstanceStatus contains instance status information about a FooMachinePool. +type FooMachinePoolInstanceStatus struct { + // Addresses contains the associated addresses for the machine. + // +optional + Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + + // InstanceName is the identification of the Machine Instance within the Machine Pool + InstanceName string `json:"instanceName,omitempty"` + + // ProviderID is the provider identification of the Machine Pool Instance + // +optional + ProviderID *string `json:"providerID,omitempty"` + + // Version defines the Kubernetes version for the Machine Instance + // +optional + Version *string `json:"version,omitempty"` + + // Ready denotes that the machine is ready + // +optional + Ready bool `json:"ready"` +} +``` + +### MachinePoolMachines support + +A provider can opt in to MachinePool Machines (MPM). With MPM, all the replicas in a MachinePool are represented by a Machine & InfraMachine. This enables core CAPI to perform common operations on single machines (and their Nodes), such as draining a node before scale down, integration with Cluster Autoscaler and also [MachineHealthChecks]. + +If you want to adopt MPM then you MUST have a `status.infrastructureMachineKind` field and the field's value must be set to the resource kind that represents the replicas in the pool. This is usually the resource kind name for the provider's InfraMachine. For example, for the AWS provider the value would be set to `AWSMachine`. + +By opting in, the infra provider is expected to create an InfraMachine for every replica in the pool. The lifecycle of these InfraMachines must be managed so that when scale up or scale down happens, the list of InfraMachines is kept up to date. + +```go +type FooMachinePoolStatus struct { + // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. + // +optional + InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` + + // See other rules for more details about mandatory/optional fields in InfraMachinePool status. + // Other fields SHOULD be added based on the needs of your provider. +} +``` + +Note: not all InfraMachinePool implementations support MPM as it depends on whether the infrastructure service underpinning the InfraMachinePool supports operations being performed against single machines. For example, in CAPA `AWSManagedMachinePool` is used to represent an "EKS managed node group" and as a "managed" service you are expected to NOT perform operations against single nodes. + +For further information see the [proposal](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20220209-machinepool-machines.md).
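+Purely as an illustration of this bookkeeping, and not as part of the contract, the sketch below shows how a reconciler could keep InfraMachines in sync with the pool's instances. All names are assumptions: `infrav1`, `FooMachine`, `FooMachinePool`, the `Instance` struct and the `infrastructure.foo.com/pool-name` label are hypothetical placeholders.
+
+```go
+package controllers
+
+import (
+	"context"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	infrav1 "example.com/provider/api/v1beta1" // hypothetical provider API package
+)
+
+// Instance is a placeholder for whatever the provider's API returns for a pool replica.
+type Instance struct {
+	Name       string
+	ProviderID string
+}
+
+type FooMachinePoolReconciler struct {
+	client.Client
+}
+
+// reconcileMachinePoolMachines creates a FooMachine for every instance in the pool and
+// deletes FooMachines whose backing instance is gone, keeping the list up to date.
+func (r *FooMachinePoolReconciler) reconcileMachinePoolMachines(ctx context.Context, pool *infrav1.FooMachinePool, instances []Instance) error {
+	// List the FooMachines previously created for this pool (selected via an illustrative label).
+	existing := &infrav1.FooMachineList{}
+	if err := r.List(ctx, existing, client.InNamespace(pool.Namespace),
+		client.MatchingLabels{"infrastructure.foo.com/pool-name": pool.Name}); err != nil {
+		return err
+	}
+
+	// Index the existing FooMachines by provider ID.
+	byProviderID := map[string]*infrav1.FooMachine{}
+	for i := range existing.Items {
+		byProviderID[existing.Items[i].Spec.ProviderID] = &existing.Items[i]
+	}
+
+	// Create a FooMachine for every instance that doesn't have one yet (scale up / steady state).
+	for _, instance := range instances {
+		if _, ok := byProviderID[instance.ProviderID]; ok {
+			delete(byProviderID, instance.ProviderID)
+			continue
+		}
+		fooMachine := &infrav1.FooMachine{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      instance.Name,
+				Namespace: pool.Namespace,
+				Labels:    map[string]string{"infrastructure.foo.com/pool-name": pool.Name},
+				OwnerReferences: []metav1.OwnerReference{
+					*metav1.NewControllerRef(pool, infrav1.GroupVersion.WithKind("FooMachinePool")),
+				},
+			},
+			Spec: infrav1.FooMachineSpec{ProviderID: instance.ProviderID},
+		}
+		if err := r.Create(ctx, fooMachine); err != nil && !apierrors.IsAlreadyExists(err) {
+			return err
+		}
+	}
+
+	// Whatever is left in the map has no backing instance anymore (scale down): delete it.
+	for _, stale := range byProviderID {
+		if err := r.Delete(ctx, stale); err != nil && !apierrors.IsNotFound(err) {
+			return err
+		}
+	}
+	return nil
+}
+```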
+
+### InfraMachinePool: providerID
+
+Each InfraMachinePool MAY specify a provider ID on `spec.providerID` that can be used to identify the infrastructure resource that implements the InfraMachinePool.
+
+This field isn't used by core CAPI. It is purely informational and surfaces the infrastructure's identifier for the InfraMachinePool to the user. For example, for AWSMachinePool this would be the ASG identifier.
+
+```go
+type FooMachinePoolSpec struct {
+	// providerID is the identification ID of the FooMachinePool.
+	// +optional
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=512
+	ProviderID string `json:"providerID,omitempty"`
+
+	// See other rules for more details about mandatory/optional fields in InfraMachinePool spec.
+	// Other fields SHOULD be added based on the needs of your provider.
+}
+```
+
+NOTE: Starting from the v1beta2 contract, we recommend that the `ProviderID` field be of type `string` to align with API conventions.
+
+### InfraMachinePool: providerIDList
+
+Each InfraMachinePool MUST supply a list of the identification IDs of the machine instances managed by the machine pool by storing these in `spec.providerIDList`.
+
+```go
+type FooMachinePoolSpec struct {
+	// ProviderIDList is the list of identification IDs of machine instances managed by this Machine Pool
+	// +optional
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=10000
+	// +kubebuilder:validation:items:MinLength=1
+	// +kubebuilder:validation:items:MaxLength=512
+	ProviderIDList []string `json:"providerIDList,omitempty"`
+
+	// See other rules for more details about mandatory/optional fields in InfraMachinePool spec.
+	// Other fields SHOULD be added based on the needs of your provider.
+}
+```
+
+Cluster API uses this list to determine the status of the machine pool and to know when replicas have been deleted, at which point the corresponding Node will be deleted. Therefore, the list MUST be kept up to date.
+
+### InfraMachinePool: initialization completed
+
+Each InfraMachinePool MUST report when the MachinePool's infrastructure is fully provisioned (initialization) by setting `status.initialization.provisioned` in the InfraMachinePool resource.
+
+```go
+type FooMachinePoolStatus struct {
+	// initialization provides observations of the FooMachinePool initialization process.
+	// +optional
+	Initialization FooMachinePoolInitializationStatus `json:"initialization,omitempty,omitzero"`
+
+	// See other rules for more details about mandatory/optional fields in InfraMachinePool status.
+	// Other fields SHOULD be added based on the needs of your provider.
+}
+
+// FooMachinePoolInitializationStatus provides observations of the FooMachinePool initialization process.
+// +kubebuilder:validation:MinProperties=1
+type FooMachinePoolInitializationStatus struct {
+	// provisioned is true when the infrastructure provider reports that the MachinePool's infrastructure is fully provisioned.
+	// +optional
+	Provisioned *bool `json:"provisioned,omitempty"`
+}
+```
+
+Once `status.initialization.provisioned` is set, the MachinePool "core" controller will bubble this info up into the MachinePool's `status.initialization.infrastructureProvisioned`; the InfraMachinePool's `spec.providerIDList` and `status.replicas` will also be surfaced on the MachinePool's corresponding fields at the same time.
+
+### InfraMachinePool: pausing
+
+Providers SHOULD implement the pause behaviour for every object with a reconciliation loop.
+This is done by checking if `spec.paused` is set on the Cluster object and by checking for the `cluster.x-k8s.io/paused` annotation on the InfraMachinePool object. Preferably, the utility `sigs.k8s.io/cluster-api/util/annotations.IsPaused(cluster, infraMachinePool)` SHOULD be used.
+
+If implementing the pause behaviour, providers SHOULD surface the paused status of an object using the Paused condition: `Status.Conditions[Paused]`.
+
+### InfraMachinePool: conditions
+
+According to [Kubernetes API Conventions], Conditions provide a standard mechanism for higher-level
+status reporting from a controller.
+
+Provider implementers SHOULD implement `status.conditions` for their InfraMachinePool resource.
+In case conditions are implemented on an InfraMachinePool resource, Cluster API will only consider conditions providing the following information:
+
+- `type` (required)
+- `status` (required, one of True, False, Unknown)
+- `reason` (optional, if omitted a default one will be used)
+- `message` (optional, if omitted an empty message will be used)
+- `lastTransitionTime` (optional, if omitted time.Now will be used)
+- `observedGeneration` (optional, if omitted the generation of the InfraMachinePool resource will be used)
+
+Other fields will be ignored.
+
+If a condition with type `Ready` exists, such condition will be mirrored into the MachinePool's `InfrastructureReady` condition (not implemented yet).
+
+Please note that the `Ready` condition is expected to surface the status of the InfraMachinePool during its own entire lifecycle, including initial provisioning, the final deletion process, and the period in between these two moments.
+
+See [Improving status in CAPI resources] for more context.
+
+### InfraMachinePool: replicas
+
+Provider implementers MUST implement `status.replicas` to report the most recently observed number of machine instances in the pool. For example, in AWS this would be the number of replicas in an Auto Scaling group (ASG).
+
+```go
+type FooMachinePoolStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// +optional
+	Replicas int32 `json:"replicas"`
+
+	// See other rules for more details about mandatory/optional fields in InfraMachinePool status.
+	// Other fields SHOULD be added based on the needs of your provider.
+}
+```
+
+The value from this field is surfaced via the MachinePool's `status.replicas` field.
+
+### InfraMachinePool: terminal failures
+
+Starting from the v1beta2 contract version, there is no longer any special treatment for a provider's terminal failures within Cluster API.
+
+In case it is necessary, "terminal failures" should be surfaced using conditions, with a well documented type/reason; it is up to consumers to treat them accordingly.
+
+See [Improving status in CAPI resources] for more context.
+
+### InfraMachinePoolTemplate, InfraMachineTemplatePoolList resource definition
+
+For a given InfraMachinePool resource, you SHOULD also add a corresponding InfraMachinePoolTemplate resource in order to use it in ClusterClasses. The template resource MUST be named as `<InfraMachinePool>Template`.
+
+```go
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=foomachinepooltemplates,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+
+// FooMachinePoolTemplate is the Schema for the foomachinepooltemplates API.
+type FooMachinePoolTemplate struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec FooMachinePoolTemplateSpec `json:"spec,omitempty"`
+}
+
+type FooMachinePoolTemplateSpec struct {
+	Template FooMachinePoolTemplateResource `json:"template"`
+}
+
+type FooMachinePoolTemplateResource struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty,omitzero"`
+	Spec       FooMachinePoolSpec   `json:"spec"`
+}
+```
+
+NOTE: in this example `spec.template.spec` embeds `FooMachinePoolSpec` from FooMachinePool. This might not always be
+the best choice, depending on if/how the InfraMachinePool's spec fields apply to many machine pools vs only one.
+
+For each InfraMachinePoolTemplate resource, you MUST also add the corresponding list resource.
+The list resource MUST be named as `<InfraMachinePoolTemplate>List`.
+
+```go
+// +kubebuilder:object:root=true
+
+// FooMachinePoolTemplateList contains a list of FooMachinePoolTemplates.
+type FooMachinePoolTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []FooMachinePoolTemplate `json:"items"`
+}
+```
+
+### InfraMachinePoolTemplate: support for SSA dry run
+
+When Cluster API's topology controller is trying to identify differences between templates defined in a ClusterClass and
+the current Cluster topology, it is required to run a [Server Side Apply] (SSA) dry run call.
+
+However, in case you have immutability checks for your InfraMachinePoolTemplate, this can cause the SSA dry run call to fail.
+
+In order to avoid this, InfraMachinePoolTemplate MUST specifically implement support for SSA dry run calls from the
+topology controller (a minimal sketch of such a validator is included at the end of this page).
+
+The implementation requires using controller-runtime's `CustomValidator`, available since controller-runtime v0.12.3.
+
+This allows skipping the immutability check only when the topology controller is dry running, while preserving the
+validation behavior for all other cases.
+
+### Multi tenancy
+
+Multi tenancy in Cluster API defines the capability of an infrastructure provider to manage different credentials,
+each one of them corresponding to an infrastructure tenant.
+
+See [infrastructure Provider Security Guidance] for considerations about cloud provider credential management.
+
+Please also note that Cluster API does not support running multiple instances of the same provider, which someone could
+assume to be an alternative solution to implement multi tenancy; the same applies to the clusterctl CLI.
+
+See [Support running multiple instances of the same provider] for more context.
+
+However, if you want to make it possible for users to run multiple instances of your provider, your controllers SHOULD:
+
+- support the `--namespace` flag.
+- support the `--watch-filter` flag.
+
+Please read the page linked above carefully to fully understand the implications and risks related to this option.
+
+### Clusterctl support
+
+The clusterctl command is designed to work with all the providers compliant with the rules defined in the [clusterctl provider contract].
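+
+Going back to the [InfraMachinePoolTemplate: support for SSA dry run] rule above, the following is a minimal,
+illustrative sketch of such a `CustomValidator`. It assumes the `ShouldSkipImmutabilityChecks` helper from
+`sigs.k8s.io/cluster-api/util/topology` (available in recent Cluster API releases) and the standard controller-runtime
+admission types; adapt names and error messages to your provider.
+
+```go
+// Assumed imports: fmt, reflect, apierrors "k8s.io/apimachinery/pkg/api/errors",
+// "k8s.io/apimachinery/pkg/runtime", "sigs.k8s.io/controller-runtime/pkg/webhook/admission",
+// "sigs.k8s.io/cluster-api/util/topology", and your infrav1 API package.
+
+func (w *FooMachinePoolTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw, newRaw runtime.Object) (admission.Warnings, error) {
+	newObj, ok := newRaw.(*infrav1.FooMachinePoolTemplate)
+	if !ok {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a FooMachinePoolTemplate but got a %T", newRaw))
+	}
+	oldObj, ok := oldRaw.(*infrav1.FooMachinePoolTemplate)
+	if !ok {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a FooMachinePoolTemplate but got a %T", oldRaw))
+	}
+
+	// Fetch the admission request from the context to detect dry run calls issued by the topology controller.
+	req, err := admission.RequestFromContext(ctx)
+	if err != nil {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an admission.Request inside context: %v", err))
+	}
+
+	// Enforce immutability, except for SSA dry run calls coming from the topology controller.
+	if !topology.ShouldSkipImmutabilityChecks(req, newObj) &&
+		!reflect.DeepEqual(newObj.Spec.Template.Spec, oldObj.Spec.Template.Spec) {
+		return nil, apierrors.NewBadRequest("FooMachinePoolTemplate spec.template.spec field is immutable")
+	}
+
+	return nil, nil
+}
+```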
+ +[All resources: Scope]: #all-resources-scope +[All resources: `TypeMeta` and `ObjectMeta`field]: #all-resources-typemeta-and-objectmeta-field +[All resources: `APIVersion` field value]: #all-resources-apiversion-field-value +[InfraMachinePool, InfraMachinePoolList resource definition]: #inframachinepool-inframachinepoollist-resource-definition +[InfraMachinePool: instances]: #inframachinepool-instances +[InfraMachinePool: providerID]: #inframachinepool-providerid +[InfraMachinePool: providerIDList]: #inframachinepool-provideridlist +[InfraMachinePool: initialization completed]: #inframachinepool-initialization-completed +[InfraMachinePool: pausing]: #inframachinepool-pausing +[InfraMachinePool: conditions]: #inframachinepool-conditions +[InfraMachinePool: replicas]: #inframachinepool-replicas +[InfraMachinePool: terminal failures]: #inframachinepool-terminal-failures +[InfraMachinePoolTemplate, InfraMachineTemplatePoolList resource definition]: #inframachinepooltemplate-inframachinetemplatepoollist-resource-definition +[InfraMachinePoolTemplate: support for SSA dry run]: #inframachinepooltemplate-support-for-ssa-dry-run +[MachinePoolMachines support]: #machinepoolmachines-support +[Multi tenancy]: #multi-tenancy +[Clusterctl support]: #clusterctl-support +[Kubernetes API Conventions]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[Improving status in CAPI resources]: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md +[infrastructure Provider Security Guidance]: ../security-guidelines.md +[Support running multiple instances of the same provider]: ../../core/support-multiple-instances.md +[clusterctl provider contract]: clusterctl.md +[MachineHealthChecks]: ../../../tasks/automated-machine-management/healthchecking.md diff --git a/docs/book/src/developer/providers/contracts/ipam.md b/docs/book/src/developer/providers/contracts/ipam.md index 54d40adde2cc..7540b71d2c01 100644 --- a/docs/book/src/developer/providers/contracts/ipam.md +++ b/docs/book/src/developer/providers/contracts/ipam.md @@ -14,7 +14,7 @@ Note that the IPAM contract is single-stack. If you need both v4 and v6 addresse ## Data Types -An IPAM provider must define one or more API types for IP address pools. The types: +An IPAM provider must define one or more API types for IP address pools using Kubernetes' CustomResourceDefinition (CRD). The types: 1. Must belong to an API group served by the Kubernetes apiserver 2. Must be implemented as a CustomResourceDefinition. 
diff --git a/docs/book/src/developer/providers/contracts/overview.md b/docs/book/src/developer/providers/contracts/overview.md index 08f1a27460e3..8a90030108a3 100644 --- a/docs/book/src/developer/providers/contracts/overview.md +++ b/docs/book/src/developer/providers/contracts/overview.md @@ -10,7 +10,7 @@ See [Cluster API release vs contract versions](../../../reference/versions.md#cl - Infrastructure provider - Contract rules for [InfraCluster](infra-cluster.md) resource - Contract rules for [InfraMachine](infra-machine.md) resource - - Contract rules for InfraMachinePool resource (TODO) + - Contract rules for [InfraMachinePool](infra-machinepool.md) resource - Bootstrap provider - Contract rules for [BootstrapConfig](bootstrap-config.md) resource diff --git a/docs/book/src/developer/providers/getting-started/building-running-and-testing.md b/docs/book/src/developer/providers/getting-started/building-running-and-testing.md index d095c356a9c9..9b6880b326dc 100644 --- a/docs/book/src/developer/providers/getting-started/building-running-and-testing.md +++ b/docs/book/src/developer/providers/getting-started/building-running-and-testing.md @@ -2,41 +2,20 @@ ## Docker Image Name -The patch in `config/manager/manager_image_patch.yaml` will be applied to the manager pod. -Right now there is a placeholder `IMAGE_URL`, which you will need to change to your actual image. - -### Development Images -It's likely that you will want one location and tag for release development, and another during development. - -The approach most Cluster API projects is using [a `Makefile` that uses `sed` to replace the image URL][sed] on demand during development. - -[sed]: https://github.com/kubernetes-sigs/cluster-api/blob/e0fb83a839b2755b14fbefbe6f93db9a58c76952/Makefile#L201-L204 - -## Deployment - -### cert-manager - -Cluster API uses [cert-manager] to manage the certificates it needs for its webhooks. -Before you apply Cluster API's yaml, you should [install `cert-manager`][cm-install] - -[cert-manager]: https://github.com/cert-manager/cert-manager -[cm-install]: https://cert-manager.io/docs/installation/ +The IMG variable is used to build the Docker image and push it to a registry. The default value is `controller:latest`, which is a local image. You can change it to a remote image if you want to push it to a registry. ```bash -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download//cert-manager.yaml +make docker-push IMG=ghcr.io/your-org/your-repo:dev ``` +## Deployment + ### Cluster API Before you can deploy the infrastructure controller, you'll need to deploy Cluster API itself to the management cluster. -You can use a precompiled manifest from the [release page][releases], run `clusterctl init`, or clone [`cluster-api`][capi] and apply its manifests using `kustomize`: +Follow the [quick start guide](https://cluster-api.sigs.k8s.io/user/quick-start) up to and including the step of [creating the management cluster](https://cluster-api.sigs.k8s.io/user/quick-start#initialize-the-management-cluster). We will proceed presuming you created a cluster with kind and initalized cluster-api with `clusterctl init`. 
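+
+If you have not set this up yet, a minimal local bootstrap could look like the following (assuming `kind` and `clusterctl` are already installed):
+
+```bash
+# Create a local management cluster with kind
+kind create cluster
+
+# Install the core Cluster API components (plus the default kubeadm bootstrap and control plane providers)
+clusterctl init
+```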
-```bash -cd cluster-api -make envsubst -kustomize build config/default | ./hack/tools/bin/envsubst | kubectl apply -f - -``` Check the status of the manager to make sure it's running properly: @@ -45,11 +24,11 @@ kubectl describe -n capi-system pod | grep -A 5 Conditions ``` ```bash Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True ``` [capi]: https://github.com/kubernetes-sigs/cluster-api @@ -66,24 +45,36 @@ labels: cluster.x-k8s.io/provider: infrastructure-mailgun ``` +If you're using kind for your management cluster, you can use the following command to build and push your image to the kind cluster's local registry. We need to use the IMG variable to override the default `controller:latest` image name with a specific version like `controller:0.1` to avoid having kubernetes try to pull the latest version of `controller` from docker hub. + +```bash +cd cluster-api-provider-mailgun + +# Build the Docker image +make docker-build IMG=controller:dev + +# Load the Docker image into the kind cluster +kind load docker-image controller:dev +``` + Now you can apply your provider as well: ```bash cd cluster-api-provider-mailgun # Install CRD and controller to current kubectl context -make install deploy +make install deploy IMG=controller:dev kubectl describe -n cluster-api-provider-mailgun-system pod | grep -A 5 Conditions ``` ```text Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True ``` [label_prefix]: https://github.com/kubernetes-sigs/cluster-api/search?q=%22infrastructure-%22 @@ -102,6 +93,7 @@ config: image: controller:latest # change to remote image name if desired label: CAPM live_reload_deps: ["main.go", "go.mod", "go.sum", "api", "controllers", "pkg"] + go_main: cmd/main.go # kubebuilder puts main.go under the cmd directory ``` - Create file `tilt-settings.yaml` in the cluster-api directory: @@ -116,15 +108,11 @@ enable_providers: - mailgun ``` -- Create a kind cluster. By default, Tiltfile assumes the kind cluster is named `capi-test`. +- Bring tilt up by using the `make tilt-up` command in the cluster-api directory. This will ensure tilt is set up correctly to use a local registry for your image. You may need to `make tilt-clean` before this if you've been using tilt with other providers. ```bash -kind create cluster --name capi-test - -# If you want a more sophisticated setup of kind cluster + image registry, try: -# --- -# cd cluster-api -# hack/kind-install-for-capd.sh +cd cluster-api +make tilt-up ``` - Run `tilt up` in the cluster-api folder diff --git a/docs/book/src/developer/providers/getting-started/configure-the-deployment.md b/docs/book/src/developer/providers/getting-started/configure-the-deployment.md index c473feeb86c7..08c2f8d8763e 100644 --- a/docs/book/src/developer/providers/getting-started/configure-the-deployment.md +++ b/docs/book/src/developer/providers/getting-started/configure-the-deployment.md @@ -53,7 +53,7 @@ As you might have noticed, we are reading variable values from a `ConfigMap` and You now have to add those to the manifest, but how to inject configuration in production? The convention many Cluster-API projects use is environment variables. 
-`config/manager/configuration.yaml` +`config/manager/credentials.yaml` ```yaml --- diff --git a/docs/book/src/developer/providers/getting-started/controllers-and-reconciliation.md b/docs/book/src/developer/providers/getting-started/controllers-and-reconciliation.md index 7c179f966ccd..7ddbdf379ae3 100644 --- a/docs/book/src/developer/providers/getting-started/controllers-and-reconciliation.md +++ b/docs/book/src/developer/providers/getting-started/controllers-and-reconciliation.md @@ -1,6 +1,6 @@ # Controllers and Reconciliation -Right now, you can create objects with our API types, but those objects doesn't make any impact on your mailgun infrastrucrure. +Right now, you can create objects with your API types, but those objects don't make any impact on your mailgun infrastructure. Let's fix that by implementing controllers and reconciliation for your API objects. From the [kubebuilder book][controller]: @@ -25,17 +25,16 @@ Kubebuilder has created our first controller in `controllers/mailguncluster_cont // MailgunClusterReconciler reconciles a MailgunCluster object type MailgunClusterReconciler struct { client.Client - Log logr.Logger + Scheme *runtime.Scheme } // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters/status,verbs=get;update;patch func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = context.Background() - _ = r.Log.WithValues("mailguncluster", req.NamespacedName) + _ = logf.FromContext(ctx) - // your logic here + // TODO(user): your logic here return ctrl.Result{}, nil } @@ -88,7 +87,7 @@ We're going to be sending mail, so let's add a few extra fields: // MailgunClusterReconciler reconciles a MailgunCluster object type MailgunClusterReconciler struct { client.Client - Log logr.Logger + Scheme *runtime.Scheme Mailgun mailgun.Mailgun Recipient string } @@ -102,7 +101,7 @@ Here's a naive example: ```go func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() - _ = r.Log.WithValues("mailguncluster", req.NamespacedName) + _ = ctrl.LoggerFrom(ctx) var cluster infrav1.MailgunCluster if err := r.Get(ctx, req.NamespacedName, &cluster); err != nil { @@ -117,8 +116,8 @@ By returning an error, you request that our controller will get `Reconcile()` ca That may not always be what you want - what if the object's been deleted? So let's check that: ```go - var cluster infrav1.MailgunCluster - if err := r.Get(ctx, req.NamespacedName, &cluster); err != nil { + var mailgunCluster infrav1.MailgunCluster + if err := r.Get(ctx, req.NamespacedName, &mailgunCluster); err != nil { // import apierrors "k8s.io/apimachinery/pkg/api/errors" if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -127,19 +126,57 @@ That may not always be what you want - what if the object's been deleted? So let } ``` -Now, if this were any old `kubebuilder` project you'd be done, but in our case you have one more object to retrieve. -Cluster API splits a cluster into two objects: the [`Cluster` defined by Cluster API itself][cluster]. -We'll want to retrieve that as well. +Now that we have our own cluster object (`MailGunCluster`) that represents all the +infrastructure provider specific details for our cluster, we also need to retrieve +the upstream [`Cluster` object that is defined by Cluster API itself][cluster]. 
Luckily, cluster API [provides a helper for us][getowner].
+First, you'll need to import the cluster-api package into your project if you haven't done so yet:
+
+```bash
+# In your Mailgun repository's root directory
+go get sigs.k8s.io/cluster-api
+go mod tidy
+```
+
+Now we can add a call to the `GetOwnerCluster` function to retrieve the cluster object:
+
 ```go
-	cluster, err := util.GetOwnerCluster(ctx, r.Client, &mg)
+	// import sigs.k8s.io/cluster-api/util
+	cluster, err := util.GetOwnerCluster(ctx, r.Client, mailgunCluster.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, err
-	}
 ```
 
+If our cluster was just created, the Cluster API controller may not have set the ownership reference on our object yet, so we'll have to return here and wait to do more with our cluster object until then. We can leave a log message noting that we're waiting for the main Cluster API controller to set the ownership reference. Here's what our `Reconcile()` function looks like now:
+
+```go
+func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	// We change the _ to `log` since we're going to log something now
+	log := ctrl.LoggerFrom(ctx)
+
+	var mailgunCluster infrav1.MailgunCluster
+	if err := r.Get(ctx, req.NamespacedName, &mailgunCluster); err != nil {
+		// import apierrors "k8s.io/apimachinery/pkg/api/errors"
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// import sigs.k8s.io/cluster-api/util
+	cluster, err := util.GetOwnerCluster(ctx, r.Client, mailgunCluster.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if cluster == nil {
+		log.Info("Waiting for Cluster Controller to set OwnerRef on MailGunCluster")
+		return ctrl.Result{}, nil
+	}
+```
+
 ### The fun part
 
 _More Documentation: [The Kubebuilder Book][book] has some excellent documentation on many things, including [how to write good controllers!][implement]_
@@ -152,10 +189,10 @@ This is where your provider really comes into its own.
 In our case, let's try sending some mail:
 
 ```go
-subject := fmt.Sprintf("[%s] New Cluster %s requested", mgCluster.Spec.Priority, cluster.Name)
-body := fmt.Sprint("Hello! One cluster please.\n\n%s\n", mgCluster.Spec.Request)
+subject := fmt.Sprintf("[%s] New Cluster %s requested", mailgunCluster.Spec.Priority, cluster.Name)
+body := fmt.Sprintf("Hello! One cluster please.\n\n%s\n", mailgunCluster.Spec.Request)
 
-msg := mailgun.NewMessage(mgCluster.Spec.Requester, subject, body, r.Recipient)
+msg := r.Mailgun.NewMessage(mailgunCluster.Spec.Requester, subject, body, r.Recipient)
 _, _, err = r.Mailgun.Send(msg)
 if err != nil {
 	return ctrl.Result{}, err
@@ -172,28 +209,28 @@ This is an important thing about controllers: they need to be idempotent. This m
 So in our case, we'll store the result of sending a message, and then check to see if we've sent one before.
 
 ```go
-	if mgCluster.Status.MessageID != nil {
+	if mailgunCluster.Status.MessageID != nil {
 		// We already sent a message, so skip reconciliation
 		return ctrl.Result{}, nil
 	}
 
-	subject := fmt.Sprintf("[%s] New Cluster %s requested", mgCluster.Spec.Priority, cluster.Name)
-	body := fmt.Sprintf("Hello! One cluster please.\n\n%s\n", mgCluster.Spec.Request)
+	subject := fmt.Sprintf("[%s] New Cluster %s requested", mailgunCluster.Spec.Priority, cluster.Name)
+	body := fmt.Sprintf("Hello! 
One cluster please.\n\n%s\n", mailgunCluster.Spec.Request) - msg := mailgun.NewMessage(mgCluster.Spec.Requester, subject, body, r.Recipient) + msg := r.Mailgun.NewMessage(mailgunCluster.Spec.Requester, subject, body, r.Recipient) _, msgID, err := r.Mailgun.Send(msg) if err != nil { return ctrl.Result{}, err } // patch from sigs.k8s.io/cluster-api/util/patch - helper, err := patch.NewHelper(&mgCluster, r.Client) + helper, err := patch.NewHelper(&mailgunCluster, r.Client) if err != nil { return ctrl.Result{}, err } - mgCluster.Status.MessageID = &msgID - if err := helper.Patch(ctx, &mgCluster); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "couldn't patch cluster %q", mgCluster.Name) + mailgunCluster.Status.MessageID = &msgID + if err := helper.Patch(ctx, &mailgunCluster); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "couldn't patch cluster %q", mailgunCluster.Name) } return ctrl.Result{}, nil @@ -223,7 +260,7 @@ Right now, it probably looks like this: ```go if err = (&controllers.MailgunClusterReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("MailgunCluster"), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "Unable to create controller", "controller", "MailgunCluster") os.Exit(1) @@ -256,7 +293,7 @@ We're going to use environment variables for this: if err = (&controllers.MailgunClusterReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("MailgunCluster"), + Scheme: mgr.GetScheme(), Mailgun: mg, Recipient: recipient, }).SetupWithManager(mgr); err != nil { diff --git a/docs/book/src/developer/providers/getting-started/implement-api-types.md b/docs/book/src/developer/providers/getting-started/implement-api-types.md index 5f4b9e5ad8d2..275d4d0524bf 100644 --- a/docs/book/src/developer/providers/getting-started/implement-api-types.md +++ b/docs/book/src/developer/providers/getting-started/implement-api-types.md @@ -41,6 +41,9 @@ const ( // MailgunClusterSpec defines the desired state of MailgunCluster type MailgunClusterSpec struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // Priority is how quickly you need this cluster Priority Priority `json:"priority"` // Request is where you ask extra nicely @@ -51,12 +54,15 @@ type MailgunClusterSpec struct { // MailgunClusterStatus defines the observed state of MailgunCluster type MailgunClusterStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // MessageID is set to the message ID from Mailgun when our message has been sent MessageID *string `json:"response"` } ``` -As the deleted comments request, run `make manager manifests` to regenerate some of the generated data files afterwards. +As the comments request, run `make manager manifests` to regenerate some of the generated data files afterwards. ```bash git add . 
@@ -69,13 +75,19 @@ To enable clients to encode and decode your API, your types must be able to be r [scheme]: https://pkg.go.dev/k8s.io/apimachinery/pkg/runtime#Scheme -By default, Kubebuilder will provide you with a scheme builder like: +By default, Kubebuilder will provide you with a scheme builder (likely in `api/v1alpha1/groupversion_info.go`) like: ```go -import "sigs.k8s.io/controller-runtime/pkg/scheme" +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) var ( - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. @@ -83,11 +95,11 @@ var ( ) ``` -and scheme registration that looks like: +and scheme registration (likely in `api/v1alpha1/*_types.go`) that looks like: ```go func init() { - SchemeBuilder.Register(&Captain{}, &CaptainList{}) + SchemeBuilder.Register(&MailgunCluster{}, &MailgunClusterList{}) } ``` @@ -99,10 +111,17 @@ to be imported cleanly into other projects. To mitigate this, use the following schemebuilder pattern: ```go -import "k8s.io/apimachinery/pkg/runtime" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) var ( - // schemeBuilder is used to add go types to the GroupVersionKind scheme. + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme adds the types in this group-version to the given scheme. @@ -122,7 +141,7 @@ and register types as below: ```go func init() { - objectTypes = append(objectTypes, &Captain{}, &CaptainList{}) + objectTypes = append(objectTypes, &MailgunCluster{}, &MailgunClusterList{}) } ``` diff --git a/docs/book/src/developer/providers/getting-started/initialize-repo-and-api-types.md b/docs/book/src/developer/providers/getting-started/initialize-repo-and-api-types.md index a64a365737e2..b8556f468931 100644 --- a/docs/book/src/developer/providers/getting-started/initialize-repo-and-api-types.md +++ b/docs/book/src/developer/providers/getting-started/initialize-repo-and-api-types.md @@ -3,7 +3,7 @@ ## Create a repository ```bash -mkdir cluster-api-provider-mailgun +mkdir -p src/sigs.k8s.io/cluster-api-provider-mailgun cd src/sigs.k8s.io/cluster-api-provider-mailgun git init ``` @@ -38,6 +38,7 @@ The domain for Cluster API resources is `cluster.x-k8s.io`, and infrastructure p Commit your changes so far: ```bash +git add . git commit -m "Generate scaffolding." ``` @@ -75,29 +76,6 @@ Create Controller under pkg/controller [y/n]? y ``` -### Add Status subresource - -The [status subresource][status] lets Spec and Status requests for custom resources be addressed separately so requests don't conflict with each other. -It also lets you split RBAC rules between Spec and Status. You will have to [manually enable it in Kubebuilder][kbstatus]. 
- -Add the `subresource:status` annotation to your `cluster_types.go` `machine_types.go` - -```go -// +kubebuilder:subresource:status -// +kubebuilder:object:root=true - -// MailgunCluster is the Schema for the mailgunclusters API -type MailgunCluster struct { -``` - -```go -// +kubebuilder:subresource:status -// +kubebuilder:object:root=true - -// MailgunMachine is the Schema for the mailgunmachines API -type MailgunMachine struct { -``` - And regenerate the CRDs: ```bash make manifests @@ -110,9 +88,6 @@ git add . git commit -m "Generate Cluster and Machine resources." ``` -[status]: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#status-subresource -[kbstatus]: https://book.kubebuilder.io/reference/generating-crd.html?highlight=status#status - ### Apply further customizations The cluster API CRDs should be further customized, please refer to [provider contracts](../contracts/overview.md). diff --git a/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md b/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md index 41efc5e68215..020946572cfb 100644 --- a/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md +++ b/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md @@ -2800,6 +2800,12 @@ As documented in [Suggested changes for providers](#suggested-changes-for-provid external.GetObjectFromContractVersionedRef(ctx, r.Client, cluster.Spec.InfrastructureRef, cluster.Namespace) ``` + - This functions requires the permissions to `get`, `list` and `watch` objects of the type `customresourcedefinitions` to identify the used contract version. + + ```go + // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch + ``` + - Go clients writing status of core Cluster API objects, should use at least Cluster API v1.9 Go types. If that is not possible, avoid updating or patching the entire status field and instead you should patch only individual fields. (Cluster API v1.9 introduced `.status.v1beta2` fields that are necessary for lossless v1beta2 => v1beta1 => v1beta2 round trips) @@ -2854,6 +2860,12 @@ As documented in [Suggested changes for providers](#suggested-changes-for-provid external.GetObjectFromContractVersionedRef(ctx, r.Client, cluster.Spec.InfrastructureRef, cluster.Namespace) ``` + - This functions requires the permissions to `get`, `list` and `watch` objects of the type `customresourcedefinitions` to identify the used contract version. + + ```go + // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch + ``` + - core Cluster API added the new CRD migrator component in the v1.9 release. For more details, see: https://github.com/kubernetes-sigs/cluster-api/issues/11894 - CRD migration in clusterctl has been deprecated and will be removed in CAPI v1.13, so it's recommended to adopt the CRD migrator in providers instead. diff --git a/docs/book/src/developer/providers/migrations/v1.11-to-v1.12.md b/docs/book/src/developer/providers/migrations/v1.11-to-v1.12.md new file mode 100644 index 000000000000..a5e64c830d88 --- /dev/null +++ b/docs/book/src/developer/providers/migrations/v1.11-to-v1.12.md @@ -0,0 +1,212 @@ +# Cluster API v1.11 compared to v1.12 + +This document provides an overview over relevant changes between Cluster API v1.11 and v1.12 for +maintainers of providers and consumers of our Go API. + +Any feedback or contributions to improve following documentation is welcome! 
+ + +* [Cluster API v1.11 compared to v1.12](#cluster-api-v111-compared-to-v112) + * [Go version](#go-version) + * [Dependencies](#dependencies) + * [Implemented proposal](#implemented-proposal) + * [API Changes](#api-changes) + * [Cluster](#cluster-) + * [Machine](#machine) + * [MachineDeployment](#machinedeployment) + * [MachineSet](#machineset) + * [MachinePool](#machinepool) + * [MachineHealthCheck](#machinehealthcheck) + * [ClusterClass](#clusterclass) + * [KubeadmConfig](#kubeadmconfig) + * [KubeadmConfigTemplate](#kubeadmconfigtemplate) + * [KubeadmControlPlane](#kubeadmcontrolplane) + * [KubeadmControlPlaneTemplate](#kubeadmcontrolplanetemplate) + * [Runtime hooks Changes](#runtime-hooks-changes) + * [Cluster API Contract changes](#cluster-api-contract-changes) + * [Deprecation](#deprecation) + * [Removals](#removals) + * [Suggested changes for providers](#suggested-changes-for-providers) + * [Removals scheduled for future releases](#removals-scheduled-for-future-releases) + + +## Go version + +- The minimal Go version required to build Cluster API is v1.24.x +- The Go version used by Cluster API is v1.24.x + +## Dependencies + +- The Controller Runtime version used by Cluster API is v0.22.x +- The version of the Kubernetes libraries used by Cluster API is v1.34.x + +## Implemented proposal + +The following proposal have been implemented in the Cluster API v1.12 release: +- [In-place updates](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240807-in-place-updates.md) +- [Chained and efficient upgrades](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20250513-chained-and-efficient-upgrades-for-clusters-with-managed-topologies.md) +- [Taint propagation](https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20250513-propogate-taints.md) (Only phase 1) + +## API Changes + +### Cluster + +- The new `spec.topology.controlPlane.healthCheck.checks.unhealthyMachineConditions` field has been added +- The new `spec.topology.workers.machineDeployments[].healthCheck.checks.unhealthyMachineConditions` field has been added +- The `TopologyReconciled` condition reports new `ClusterCreating` and `ClusterUpgrading` reasons + - The following reasons for the `TopologyReconciled` condition are now deprecated + - `ControlPlaneUpgradePending` + - `MachineDeploymentsCreatePending` + - `MachineDeploymentsUpgradePending` + - `MachinePoolsUpgradePending` + - `MachinePoolsCreatePending` + - `LifecycleHookBlocking` + +### Machine + +- The new `spec.taint` field has been added. +- The `HealthCheckSucceeded` condition reports a new `UnhealthyMachine` reason when `unhealthyMachineConditions` checks fail +- The `UpToDate` condition reports a new `Updating` reason when a machine is performing in-place updates +- The new `Updating` condition has been added. 
It reports status `True` when a machine is performing in-place updates + - Supported reasons are `NotUpdating`, `InPlaceUpdating`, `InPlaceUpdateFailed` +- The field `status.phases` reports a new `Updating` phase when a machine is performing in-place updates + +### MachineDeployment + +MachineDeployment `spec.template.spec` has been aligned to changes in the [Machine](#machine) `spec` struct + +### MachineSet + +MachineSet `spec.template.spec` has been aligned to changes in the [Machine](#machine) `spec` struct + +### MachinePool + +MachinePool `spec.template.spec` has been aligned to changes in the [Machine](#machine) `spec` struct + +### MachineHealthCheck + +- The new `spec.checks.unhealthyMachineConditions` field has been added + +### ClusterClass + +- The new `spec.controlPlane.healthCheck.checks.unhealthyMachineConditions` field has been added +- The new `spec.workers.machineDeployments[].healthCheck.checks.unhealthyMachineConditions` field has been added +- The new `spec.upgrade` field has been added +- The new `spec.kubernetesVersions` field has been added + +### KubeadmConfig + +- The new `spec.clusterConfiguration.encryptionAlgorithm` field has been added + +### KubeadmConfigTemplate + +KubeadmConfigTemplate `spec.template.spec` has been aligned to changes in the [KubeadmConfig](#kubeadmconfig) `spec` struct + +### KubeadmControlPlane + +KubeadmControlPlane `spec.kubeadmConfigSpec` has been aligned to changes in the [KubeadmConfig](#kubeadmconfig) `spec` struct + +### KubeadmControlPlaneTemplate + +KubeadmControlPlaneTemplate `spec.template.spec` has been aligned to changes in the [KubeadmControlPlane](#kubeadmcontrolplane) `spec` struct + +## Runtime hooks Changes + +- Lifecycle hooks + - The request message for the `BeforeClusterUpgrade` hook has been extended to include info about the upgrade plan + - The new `BeforeControlPlaneUpgrade` hook has been added; the hook is called before each "upgrade control plane" step + - The request message for the `AfterControlPlaneUpgrade` hook have been extended to include info about the upgrade plan + - The `AfterControlPlaneUpgrade` hook is now called after each "upgrade control plane" step + - The new `BeforeWorkersUpgrade`, `AfterWorkersUpgrade` hooks have been added; the hooks are called before/after each "upgrade workers" step + - The `AfterClusterUpgrade` has been transformed into a blocking hook (it blocks next upgrade) +- Upgrade plan hooks (new!) + - The new `GenerateUpgradePlan` has been added; the hook is called by the topology controller when it is required + to compute the upgrade plan for a cluster +- In-place extension hooks (new!) + - the new `CanUpdateMachine` hook has been added; the hook is called by the KubeadmControlPlane controller to perform the + "can update in-place" decision for a machine that is not up-to-date + - the new `CanUpdateMachineSet` hook has been added; the hook is called by the MachineDeployment controller to perform the + "can update in-place" decision for a MachineSet that is not up-to-date + - the new `UpdateMachine` hook has been added; the hook is called by the Machine controller to perform in-place upgrades +- Some optimization have been implemented to ensure that hooks are called only when necessary + - e.g. if a lifecycle hook response, e.g. 
the `BeforeClusterUpgradeResponse`, informs CAPI to retry after X seconds, + CAPI is going to not call the runtime extension until X expires, no matter if the underlying controller reconciles + one or more times in the between + - `GenerateUpgradePlan`, `CanUpdateMachine`, `CanUpdateMachineSet` hooks will use slightly different caching strategy + +## Cluster API Contract changes + +- Documentation for the [MachinePool](../contracts/infra-machinepool.md) contract has been aligned to the documentation + of other contracts. + - We strongly recommend all the MachinePool providers to read carefully this document and check compliance with + the updated definition of the contract rules. + +- A new, optional rule has been added to the control plane contract, defining what is required for implementing support + for in-place updates. + +## Deprecation + +- The following reasons for the `TopologyReconciled` condition on the `Cluster` object are now deprecated + - `ControlPlaneUpgradePending` + - `MachineDeploymentsCreatePending` + - `MachineDeploymentsUpgradePending` + - `MachinePoolsUpgradePending` + - `MachinePoolsCreatePending` + - `LifecycleHookBlocking` + +## Removals + +- The `controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration` annotation that KCP was previously setting on Machines has been removed. + KCP used this annotation to detect if a Machine requires a rollout, this is now done via the KubeadmConfig of the Machine instead. + +## Suggested changes for providers + +- For providers that copied the core Cluster API v1beta1 `APIEndpoint` struct and used it in their InfraCluster + or ControlPlane Go type it is recommended to now make the `Host` and `Port` fields optional (they already have been made optional in + `clusterv1beta1.APIEndpoint` in Cluster API v1.12). + tl;dr The fields were previously required, but due to side effects that validation was never enforced. These side + effects might go away which then makes the fields suddenly required. To avoid issues we recommend making the fields + optional. Similar to how they are optional in the v1beta2 `APIEndpoint` struct and v1beta2 InfraCluster contract. + For more details, please see: https://github.com/kubernetes-sigs/cluster-api/pull/12634#discussion_r2275468291. + But this might look differently depending on how your InfraCluster/ControlPlane Go type and corresponding + mutating webhook evolved over time. + +- We strongly recommend all the MachinePool providers to read carefully the [MachinePool](../contracts/infra-machinepool.md) contract + and check compliance with the updated definition of the contract rules. + +- We strongly recommend providers to start moving to the new v1beta2 version of the Cluster API contract as soon as possible. + - v1beta1 version of the Cluster API contract is now deprecated and it will be removed tentatively in August 2026: + - In order to ease the transition to the new v1beta2 version of the Cluster API contract, v1beta2 version + will implement temporarily compatibility with the deprecated v1beta1 version of the Cluster API contract + - Compatibility is only intended to ease the transition for providers, and it has some limitations; please + read details in following paragraphs. + - Compatibility support for the v1beta1 version of the Cluster API contract will be removed tentatively in August 2026. 
+ - After compatibility support for the v1beta1 version of the Cluster API contract is removed, providers + which are implementing the v1beta1 contract will stop to work (they will work only with older versions of Cluster API). + +- Various Cluster API e2e tests with Kubernetes upgrades now use the `wait-control-plane-upgrade` and `wait-machine-deployment-upgrade` timeouts. + If you are running Cluster API e2e tests with upgrades you have to configure the timeouts in your e2e test configuration file, otherwise + the e2e tests will use `1s` timeout which will lead to test failures. Example: + ```yaml + default/wait-control-plane-upgrade: ["15m", "10s"] + default/wait-machine-deployment-upgrade: ["10m", "10s"] + ``` + +- Reconciler rate limiting feature has been introduced ([#13006](https://github.com/kubernetes-sigs/cluster-api/pull/13006)) + - Can be enabled with the new `ReconcilerRateLimiting` feature gate + - It rate-limits all reconcilers to at most 1 request per second + +- `util.IsOwnedByObject`, `util.IsControlledBy` and `collections.OwnedMachines` now also require `schema.GroupKind` as input parameter. + `schema.GroupKind` is needed for cases where typed objects are passed in because controller-runtime does not guarantee that GVK is set on typed objects. + +## Removals scheduled for future releases + +As documented in [Suggested changes for providers](#suggested-changes-for-providers), it is highly recommended to start planning for future removals: + +- v1beta1 API version will be removed tentatively in August 2026 +- Starting from the CAPI release when v1beta1 removal will happen (tentative Aug 2026), the Cluster API project + will remove the Cluster API condition type, the `util/conditions/deprecated/v1beta1` package, + the `util/deprecated/v1beta1` package, the code handling old conditions in `util/patch.Helper` + and everything related to the custom Cluster API custom condition type. +- All the `status.deprecated` fields will be removed tentatively in August 2026. +- Compatibility support for the v1beta1 version of the Cluster API contract will be removed tentatively in August 2026 diff --git a/docs/book/src/developer/providers/migrations/v1.8-to-v1.9.md b/docs/book/src/developer/providers/migrations/v1.8-to-v1.9.md deleted file mode 100644 index 81e13b91167f..000000000000 --- a/docs/book/src/developer/providers/migrations/v1.8-to-v1.9.md +++ /dev/null @@ -1,37 +0,0 @@ -# Cluster API v1.8 compared to v1.9 - -This document provides an overview over relevant changes between Cluster API v1.8 and v1.9 for -maintainers of providers and consumers of our Go API. - -## Go version - -- The Go version used by Cluster API is Go 1.22.x - -## Changes by Kind - -### Deprecation - -### Removals - -### API Changes - -### Other - -- Notes for Machines managed by KCP (starting with Cluster API v1.8.2): - - KCP adds its own pre-terminate hook on all Machines it controls. This is done to ensure it can later remove - the etcd member right before Machine termination (i.e. before InfraMachine deletion). - - Starting with Kubernetes v1.31 the KCP pre-terminate hook will wait for all other pre-terminate hooks to finish to - ensure it runs last (thus ensuring that kubelet is still working while other pre-terminate hooks run). This is only done - for v1.31 or above because the kubeadm ControlPlaneKubeletLocalMode was introduced with kubeadm 1.31. This feature configures - the kubelet to communicate with the local apiserver. 
Only because of that the kubelet immediately starts failing after the etcd - member is removed. We need the ControlPlaneKubeletLocalMode feature with 1.31 to adhere to the kubelet skew policy. - -### Suggested changes for providers - -- The Errors package was created when capi provider implementation was running as machineActuators that needed to vendor core capi to function. There is no usage recommendations today and its value is questionable since we moved to CRDs that inter-operate mostly via conditions. Instead we plan to drop the dedicated semantic for terminal failure and keep improving Machine lifecycle signal through conditions. Therefore the Errors package [has been deprecated in v1.8](https://github.com/kubernetes-sigs/cluster-api/issues/10784). It's recommended to remove any usage of the currently exported variables. -- The `ClusterCacheTracker` component has been deprecated, please use the new `ClusterCache` instead. For more context and examples for - how to use it, see [PR: Introduce new ClusterCache](https://github.com/kubernetes-sigs/cluster-api/pull/11247) and the corresponding - [issue](https://github.com/kubernetes-sigs/cluster-api/issues/11272). Some notes: - - The `DisabledFor` option (previously `ClientUncachedObjects`) is not defaulted to `&corev1.ConfigMap` & `&corev1.Secret` anymore, - thus it's now necessary to explicitly set `DisabledFor` to avoid caching ConfigMaps and Secrets. - - `SecretClient` and `UserAgent` are now mandatory options, please take a look at the corresponding godoc. diff --git a/docs/book/src/images/machinehealthcheck-controller.plantuml b/docs/book/src/images/machinehealthcheck-controller.plantuml index 2481d302e7ff..deaf41401027 100644 --- a/docs/book/src/images/machinehealthcheck-controller.plantuml +++ b/docs/book/src/images/machinehealthcheck-controller.plantuml @@ -2,33 +2,54 @@ @startuml machinehealthcheck-controller start; -:Machine Health Check controller; +:MachineHealthCheck controller; repeat - repeat - :MachineHealthCheck controller enqueues a Reconcile call; - if (Nodes being watched in remote cluster) then (no) + :MachineHealthCheck controller enqueues a Reconcile call; + + if (Nodes being watched in remote cluster?) then (no) :Watch nodes in remote cluster; else (yes) endif - :Find targets: Machines matched by selector plus respective Nodes; - :Health check targets: Determine which Machines require remediation; - repeat while (Remediations are allowed (current unhealthy <= max unhealthy)) is (no) - -> yes; - repeat - if (Target requires remediation) then (yes) - if (Machine is owned by a MachineSet) then (yes) - if (Machine is a Control Plane Machine) then (no) - #LightBlue:Delete Machine; - else (yes) + + :Find targets: Machines matched by selector; + :Update ExpectedMachines and Targets in status; + + #LightBlue:Health check targets: Determine healthy/unhealthy machines; + :Update CurrentHealthy in status; + + if (Remediation allowed?\n(conditions in spec.triggerIf met)) then (no) + :Set RemediationsAllowed = 0; + #LightBlue:Mark RemediationAllowedCondition as False; + :Emit RemediationRestricted event; + else (yes) + :Calculate RemediationsAllowed count; + #LightBlue:Mark RemediationAllowedCondition as True; + + repeat + if (Target is unhealthy?) then (yes) + if (ExternalRemediationTemplate defined?) then (yes) + if (External remediation request exists?) 
then (no) + #LightBlue:Create external remediation request; + #LightBlue:Set MachineExternallyRemediatedCondition; + else (yes) + endif + else (no) + if (Machine not in deletion AND\n(OwnerRemediated missing OR completed)?) then (yes) + #LightBlue:Set MachineOwnerRemediatedCondition to False; + else (no) + endif endif + :Emit MachineMarkedUnhealthy event; else (no) + if (ExternalRemediationTemplate defined\nAND request exists?) then (yes) + #LightBlue:Delete external remediation request; + else (no) + endif endif - else (no) - endif - repeat while (more Targets) is (yes) - -> no; -repeat while (Targets likely to go unhealthy) is (yes: requeue with minimum - time before timeout as delay) + repeat while (More targets?) is (yes) + -> no; + endif +repeat while (Any target likely to go unhealthy soon?) is (yes: requeue with\nminimum timeout delay) -> no; stop; diff --git a/docs/book/src/images/machinehealthcheck-controller.png b/docs/book/src/images/machinehealthcheck-controller.png index 78f235b3b537..d83b071c6752 100644 Binary files a/docs/book/src/images/machinehealthcheck-controller.png and b/docs/book/src/images/machinehealthcheck-controller.png differ diff --git a/docs/book/src/images/runtime-sdk-lifecycle-hooks.png b/docs/book/src/images/runtime-sdk-lifecycle-hooks.png index 7153ee288aef..2cefd8508e85 100644 Binary files a/docs/book/src/images/runtime-sdk-lifecycle-hooks.png and b/docs/book/src/images/runtime-sdk-lifecycle-hooks.png differ diff --git a/docs/book/src/introduction.md b/docs/book/src/introduction.md index 9b8465d47e43..6002450ffad4 100644 --- a/docs/book/src/introduction.md +++ b/docs/book/src/introduction.md @@ -16,8 +16,9 @@ Started by the Kubernetes Special Interest Group (SIG) [Cluster Lifecycle](https

ClusterAPI documentation versions

-This book documents ClusterAPI v1.11. For other Cluster API versions please see the corresponding documentation: +This book documents ClusterAPI v1.12. For other Cluster API versions please see the corresponding documentation: * [main.cluster-api.sigs.k8s.io](https://main.cluster-api.sigs.k8s.io) +* [release-1-11.cluster-api.sigs.k8s.io](https://release-1-11.cluster-api.sigs.k8s.io) * [release-1-10.cluster-api.sigs.k8s.io](https://release-1-10.cluster-api.sigs.k8s.io) * [release-1-9.cluster-api.sigs.k8s.io](https://release-1-9.cluster-api.sigs.k8s.io) * [release-1-8.cluster-api.sigs.k8s.io](https://release-1-8.cluster-api.sigs.k8s.io) diff --git a/docs/book/src/reference/api/labels-and-annotations.md b/docs/book/src/reference/api/labels-and-annotations.md index fc428bd5acd6..03ee614caad4 100644 --- a/docs/book/src/reference/api/labels-and-annotations.md +++ b/docs/book/src/reference/api/labels-and-annotations.md @@ -21,14 +21,14 @@ | Annotation | Note | Managed By | Applies to | |:-----------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------|:-----------------------------------------------| | before-upgrade.hook.cluster.cluster.x-k8s.io | It specifies the prefix we search each annotation for during the before-upgrade lifecycle hook to block propagating the new version to the control plane. These hooks will prevent propagation of changes made to the Cluster Topology to the underlying objects. | User | Clusters | -| cluster.x-k8s.io/annotations-from-machine| It is set on nodes to track the annotations that originated from machines.| Cluster API | Nodes (workload cluster)| +| cluster.x-k8s.io/annotations-from-machine | It is set on nodes to track the annotations that originated from machines. | Cluster API | Nodes (workload cluster) | | cluster.x-k8s.io/cloned-from-groupkind | It is the annotation that stores the group-kind of the template from which the current resource has been cloned from. | Cluster API | All Cluster API objects cloned from a template | | cluster.x-k8s.io/cloned-from-name | It is the annotation that stores the name of the template from which the current resource has been cloned from. | Cluster API | All Cluster API objects cloned from a template | | cluster.x-k8s.io/cluster-name | It is set on nodes identifying the name of the cluster the node belongs to. | Cluster API | Nodes (workload cluster) | | cluster.x-k8s.io/cluster-namespace | It is set on nodes identifying the namespace of the cluster the node belongs to. | Cluster API | Nodes (workload cluster) | | cluster.x-k8s.io/delete-machine | It marks control plane and worker nodes that will be given priority for deletion when KCP or a MachineSet scales down. It is given top priority on all delete policies. | User | Machines | | cluster.x-k8s.io/disable-machine-create | It can be used to signal a MachineSet to stop creating new machines. 
It is utilized in the OnDelete MachineDeploymentStrategy to allow the MachineDeployment controller to scale down older MachineSets when Machines are deleted and add the new replicas to the latest MachineSet. | Cluster API | MachineSets | -| cluster.x-k8s.io/labels-from-machine| It is set on nodes to track the labels that originated from machines.| Cluster API | Nodes (workload cluster)| +| cluster.x-k8s.io/labels-from-machine | It is set on nodes to track the labels that originated from machines. | Cluster API | Nodes (workload cluster) | | cluster.x-k8s.io/managed-by | It can be applied to InfraCluster resources to signify that some external system is managing the cluster infrastructure. Provider InfraCluster controllers will ignore resources with this annotation. An external controller must fulfill the contract of the InfraCluster resource. External infrastructure providers should ensure that the annotation, once set, cannot be removed. | User | InfraClusters | | cluster.x-k8s.io/machine | It is set on nodes identifying the machine the node belongs to. | Cluster API | Nodes (workload cluster) | | cluster.x-k8s.io/owner-kind | It is set on nodes identifying the machine's owner kind the node belongs to. | Cluster API | Nodes (workload cluster) | @@ -40,7 +40,6 @@ | clusterctl.cluster.x-k8s.io/block-move | BlockMoveAnnotation prevents the cluster move operation from starting if it is defined on at least one of the objects in scope. Provider controllers are expected to set the annotation on resources that cannot be instantaneously paused and remove the annotation when the resource has been actually paused. | Providers | All Cluster API objects | | clusterctl.cluster.x-k8s.io/delete-for-move | DeleteForMoveAnnotation will be set to objects that are going to be deleted from the source cluster after being moved to the target cluster during the clusterctl move operation. It will help any validation webhook to take decision based on it. | Cluster API | All Cluster API objects | | clusterctl.cluster.x-k8s.io/skip-crd-name-preflight-check | Can be placed on provider CRDs, so that clusterctl doesn't emit an error if the CRD doesn't comply with Cluster APIs naming scheme. Only CRDs that are referenced by core Cluster API CRDs have to comply with the naming scheme. | Providers | CRDs | -| controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration | It is a machine annotation that stores the json-marshalled string of KCP ClusterConfiguration. This annotation is used to detect any changes in ClusterConfiguration and trigger machine rollout in KCP. | Cluster API | Machines | | controlplane.cluster.x-k8s.io/remediation-for | It is a machine annotation that links a new machine to the unhealthy machine it is replacing. | Cluster API | Machines | | controlplane.cluster.x-k8s.io/remediation-in-progress | It is a KCP annotation that tracks that the system is in between having deleted an unhealthy machine and recreating its replacement. | Cluster API | KubeadmControlPlanes | | controlplane.cluster.x-k8s.io/skip-coredns | It explicitly skips reconciling CoreDNS if set. | User | KubeadmControlPlanes | @@ -61,3 +60,27 @@ | topology.cluster.x-k8s.io/upgrade-concurrency | It can be used to configure the maximum concurrency while upgrading MachineDeployments of a classy Cluster. It is set as a top level annotation on the Cluster object. The value should be >= 1. If unspecified the upgrade concurrency will default to 1. 
| Cluster API | Clusters |
| unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check | It can be used to disable the webhook check on update that disallows a pre-existing Cluster to be populated with Topology information and Class. | User | Clusters |
| unsafe.topology.cluster.x-k8s.io/disable-update-version-check | It can be used to disable the webhook checks on update that disallows updating the .topology.spec.version on certain conditions. | User | Clusters |
+
+
+# Internal Annotations
+
+The following annotations are used by CAPI internally.
+
+
+
+| Annotation | Note | Applies to |
+|-------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
+| in-place-updates.internal.cluster.x-k8s.io/acknowledge-move | This annotation is added by the MD controller to a MachineSet when it acknowledges a machine that is pending acknowledgement after being moved from an oldMS. | MachineSet |
+| in-place-updates.internal.cluster.x-k8s.io/move-machines-to-machineset | This annotation is added by the MD controller to the oldMS when it should scale down by moving machines that can be updated in-place to the newMS instead of deleting them. | MachineSet |
+| in-place-updates.internal.cluster.x-k8s.io/pending-acknowledge-move | This annotation is added by the MS controller to a machine when it is being moved from the oldMS to the newMS. | Machine |
+| in-place-updates.internal.cluster.x-k8s.io/receive-machines-from-machinesets | This annotation is added by the MD controller to the newMS when it should receive replicas from an oldMS. | MachineSet |
+| in-place-updates.internal.cluster.x-k8s.io/update-in-progress | This annotation is added to machines by the controller owning the Machine when an in-place update is started. | Machine |
+| topology.internal.cluster.x-k8s.io/upgrade-step | This annotation is added by the topology controller to a cluster to track upgrade steps. | Clusters |
diff --git a/docs/book/src/reference/glossary.md b/docs/book/src/reference/glossary.md
index 44210e338e17..bce714c45b5b 100644
--- a/docs/book/src/reference/glossary.md
+++ b/docs/book/src/reference/glossary.md
@@ -1,6 +1,6 @@
# Table of Contents
-[A](#a) | [B](#b) | [C](#c) | [D](#d) | [E](#e) | [H](#h) | [I](#i) | [K](#k) | [L](#l)| [M](#m) | [N](#n) | [O](#o) | [P](#p) | [R](#r) | [S](#s) | [T](#t) | [W](#w)
+[A](#a) | [B](#b) | [C](#c) | [D](#d) | [E](#e) | [H](#h) | [I](#i) | [K](#k) | [L](#l)| [M](#m) | [N](#n) | [O](#o) | [P](#p) | [R](#r) | [S](#s) | [T](#t) | [U](#u) | [W](#w)
# A
---
@@ -78,6 +78,9 @@ Cluster API Provider Akamai (Linode)
### CAPM3
Cluster API Provider Metal3
+### CAPMS
+Cluster API Provider metal-stack
+
### CAPN
Cluster API Provider Nested
@@ -129,6 +132,15 @@ Cluster API IPAM Provider Metal3
### CAREX
Cluster API Runtime Extensions Provider Nutanix
+### Chained upgrade
+An upgrade sequence that goes from one Kubernetes version to another by passing through a set of intermediate versions.
+E.g., upgrading from v1.31.0 (current state) to v1.34.0 (target version) requires
+a chained upgrade with the following steps: v1.32.0 (first intermediate version) -> v1.33.0 (second intermediate version) -> v1.34.0 (target version).
+
+The sequence of versions in a chained upgrade is also called an [upgrade plan](#upgrade-plan).
+
+See also [efficient upgrade](#efficient-upgrade).
+
### Cloud provider
Or __Cloud service provider__
@@ -216,6 +228,14 @@ A feature implementation offered as part of the Cluster API project and maintain
# E
---
+### Efficient upgrade
+
+A [chained upgrade](#chained-upgrade) where worker nodes skip some of the intermediate versions,
+when allowed by the [Kubernetes version skew policy](https://kubernetes.io/releases/version-skew-policy/).
+
+When the chained upgrade is also an efficient upgrade, the [upgrade plan](#upgrade-plan) for worker machines is a subset
+of the [upgrade plan](#upgrade-plan) for control plane machines.
+
### External patch
[Patch](#patch) generated by an external component using [Runtime SDK](#runtime-sdk). Alternative to [inline patch](#inline-patch).
@@ -261,6 +281,12 @@ are propagated in place by CAPI controllers to avoid the more elaborated mechani
They include metadata, MinReadySeconds, NodeDrainTimeout, NodeVolumeDetachTimeout and NodeDeletionTimeout but are not limited to be expanded in the future.
+### In-place update
+
+Any change to a Machine spec that is performed without deleting the machine and creating a new one.
+
+Note: changing [in-place mutable fields](#in-place-mutable-fields) is not considered an in-place update.
+
### Instance
see [Server](#server)
@@ -269,6 +295,8 @@ see [Server](#server)
A resource that does not mutate. In Kubernetes we often state the instance of a running pod is immutable or does not change once it is run. In order to make a change, a new pod is run. In the context of [Cluster API](#cluster-api) we often refer to a running instance of a [Machine](#machine) as being immutable, from a [Cluster API](#cluster-api) perspective.
+Note: Cluster API also has extensibility points that make it possible to perform [in-place updates](#in-place-update) of machines.
+
### IPAM provider
Refers to a [provider](#provider) that allows Cluster API to interact with IPAM solutions.
@@ -374,6 +402,16 @@ to refer to any project that can be deployed and provides functionality to the C
See [Bootstrap provider](#bootstrap-provider), [Control plane provider](#control-plane-provider), [Core provider](#core-provider), [Infrastructure provider](#infrastructure-provider), [IPAM provider](#ipam-provider) [Runtime extension provider](#runtime-extension-provider).
+### ProviderID
+
+ProviderID is the provider-specific identifier used to correlate Cluster API objects with the
+underlying cloud instance. It appears in three resource types: InfrastructureMachine, Machine
+(Cluster API core), and Node (workload cluster). CAPI copies the ProviderID from the
+InfrastructureMachine to the Machine. The Node's ProviderID is set by the [Cloud Controller Manager
+(CCM)](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) or the kubelet.
+
+The format is a URI-like string: `://`.
+
### Provider components
Refers to the YAML artifact published as part of the release process for [providers](#provider);
@@ -447,6 +485,24 @@ A [Runtime Hook](#runtime-hook) that allows external components to generate [pat
See [Topology Mutation](../tasks/experimental-features/runtime-sdk/implement-topology-mutation-hook.md)
+# U
+---
+
+### Update Extension
+
+A [runtime extension provider](#runtime-extension-provider) that implements [Update Lifecycle Hooks](#update-lifecycle-hooks).
+
+### Update Lifecycle Hooks
+A set of Cluster API [Runtime Hooks](#runtime-hook) called when performing the "can update in-place" decision or
+when performing an [in-place update](#in-place-update).
+
+### Upgrade plan
+The sequence of intermediate versions, up to and including the target version, that a Cluster must upgrade to when
+performing a [chained upgrade](#chained-upgrade).
+
+Notably, the upgrade plan for control plane machines might be a superset of the upgrade plan for
+worker machines.
+
# W
---
diff --git a/docs/book/src/reference/providers.md b/docs/book/src/reference/providers.md
index db1c810ed433..ab7046568fa6 100644
--- a/docs/book/src/reference/providers.md
+++ b/docs/book/src/reference/providers.md
@@ -7,7 +7,7 @@ by SIG Cluster Lifecycle as well as providers from other open-source repositorie
Each provider is the responsibility of the respective maintainers and we highly recommend everyone interested in a specific provider to engage with the corresponding team to show support, share use cases,
-learn more about the other users of the same provider.
+learn more about the other users of the same provider. We also recommend reading the provider's documentation carefully, testing it, and performing proper due diligence before deciding to use a provider in production, as you would for any other open source project.
@@ -28,6 +28,7 @@ source of inspiration and ideas for others.
## Control Plane
- [Canonical Kubernetes Platform](https://github.com/canonical/cluster-api-k8s)
+- [Hosted Control Plane](https://github.com/teutonet/cluster-api-provider-hosted-control-plane)
- [k0smotron/k0s](https://github.com/k0sproject/k0smotron)
- [K3s](https://github.com/cluster-api-provider-k3s/cluster-api-k3s)
- [Kamaji](https://github.com/clastix/cluster-api-control-plane-provider-kamaji)
@@ -58,6 +59,7 @@ source of inspiration and ideas for others.
- [KubeVirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt) - [MAAS](https://github.com/spectrocloud/cluster-api-provider-maas) - [Metal3](https://github.com/metal3-io/cluster-api-provider-metal3) +- [metal-stack](https://github.com/metal-stack/cluster-api-provider-metal-stack) - [Microvm](https://github.com/liquidmetal-dev/cluster-api-provider-microvm) - [Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested) - [Nutanix](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix) diff --git a/docs/book/src/reference/versions.md b/docs/book/src/reference/versions.md index 94842046ba07..3b3e233b8c97 100644 --- a/docs/book/src/reference/versions.md +++ b/docs/book/src/reference/versions.md @@ -67,9 +67,10 @@ The table below documents support matrix for Cluster API versions (versions olde | Minor Release | Status | Supported Until (including maintenance mode) | |---------------|-------------------------|---------------------------------------------------------------------------------------------| +| v1.12.x | Standard support period | in maintenance mode when v1.14.0 will be released, EOL when v1.15.0 will be released | | v1.11.x | Standard support period | in maintenance mode when v1.13.0 will be released, EOL when v1.14.0 will be released | -| v1.10.x | Standard support period | in maintenance mode when v1.12.0 will be released, EOL when v1.13.0 will be released | -| v1.9.x | Maintenance mode | Maintenance mode since 2025-08-12 - v1.11.0 release date, EOL when v1.12.0 will be released | +| v1.10.x | Maintenance mode | Maintenance mode since 2025-12-18 - v1.12.0 release date, EOL when v1.13.0 will be released | +| v1.9.x | EOL | EOL since 2025-12-18 - v1.12.0 release date | | v1.8.x | EOL | EOL since 2025-08-12 - v1.11.0 release date | | v1.7.x | EOL | EOL since 2025-04-22 - v1.10.0 release date | | v1.6.x | EOL | EOL since 2024-12-10 - v1.9.0 release date | @@ -80,6 +81,41 @@ The table below documents support matrix for Cluster API versions (versions olde | v1.1.x | EOL | EOL since 2023-03-28 - v1.4.0 release date | | v1.0.x | EOL | EOL since 2022-12-01 - v1.3.0 release date | + + + + #### Skip upgrades Cluster API supports at maximum n-3 minor version skip upgrades. @@ -270,22 +306,32 @@ In some cases, also Cluster API and/or Cluster API providers are defining additi The following table defines the support matrix for the Cluster API core provider. See [Cluster API release support](#cluster-api-release-support) and [Kubernetes versions support](#kubernetes-versions-support). 
-| | v1.9, _Maintenance Mode_ | v1.10 | v1.11 | -|------------------|--------------------------|-------------------|-------------------| -| Kubernetes v1.24 | | | | -| Kubernetes v1.25 | | | | -| Kubernetes v1.26 | ✓ (only workload) | | | -| Kubernetes v1.27 | ✓ (only workload) | ✓ (only workload) | | -| Kubernetes v1.28 | ✓ | ✓ (only workload) | ✓ (only workload) | -| Kubernetes v1.29 | ✓ | ✓ | ✓ (only workload) | -| Kubernetes v1.30 | ✓ | ✓ | ✓ | -| Kubernetes v1.31 | ✓ | ✓ | ✓ | -| Kubernetes v1.32 | ✓ >= v1.9.1 | ✓ | ✓ | -| Kubernetes v1.33 | | ✓ >= v1.10.1 | ✓ | -| Kubernetes v1.34 | | | ✓ >= v1.11.1 | +| |v1.10, _Maintenance Mode_ | v1.11 | v1.12 | +|------------------|--------------------------|-------------------|------------------------------| +| Kubernetes v1.27 | ✓ (only workload) | | | +| Kubernetes v1.28 | ✓ (only workload) | ✓ (only workload) | | +| Kubernetes v1.29 | ✓ | ✓ (only workload) | ✓ (only workload) | +| Kubernetes v1.30 | ✓ | ✓ | ✓ (only workload) | +| Kubernetes v1.31 | ✓ | ✓ | ✓ | +| Kubernetes v1.32 | ✓ | ✓ | ✓ | +| Kubernetes v1.33 | ✓ >= v1.10.1 | ✓ | ✓ | +| Kubernetes v1.34 | | ✓ >= v1.11.1 | ✓ | +| Kubernetes v1.35 | | | ✓ >= v1.12.1 | + See also [Kubernetes version specific notes](#kubernetes-version-specific-notes). + + ### Kubeadm Bootstrap provider (`kubeadm-bootstrap-controller`) For each version of the Cluster API core provider, there is a corresponding version of the Kubeadm Bootstrap provider. @@ -303,9 +349,6 @@ using the [kubeadm API](https://kubernetes.io/docs/setup/production-environment/ | | kubeadm API Version | |------------------|------------------------------------------------------------------------------------| -| Kubernetes v1.24 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | -| Kubernetes v1.25 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | -| Kubernetes v1.26 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | | Kubernetes v1.27 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | | Kubernetes v1.28 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | | Kubernetes v1.29 | [v1beta3](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/) | @@ -314,6 +357,7 @@ using the [kubeadm API](https://kubernetes.io/docs/setup/production-environment/ | Kubernetes v1.32 | [v1beta4](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/) | | Kubernetes v1.33 | [v1beta4](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/) | | Kubernetes v1.34 | [v1beta4](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/) | +| Kubernetes v1.35 | [v1beta4](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/) | ### Kubeadm Control Plane provider (`kubeadm-control-plane-controller`) @@ -355,10 +399,11 @@ Notably, the Max CoreDNS version could change also with patch releases. 
| v1.10 | v1.12.1 |
| >= v1.10.5 | v1.12.3 |
| v1.10.7 | v1.12.4 |
-| >= v1.10.8 | v1.13.1 |
+| >= v1.10.8 | v1.13.1 |
| v1.11 | v1.12.3 |
| v1.11.2 | v1.12.4 |
-| >= v1.11.3 | v1.13.1 |
+| >= v1.11.3 | v1.13.1 |
+| v1.12 | v1.13.1 |
See [corefile-migration](https://github.com/coredns/corefile-migration)
diff --git a/docs/book/src/security/security-guidelines.md b/docs/book/src/security/security-guidelines.md
new file mode 100644
index 000000000000..ed6beb71e9a4
--- /dev/null
+++ b/docs/book/src/security/security-guidelines.md
@@ -0,0 +1,88 @@
+# Security Guidelines for Cluster API Users
+
+This document compiles security best practices for using Cluster API. These guidelines are based on the [Cluster API Security Self-Assessment](https://github.com/kubernetes/sig-security/blob/main/sig-security-assessments/cluster-api/self-assessment.md#threat-modeling-with-stride) conducted by the Kubernetes SIG Security. We recommend that organizations adapt these guidelines to their specific infrastructure and security requirements to ensure safe operations.
+
+## Comprehensive auditing
+
+To ensure comprehensive auditing, the following components require audit configuration:
+
+- **Cluster-level Auditing**
+  - Auditing on the management cluster
+  - API server auditing for all workload clusters
+
+- **Node/VM-level Auditing**
+  - Audit access to KubeConfig files located on the node
+  - Audit access or edits to CA private keys and cert files located on the node
+
+- **Cloud Provider Auditing**
+  - Cloud API auditing to log all actions performed using cloud credentials
+
+After configuring these audit sources, centralize the logs using aggregation tools and implement real-time monitoring and alerting to detect suspicious activities and security incidents.
+
+## Use least privilege
+
+To minimize security risks related to cloud provider access, create dedicated cloud credentials that have only the necessary permissions to manage the lifecycle of a cluster. Avoid using administrative or root accounts for Cluster API operations, and use separate credentials for different purposes such as the management cluster versus workload clusters.
+
+## Limit access
+
+Implement access restrictions to protect cluster infrastructure.
+
+### Control Plane Protection
+
+Limit who can create pods on control plane nodes through multiple methods:
+
+- **Taints and Tolerations**: Apply `NoSchedule` taints to control plane nodes to prevent general workload scheduling. See [Kubernetes Taints and Tolerations documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
+- **RBAC Policies**: Restrict pod creation permissions using Role-Based Access Control. See [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+- **Admission Controllers**: Implement admission webhooks to enforce pod placement policies. See [Dynamic Admission Control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)
+
+### SSH Access
+
+Disable or restrict SSH access to nodes in a cluster to prevent unauthorized modifications and access to sensitive files.
+
+## Second pair of eyes
+
+Implement a review process where at least two people must approve privileged actions such as creating, deleting, or updating clusters. GitOps provides an effective way to enforce this requirement through pull request workflows, where changes to cluster configurations must be reviewed and approved by another team member before being merged and applied to the infrastructure.
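One way to enforce such a review gate is sketched below, assuming a GitHub Actions deployment environment (here called `cluster-admin`) that has required reviewers configured; the workflow name, paths, and apply command are illustrative, not part of this change:

```yaml
# Hedged sketch: gate cluster changes behind a manual approval by binding the
# apply job to a GitHub environment configured with required reviewers.
# Names, paths, and the apply command are illustrative; credentials for the
# target management cluster are intentionally omitted.
name: apply-cluster-changes
on:
  push:
    branches: [ main ]
jobs:
  apply:
    runs-on: ubuntu-latest
    environment: cluster-admin   # approval gate: reviewers must approve before this job runs
    steps:
      - uses: actions/checkout@v4
      - name: Apply cluster manifests
        run: kubectl apply -f clusters/   # assumes kubeconfig/credentials are provided separately
```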
+
+## Implement comprehensive alerting
+
+Configure alerts in the centralized audit log system to detect security incidents and resource anomalies.
+
+### Security Event Monitoring
+
+- Alert when Cluster API components are modified, restarted, or experience unexpected state changes
+- Monitor and alert on unauthorized changes to sensitive files on machine images
+- Alert on unexpected machine restarts or shutdowns
+- Monitor deletion or modification of Elastic Load Balancers (ELB) for API servers
+
+### Resource Activity Monitoring
+
+- Alert on all cloud resource creation, update, and deletion activities
+- Identify anomalous patterns such as mass resource creation or deletion
+- Monitor for resources created outside expected boundaries
+
+### Resource Limit Monitoring
+
+- Alert when the number of clusters approaches or exceeds defined soft limits
+- Monitor node creation rates and alert when approaching capacity limits
+- Track usage against cloud provider quotas and organizational limits
+- Alert on excessive API calls or resource creation requests
+
+## Cluster isolation and segregation
+
+Implement multiple layers of isolation to prevent privilege escalation from workload clusters to the management cluster.
+
+### Account/Subscription Separation
+
+Separate workload clusters into different AWS accounts or Azure subscriptions, and use dedicated accounts for the management cluster and production workloads. This approach provides a strong security boundary at the cloud provider level.
+
+### Network Boundaries
+
+Separate workload and management clusters at the network level through VPC boundaries. Use a dedicated VPC/VNet for each cluster type to prevent lateral movement between clusters.
+
+### Certificate Authority Isolation
+
+Do not build a chain of trust for cluster CAs. Each cluster must have its own independent CA to ensure that workload cluster CA compromise does not provide access to the management cluster. See [Kubernetes PKI certificates and requirements](https://kubernetes.io/docs/setup/best-practices/certificates/) for best practices.
+
+## Prevent runtime updates
+
+Implement controls to prevent tampering with machine images at runtime. Disable or restrict updates to machine images at runtime and prevent unauthorized modifications through SSH access restrictions. Following [immutable infrastructure](https://glossary.cncf.io/immutable-infrastructure/) practices ensures that any changes require deploying new images rather than modifying running systems.
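As an illustration of the Control Plane Protection guidance in the Limit access section above, the snippet below shows the standard `node-role.kubernetes.io/control-plane:NoSchedule` taint as it appears on a control plane Node object; the node name is illustrative, and kubeadm-based clusters normally apply this taint automatically rather than via a manifest:

```yaml
# Sketch only: the taint that keeps general workloads off control plane nodes.
# Pods without a matching toleration will not be scheduled onto this Node.
apiVersion: v1
kind: Node
metadata:
  name: example-control-plane-node   # illustrative; Nodes are normally registered by the kubelet
spec:
  taints:
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
```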
diff --git a/docs/book/src/tasks/automated-machine-management/healthchecking.md b/docs/book/src/tasks/automated-machine-management/healthchecking.md index 124acd65bf9b..bb405b470109 100644 --- a/docs/book/src/tasks/automated-machine-management/healthchecking.md +++ b/docs/book/src/tasks/automated-machine-management/healthchecking.md @@ -35,29 +35,42 @@ metadata: spec: # clusterName is required to associate this MachineHealthCheck with a particular cluster clusterName: capi-quickstart - # (Optional) maxUnhealthy prevents further remediation if the cluster is already partially unhealthy - maxUnhealthy: 40% - # (Optional) nodeStartupTimeout determines how long a MachineHealthCheck should wait for - # a Node to join the cluster, before considering a Machine unhealthy.
- # Defaults to 10 minutes if not specified. - # Set to 0 to disable the node startup timeout. - # Disabling this timeout will prevent a Machine from being considered unhealthy when - # the Node it created has not yet registered with the cluster. This can be useful when - # Nodes take a long time to start up or when you only want condition based checks for - # Machine health. - nodeStartupTimeout: 10m # selector is used to determine which Machines should be health checked selector: matchLabels: nodepool: nodepool-0 - # Conditions to check on Nodes for matched Machines, if any condition is matched for the duration of its timeout, the Machine is considered unhealthy - unhealthyNodeConditions: - - type: Ready - status: Unknown - timeout: 300s - - type: Ready - status: "False" - timeout: 300s + # checks are the checks that are used to evaluate if a Machine is healthy. + checks: + # (Optional) nodeStartupTimeout determines how long a MachineHealthCheck should wait for + # a Node to join the cluster, before considering a Machine unhealthy. + # Defaults to 10 minutes if not specified. + # Set to 0 to disable the node startup timeout. + # Disabling this timeout will prevent a Machine from being considered unhealthy when + # the Node it created has not yet registered with the cluster. This can be useful when + # Nodes take a long time to start up or when you only want condition based checks for + # Machine health. + nodeStartupTimeoutSeconds: 600 + + # Conditions to check on Nodes for matched Machines, if any condition is matched for the duration of its timeout, the Machine is considered unhealthy + unhealthyNodeConditions: + - type: Ready + status: Unknown + timeoutSeconds: 300 + - type: Ready + status: "False" + timeoutSeconds: 300 + unhealthyMachineConditions: + - type: "Ready" + status: Unknown + timeoutSeconds: 300 + - type: "Ready" + status: "False" + timeoutSeconds: 300 + # remediation configures if and how remediation is triggered if a Machine is unhealthy. + remediation: + triggerIf: + # (Optional) unhealthyLessThanOrEqualTo prevents further remediation if the cluster is already partially unhealthy + unhealthyLessThanOrEqualTo: 40% ``` Use this example as the basis for defining a MachineHealthCheck for control plane nodes managed via @@ -70,17 +83,20 @@ metadata: name: capi-quickstart-kcp-unhealthy-5m spec: clusterName: capi-quickstart - maxUnhealthy: 100% selector: matchLabels: cluster.x-k8s.io/control-plane: "" - unhealthyNodeConditions: + checks: + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 + remediation: + triggerIf: + unhealthyLessThanOrEqualTo: 100% ```
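Pulling the new fields together, the following is a minimal sketch of a worker MachineHealthCheck using the `checks` and `remediation` schema shown above; the API version, resource name, and the choice to disable the startup timeout are illustrative assumptions rather than values taken from this change:

```yaml
# Hedged sketch combining the fields documented above.
apiVersion: cluster.x-k8s.io/v1beta2   # assumed API version for the new schema
kind: MachineHealthCheck
metadata:
  name: capi-quickstart-workers-unhealthy-5m   # illustrative name
spec:
  clusterName: capi-quickstart
  selector:
    matchLabels:
      nodepool: nodepool-0
  checks:
    # Disable the node startup timeout so Machines are never marked unhealthy
    # only because their Node has not registered yet.
    nodeStartupTimeoutSeconds: 0
    unhealthyNodeConditions:
      - type: Ready
        status: Unknown
        timeoutSeconds: 300
      - type: Ready
        status: "False"
        timeoutSeconds: 300
  remediation:
    triggerIf:
      # Stop remediating once more than 40% of matched Machines are unhealthy.
      unhealthyLessThanOrEqualTo: 40%
```

With `nodeStartupTimeoutSeconds: 0`, only the condition-based checks decide Machine health, mirroring the behavior described in the comments above.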