diff --git a/apis/core/v1alpha1/targettypes/kubernetes_cluster.go b/apis/core/v1alpha1/targettypes/kubernetes_cluster.go index 872c892d1..69ec7ed57 100644 --- a/apis/core/v1alpha1/targettypes/kubernetes_cluster.go +++ b/apis/core/v1alpha1/targettypes/kubernetes_cluster.go @@ -23,6 +23,9 @@ type KubernetesClusterTargetConfig struct { Kubeconfig ValueRef `json:"kubeconfig"` OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty"` + + // SelfConfig contains the config for a Target that points to the landscaper resource cluster. + SelfConfig *SelfConfig `json:"selfConfig,omitempty"` } // DefaultKubeconfigKey is the default that is used to hold a kubeconfig. @@ -37,6 +40,7 @@ type ValueRef struct { type kubeconfigJSON struct { Kubeconfig *ValueRef `json:"kubeconfig"` OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty"` + SelfConfig *SelfConfig `json:"selfConfig,omitempty"` } // MarshalJSON implements the json marshaling for a JSON @@ -60,12 +64,15 @@ func (v *ValueRef) UnmarshalJSON(data []byte) error { func (kc *KubernetesClusterTargetConfig) UnmarshalJSON(data []byte) error { kj := &kubeconfigJSON{} err := json.Unmarshal(data, kj) - if err == nil && (kj.Kubeconfig != nil || kj.OIDCConfig != nil) { + if err == nil && (kj.Kubeconfig != nil || kj.OIDCConfig != nil || kj.SelfConfig != nil) { // parsing was successful if kj.Kubeconfig != nil { kc.Kubeconfig = *kj.Kubeconfig } - kc.OIDCConfig = kj.OIDCConfig + if kj.OIDCConfig != nil { + kc.OIDCConfig = kj.OIDCConfig + } + kc.SelfConfig = kj.SelfConfig return nil } return kc.Kubeconfig.UnmarshalJSON(data) @@ -87,3 +94,8 @@ type OIDCConfig struct { Audience []string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } + +type SelfConfig struct { + ServiceAccount v1.LocalObjectReference `json:"serviceAccount,omitempty"` + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` +} diff --git a/apis/core/v1alpha1/targettypes/kubernetes_cluster_test.go 
b/apis/core/v1alpha1/targettypes/kubernetes_cluster_test.go index ed8327c74..51f4baf2b 100644 --- a/apis/core/v1alpha1/targettypes/kubernetes_cluster_test.go +++ b/apis/core/v1alpha1/targettypes/kubernetes_cluster_test.go @@ -75,4 +75,35 @@ var _ = Describe("Kubernetes Cluster Target Types", func() { }, })) }) + + It("should marshal a self config", func() { + targetConfig := &targettypes.KubernetesClusterTargetConfig{ + SelfConfig: &targettypes.SelfConfig{ + ServiceAccount: v1.LocalObjectReference{ + Name: "test-account", + }, + ExpirationSeconds: ptr.To[int64](300), + }, + } + targetConfigJSON, err := json.Marshal(targetConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(targetConfigJSON).To(MatchJSON(`{"kubeconfig":null,"selfConfig":{"serviceAccount":{"name":"test-account"},"expirationSeconds":300}}`)) + }) + + It("should unmarshal a self config", func() { + configJSON := []byte(`{"selfConfig":{"serviceAccount":{"name":"test-account"},"expirationSeconds":300}}`) + config := &targettypes.KubernetesClusterTargetConfig{} + Expect(json.Unmarshal(configJSON, config)).To(Succeed()) + Expect(config).To(Equal(&targettypes.KubernetesClusterTargetConfig{ + Kubeconfig: targettypes.ValueRef{ + StrVal: nil, + }, + SelfConfig: &targettypes.SelfConfig{ + ServiceAccount: v1.LocalObjectReference{ + Name: "test-account", + }, + ExpirationSeconds: ptr.To[int64](300), + }, + })) + }) }) diff --git a/docs/guided-tour/targets/README.md b/docs/guided-tour/targets/01-kubeconfig-targets/README.md similarity index 97% rename from docs/guided-tour/targets/README.md rename to docs/guided-tour/targets/01-kubeconfig-targets/README.md index 9ac0dbd7d..262b9a3dd 100644 --- a/docs/guided-tour/targets/README.md +++ b/docs/guided-tour/targets/01-kubeconfig-targets/README.md @@ -13,7 +13,7 @@ If your target cluster is a Gardener shoot cluster, you typically have a for the target cluster. It is **not** possible to use such a kubeconfig in a `Target` custom resource. 
You have the following alternatives: -- Use an [OIDC Target](../../usage/Targets.md#oidc-target-to-kubernetes-target-cluster). +- Use an [OIDC Target](../../../usage/Targets.md#oidc-target-to-kubernetes-target-cluster) - Use a Target whose kubeconfig is based on a ServiceAccount token, as described below. ## Targets Whose Kubeconfig is Based On a ServiceAccount Token diff --git a/docs/guided-tour/targets/resources/clusterrolebinding.yaml.tpl b/docs/guided-tour/targets/01-kubeconfig-targets/resources/clusterrolebinding.yaml.tpl similarity index 100% rename from docs/guided-tour/targets/resources/clusterrolebinding.yaml.tpl rename to docs/guided-tour/targets/01-kubeconfig-targets/resources/clusterrolebinding.yaml.tpl diff --git a/docs/guided-tour/targets/02-self-targets/README.md b/docs/guided-tour/targets/02-self-targets/README.md new file mode 100644 index 000000000..b2d5f2c3e --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/README.md @@ -0,0 +1,68 @@ +--- +title: Self Targets +sidebar_position: 2 +--- + +# Self Targets + +This example demonstrates how you can use the Landscaper to deploy objects on its own resource cluster. +This means in this example the resource cluster and the target cluster are the same. +For this use-case, the Landscaper provides a special type of targets, so-called +[Self Targets](../../../usage/Targets.md#targets-to-the-landscaper-resource-cluster-self-targets). +Their advantage is that you do not need to include a kubeconfig into them. Instead, the Target references a ServiceAccount +in the same Namespace. 
The Self Target in this example looks as follows: + +```yaml +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Target +metadata: + name: self-target + namespace: cu-example +spec: + type: landscaper.gardener.cloud/kubernetes-cluster + config: + selfConfig: + serviceAccount: + name: self-serviceaccount + expirationSeconds: 3600 +``` + +This Target references a [ServiceAccount `self-serviceaccount`](installation/serviceaccount.yaml.tpl). +A [ClusterRoleBinding `landscaper:guided-tour:self`](installation/clusterrolebinding.yaml.tpl) binds the ServiceAccount +to the ClusterRole `cluster-admin`, so that it has the necessary rights to create objects on the resource cluster. +The [Installation `self-inst`](installation/installation.yaml.tpl) uses the Target to deploy a ConfigMap on the +resource cluster. + + +## Procedure + +1. In the [settings](commands/settings) file, adjust the variable `RESOURCE_CLUSTER_KUBECONFIG_PATH`. + +2. On the Landscaper resource cluster, create namespaces `cu-example` and `example`. + +3. Run script [commands/deploy-k8s-resources.sh](commands/deploy-k8s-resources.sh). + It templates the following objects and applies them to the resource cluster: + - [ServiceAccount `self-serviceaccount`](installation/serviceaccount.yaml.tpl), + - [ClusterRoleBinding `landscaper:guided-tour:self`](installation/clusterrolebinding.yaml.tpl), + - [Target `self-target`](installation/target.yaml.tpl), + - [Installation `self-inst`](installation/installation.yaml.tpl). + + The diagram below provides an overview of these objects. + +4. Wait until the Installation is in phase `Succeeded` and check that it has created a ConfigMap `self-target-example` + in namespace `example` on the resource cluster. + +![diagram](./images/self-targets.png) + + +## Cleanup + +You can remove the Installation with the +[delete-installation script](commands/delete-installation.sh).
+When the Installation is gone, you can delete the Target, ClusterRoleBinding, and ServiceAccount with the +[delete-other-k8s-resources script](commands/delete-other-k8s-resources.sh). + + +## References + +[Self Targets](../../../usage/Targets.md#targets-to-the-landscaper-resource-cluster-self-targets) diff --git a/docs/guided-tour/targets/02-self-targets/commands/delete-installation.sh b/docs/guided-tour/targets/02-self-targets/commands/delete-installation.sh new file mode 100755 index 000000000..1475ccb73 --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/commands/delete-installation.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# +# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit + +COMPONENT_DIR="$(dirname $0)/.." +cd "${COMPONENT_DIR}" +COMPONENT_DIR="$(pwd)" +echo "COMPONENT_DIR: ${COMPONENT_DIR}" + +source "${COMPONENT_DIR}/commands/settings" + +echo "deleting installation" +kubectl delete installation "self-inst" -n "${NAMESPACE}" --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" diff --git a/docs/guided-tour/targets/02-self-targets/commands/delete-other-k8s-resources.sh b/docs/guided-tour/targets/02-self-targets/commands/delete-other-k8s-resources.sh new file mode 100755 index 000000000..a56d563aa --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/commands/delete-other-k8s-resources.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit + +COMPONENT_DIR="$(dirname $0)/.." 
+cd "${COMPONENT_DIR}" +COMPONENT_DIR="$(pwd)" +echo "COMPONENT_DIR: ${COMPONENT_DIR}" + +source "${COMPONENT_DIR}/commands/settings" + +echo "deleting target" +kubectl delete target "self-target" -n "${NAMESPACE}" --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" + +echo "deleting clusterrolebinding" +kubectl delete clusterrolebinding "landscaper:guided-tour:self" --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" + +echo "deleting serviceaccount" +kubectl delete serviceaccount "self-serviceaccount" -n "${NAMESPACE}" --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" diff --git a/docs/guided-tour/targets/02-self-targets/commands/deploy-k8s-resources.sh b/docs/guided-tour/targets/02-self-targets/commands/deploy-k8s-resources.sh new file mode 100755 index 000000000..d3181d5f3 --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/commands/deploy-k8s-resources.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# +# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit + +COMPONENT_DIR="$(dirname $0)/.." 
+cd "${COMPONENT_DIR}" +COMPONENT_DIR="$(pwd)" +echo "COMPONENT_DIR: ${COMPONENT_DIR}" + +source "${COMPONENT_DIR}/commands/settings" + +TMP_DIR=`mktemp -d` +echo "TMP_DIR: ${TMP_DIR}" + +echo "creating serviceaccount" +outputFile="${TMP_DIR}/serviceaccount.yaml" +export namespace="${NAMESPACE}" +inputFile="${COMPONENT_DIR}/installation/serviceaccount.yaml.tpl" +envsubst < ${inputFile} > ${outputFile} +kubectl apply -f ${outputFile} --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" + +echo "creating clusterrolebinding" +outputFile="${TMP_DIR}/clusterrolebinding.yaml" +export namespace="${NAMESPACE}" +inputFile="${COMPONENT_DIR}/installation/clusterrolebinding.yaml.tpl" +envsubst < ${inputFile} > ${outputFile} +kubectl apply -f ${outputFile} --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" + +echo "creating target" +outputFile="${TMP_DIR}/target.yaml" +export namespace="${NAMESPACE}" +inputFile="${COMPONENT_DIR}/installation/target.yaml.tpl" +envsubst < ${inputFile} > ${outputFile} +kubectl apply -f ${outputFile} --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" + +echo "creating installation" +outputFile="${TMP_DIR}/installation.yaml" +export namespace="${NAMESPACE}" +inputFile="${COMPONENT_DIR}/installation/installation.yaml.tpl" +envsubst < ${inputFile} > ${outputFile} +kubectl apply -f ${outputFile} --kubeconfig="${RESOURCE_CLUSTER_KUBECONFIG_PATH}" diff --git a/docs/guided-tour/targets/02-self-targets/commands/settings b/docs/guided-tour/targets/02-self-targets/commands/settings new file mode 100644 index 000000000..8f1cbe64d --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/commands/settings @@ -0,0 +1,5 @@ +# path to the kubeconfig of the resource cluster, i.e. the cluster on which installations, targets, etc. 
are created +RESOURCE_CLUSTER_KUBECONFIG_PATH="/Users/${USER}/tmp/kubes/kubeconfig.yaml" + +# namespace for resources in the resource cluster +NAMESPACE="cu-example" diff --git a/docs/guided-tour/targets/02-self-targets/images/self-targets.png b/docs/guided-tour/targets/02-self-targets/images/self-targets.png new file mode 100644 index 000000000..f81f0f233 Binary files /dev/null and b/docs/guided-tour/targets/02-self-targets/images/self-targets.png differ diff --git a/docs/guided-tour/targets/02-self-targets/installation/clusterrolebinding.yaml.tpl b/docs/guided-tour/targets/02-self-targets/installation/clusterrolebinding.yaml.tpl new file mode 100644 index 000000000..36dd7f0c5 --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/installation/clusterrolebinding.yaml.tpl @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: landscaper:guided-tour:self +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: self-serviceaccount + namespace: ${namespace} diff --git a/docs/guided-tour/targets/02-self-targets/installation/installation.yaml.tpl b/docs/guided-tour/targets/02-self-targets/installation/installation.yaml.tpl new file mode 100644 index 000000000..f91b10e49 --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/installation/installation.yaml.tpl @@ -0,0 +1,53 @@ +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Installation +metadata: + name: self-inst + namespace: ${namespace} + annotations: + landscaper.gardener.cloud/operation: reconcile + +spec: + + imports: + targets: + - name: cluster + target: self-target + + blueprint: + inline: + filesystem: + blueprint.yaml: | + apiVersion: landscaper.gardener.cloud/v1alpha1 + kind: Blueprint + jsonSchema: "https://json-schema.org/draft/2019-09/schema" + + imports: + - name: cluster + type: target + targetType: landscaper.gardener.cloud/kubernetes-cluster + + 
deployExecutions: + - name: default + type: GoTemplate + template: | + deployItems: + - name: default-deploy-item + type: landscaper.gardener.cloud/kubernetes-manifest + + target: + import: cluster + + config: + apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2 + kind: ProviderConfiguration + updateStrategy: update + manifests: + - policy: manage + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: self-target-example + namespace: example + data: + testData: hello diff --git a/docs/guided-tour/targets/02-self-targets/installation/serviceaccount.yaml.tpl b/docs/guided-tour/targets/02-self-targets/installation/serviceaccount.yaml.tpl new file mode 100644 index 000000000..9b131ad5d --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/installation/serviceaccount.yaml.tpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: self-serviceaccount + namespace: ${namespace} diff --git a/docs/guided-tour/targets/02-self-targets/installation/target.yaml.tpl b/docs/guided-tour/targets/02-self-targets/installation/target.yaml.tpl new file mode 100644 index 000000000..f1815c5c0 --- /dev/null +++ b/docs/guided-tour/targets/02-self-targets/installation/target.yaml.tpl @@ -0,0 +1,12 @@ +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Target +metadata: + name: self-target + namespace: ${namespace} +spec: + type: landscaper.gardener.cloud/kubernetes-cluster + config: + selfConfig: + serviceAccount: + name: self-serviceaccount + expirationSeconds: 3600 diff --git a/docs/usage/Targets.md b/docs/usage/Targets.md index 4d814c6e0..65e8bc7c9 100644 --- a/docs/usage/Targets.md +++ b/docs/usage/Targets.md @@ -277,4 +277,66 @@ This target can be used in Landscaper Installations in the same way as other Tar The following picture gives an overview about the cluster settings and k8s resources required to set up a trust relationship between the resource and the target cluster. 
-![OIDC Targets](images/oidc-targets.png) \ No newline at end of file +![OIDC Targets](images/oidc-targets.png) + + +## Targets to the Landscaper Resource Cluster ("Self Targets") + +You can use the Landscaper to deploy resources to the Landscaper resource cluster itself, i.e. the cluster on which the +Installations and Targets reside. You can achieve this with a special sort of Target, a so-called "Self Target". + +First, create a ServiceAccount on the resource cluster in some namespace. Note that this ServiceAccount must belong to +the same namespace as the Self Target that we are going to create, and any Installation that uses the Target. + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: + namespace: +``` + +The ServiceAccount must have enough permissions on the resource cluster to perform the deployment, which your +Installation defines. There are the following alternatives to grant the ServiceAccount permissions: + +- You can create a (Cluster)RoleBinding, that binds the ServiceAccount to an appropriate (Cluster)Role. + +- If you are using a Landscaper instance managed by the + [Landscaper-as-a-Service](https://github.com/gardener/landscaper-service), + just add the ServiceAccount to the SubjectList `subjects` in namespace `ls-user` on the resource cluster. + It will then automatically get the same roles as a user of the Landscaper instance. + + ```yaml + apiVersion: landscaper-service.gardener.cloud/v1alpha1 + kind: SubjectList + metadata: + name: subjects + namespace: ls-user + ... + spec: + subjects: + - kind: ServiceAccount + name: + namespace: + - ... + ``` + +Next, create the Self Target. 
It is a special sort of Target with the following structure: + + ```yaml + apiVersion: landscaper.gardener.cloud/v1alpha1 + kind: Target + metadata: + name: + namespace: + spec: + config: + selfConfig: + serviceAccount: + name: + expirationSeconds: # optional, defaults to 86400 = 60 * 60 * 24 + type: landscaper.gardener.cloud/kubernetes-cluster + ``` + +Now you can use this Target as usual in Installations. +There is an [example in the Guided-Tour](../guided-tour/targets/02-self-targets). diff --git a/pkg/deployer/helm/add.go b/pkg/deployer/helm/add.go index 9e0524932..32a96e1ac 100644 --- a/pkg/deployer/helm/add.go +++ b/pkg/deployer/helm/add.go @@ -9,7 +9,6 @@ import ( "fmt" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -40,10 +39,7 @@ func AddDeployerToManager(lsUncachedClient, lsCachedClient, hostUncachedClient, } log.Info("access to critical problems allowed") - d, err := NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, - log, - config, - ) + d, err := NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, lsMgr.GetConfig(), log, config) if err != nil { return err } diff --git a/pkg/deployer/helm/deletionmanager_test.go b/pkg/deployer/helm/deletionmanager_test.go index 036d82fb2..7fb48e11c 100644 --- a/pkg/deployer/helm/deletionmanager_test.go +++ b/pkg/deployer/helm/deletionmanager_test.go @@ -6,16 +6,13 @@ import ( "encoding/json" "time" - "k8s.io/utils/ptr" - - "github.com/gardener/landscaper/pkg/utils" - "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/types" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -27,6 +24,7 @@ import ( "github.com/gardener/landscaper/pkg/deployer/helm" deployerlib "github.com/gardener/landscaper/pkg/deployer/lib" "github.com/gardener/landscaper/pkg/deployer/lib/timeout" + "github.com/gardener/landscaper/pkg/utils" testutils "github.com/gardener/landscaper/test/utils" "github.com/gardener/landscaper/test/utils/envtest" "github.com/gardener/landscaper/test/utils/matchers" @@ -67,10 +65,10 @@ var _ = Describe("Deletion Manager", func() { Expect(err).ToNot(HaveOccurred()) resources = &resourceBuilder{state.Namespace} - deployer, err := helm.NewDeployer(testenv.Client, testenv.Client, testenv.Client, testenv.Client, logging.Discard(), helmv1alpha1.Configuration{}) + deployer, err := helm.NewDeployer(testenv.Client, testenv.Client, testenv.Client, testenv.Client, nil, logging.Discard(), helmv1alpha1.Configuration{}) Expect(err).ToNot(HaveOccurred()) - ctrl = deployerlib.NewController( + ctrl = deployerlib.NewController(nil, testenv.Client, testenv.Client, testenv.Client, testenv.Client, utils.NewFinishedObjectCache(), api.LandscaperScheme, diff --git a/pkg/deployer/helm/deployer.go b/pkg/deployer/helm/deployer.go index f9399a8dc..2bf52a78d 100644 --- a/pkg/deployer/helm/deployer.go +++ b/pkg/deployer/helm/deployer.go @@ -8,6 +8,7 @@ import ( "context" "time" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" @@ -36,15 +37,16 @@ const ( ) // NewDeployer creates a new deployer that reconciles deploy items of type helm. 
-func NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, - log logging.Logger, - config helmv1alpha1.Configuration) (deployerlib.Deployer, error) { +func NewDeployer( + lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, lsRestConfig *rest.Config, + log logging.Logger, config helmv1alpha1.Configuration) (deployerlib.Deployer, error) { dep := &deployer{ lsUncachedClient: lsUncachedClient, lsCachedClient: lsCachedClient, hostUncachedClient: hostUncachedClient, hostCachedClient: hostCachedClient, + lsRestConfig: lsRestConfig, log: log, config: config, hooks: extension.ReconcileExtensionHooks{}, @@ -58,6 +60,7 @@ type deployer struct { lsCachedClient client.Client hostUncachedClient client.Client hostCachedClient client.Client + lsRestConfig *rest.Config log logging.Logger config helmv1alpha1.Configuration @@ -69,7 +72,7 @@ func (d *deployer) Reconcile(ctx context.Context, lsCtx *lsv1alpha1.Context, di return err } - helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.config, di, rt, lsCtx) + helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.lsRestConfig, d.config, di, rt, lsCtx) if err != nil { err = lserrors.NewWrappedError(err, "Reconcile", "newRootLogger", err.Error()) return err @@ -106,7 +109,7 @@ func (d *deployer) Delete(ctx context.Context, lsCtx *lsv1alpha1.Context, di *ls return err } - helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.config, di, rt, lsCtx) + helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.lsRestConfig, d.config, di, rt, lsCtx) if err != nil { return err } @@ -129,7 +132,7 @@ func (d *deployer) ExtensionHooks() extension.ReconcileExtensionHooks { func (d *deployer) NextReconcile(ctx context.Context, last time.Time, di *lsv1alpha1.DeployItem) (*time.Time, error) { // 
todo: directly parse deploy items - helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.config, di, nil, nil) + helm, err := New(d.lsUncachedClient, d.lsCachedClient, d.hostUncachedClient, d.hostCachedClient, d.lsRestConfig, d.config, di, nil, nil) if err != nil { return nil, err } diff --git a/pkg/deployer/helm/ensure.go b/pkg/deployer/helm/ensure.go index 836c9a12e..02c5813e4 100644 --- a/pkg/deployer/helm/ensure.go +++ b/pkg/deployer/helm/ensure.go @@ -12,7 +12,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -44,9 +43,8 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor currOp := "ApplyFile" logger, ctx := logging.FromContextOrNew(ctx, []interface{}{lc.KeyMethod, currOp}) - _, targetClient, targetClientSet, err := h.TargetClient(ctx) - if err != nil { - return lserrors.NewWrappedError(err, currOp, "TargetClusterClient", err.Error()) + if err := h.ensureTargetAccess(ctx); err != nil { + return lserrors.NewWrappedError(err, currOp, "ensureTargetAccess", err.Error()) } if h.ProviderStatus == nil { @@ -66,7 +64,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor if shouldUseRealHelmDeployer { // Apply helm install/upgrade. Afterwards get the list of deployed resources by helm get release. // The list is filtered, i.e. it contains only the resources that are needed for the default readiness check. 
- realHelmDeployer := realhelmdeployer.NewRealHelmDeployer(ch, h.ProviderConfiguration, h.TargetRestConfig, targetClientSet, h.DeployItem) + realHelmDeployer := realhelmdeployer.NewRealHelmDeployer(ch, h.ProviderConfiguration, h.targetAccess, h.DeployItem) deployErr = realHelmDeployer.Deploy(ctx) if deployErr == nil { managedResourceStatusList, err := realHelmDeployer.GetManagedResourcesStatus(ctx) @@ -82,7 +80,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor return err } - deployErr = h.applyManifests(ctx, targetClient, targetClientSet, manifests) + deployErr = h.applyManifests(ctx, manifests) } // common error handling for deploy errors (h.applyManifests / realHelmDeployer.Deploy) @@ -95,6 +93,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor return deployErr } + var err error h.DeployItem.Status.ProviderStatus, err = kutil.ConvertToRawExtension(h.ProviderStatus, HelmScheme) if err != nil { return lserrors.NewWrappedError(err, currOp, "ProviderStatus", err.Error()) @@ -108,7 +107,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor return err } - if err := h.checkResourcesReady(ctx, targetClient, !shouldUseRealHelmDeployer); err != nil { + if err := h.checkResourcesReady(ctx, h.targetAccess.TargetClient(), !shouldUseRealHelmDeployer); err != nil { return err } @@ -116,7 +115,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor return err } - if err := h.readExportValues(ctx, currOp, targetClient, exports); err != nil { + if err := h.readExportValues(ctx, currOp, h.targetAccess.TargetClient(), exports); err != nil { return err } @@ -125,8 +124,7 @@ func (h *Helm) ApplyFiles(ctx context.Context, filesForManifestDeployer, crdsFor return nil } -func (h *Helm) applyManifests(ctx context.Context, targetClient client.Client, targetClientSet kubernetes.Interface, - manifests []managedresource.Manifest) error { +func (h *Helm) 
applyManifests(ctx context.Context, manifests []managedresource.Manifest) error { if _, err := timeout.TimeoutExceeded(ctx, h.DeployItem, TimeoutCheckpointHelmStartApplyManifests); err != nil { return err @@ -134,8 +132,8 @@ func (h *Helm) applyManifests(ctx context.Context, targetClient client.Client, t applier := resourcemanager.NewManifestApplier(resourcemanager.ManifestApplierOptions{ Decoder: serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder(), - KubeClient: targetClient, - Clientset: targetClientSet, + KubeClient: h.targetAccess.TargetClient(), + Clientset: h.targetAccess.TargetClientSet(), DefaultNamespace: h.ProviderConfiguration.Namespace, DeployItemName: h.DeployItem.Name, DeployItem: h.DeployItem, @@ -148,6 +146,7 @@ func (h *Helm) applyManifests(ctx context.Context, targetClient client.Client, t DeletionGroupsDuringUpdate: h.ProviderConfiguration.DeletionGroupsDuringUpdate, InterruptionChecker: interruption.NewStandardInterruptionChecker(h.DeployItem, h.lsUncachedClient), LsUncachedClient: h.lsUncachedClient, + LsRestConfig: h.lsRestConfig, }) _, err := applier.Apply(ctx) @@ -254,6 +253,7 @@ func (h *Helm) checkResourcesReady(ctx context.Context, client client.Client, fa InterruptionChecker: interruption.NewStandardInterruptionChecker(h.DeployItem, h.lsUncachedClient), LsClient: h.lsUncachedClient, DeployItem: h.DeployItem, + LsRestConfig: h.lsRestConfig, } err := customReadinessCheck.CheckResourcesReady(ctx) if err != nil { @@ -279,6 +279,7 @@ func (h *Helm) readExportValues(ctx context.Context, currOp string, targetClient InterruptionChecker: interruption.NewStandardInterruptionChecker(h.DeployItem, h.lsUncachedClient), LsClient: h.lsUncachedClient, DeployItem: h.DeployItem, + LsRestConfig: h.lsRestConfig, } resourceExports, err := resourcemanager.NewExporter(opts).Export(ctx, exportDefinition) @@ -316,21 +317,21 @@ func (h *Helm) deleteManifestsInGroups(ctx context.Context) error { return h.Writer().UpdateDeployItem(ctx, 
read_write_layer.W000067, h.DeployItem) } - _, targetClient, _, err := h.TargetClient(ctx) - if err != nil { + if err := h.ensureTargetAccess(ctx); err != nil { return err } interruptionChecker := interruption.NewStandardInterruptionChecker(h.DeployItem, h.lsUncachedClient) - err = resourcemanager.DeleteManagedResources( + err := resourcemanager.DeleteManagedResources( ctx, h.lsUncachedClient, h.ProviderStatus.ManagedResources, h.ProviderConfiguration.DeletionGroups, - targetClient, + h.targetAccess.TargetClient(), h.DeployItem, - interruptionChecker) + interruptionChecker, + h.lsRestConfig) if err != nil { return fmt.Errorf("failed deleting managed resources: %w", err) } @@ -351,15 +352,13 @@ func (h *Helm) deleteManifestsWithRealHelmDeployer(ctx context.Context, di *lsv1 return h.Writer().UpdateDeployItem(ctx, read_write_layer.W000047, h.DeployItem) } - _, _, targetClientSet, err := h.TargetClient(ctx) - if err != nil { + if err := h.ensureTargetAccess(ctx); err != nil { return err } - realHelmDeployer := realhelmdeployer.NewRealHelmDeployer(nil, h.ProviderConfiguration, - h.TargetRestConfig, targetClientSet, di) + realHelmDeployer := realhelmdeployer.NewRealHelmDeployer(nil, h.ProviderConfiguration, h.targetAccess, di) - err = realHelmDeployer.Undeploy(ctx) + err := realHelmDeployer.Undeploy(ctx) if err != nil { return err } diff --git a/pkg/deployer/helm/helm.go b/pkg/deployer/helm/helm.go index 78502cefc..c0cf049a8 100644 --- a/pkg/deployer/helm/helm.go +++ b/pkg/deployer/helm/helm.go @@ -6,7 +6,6 @@ package helm import ( "context" - "errors" "strings" "helm.sh/helm/v3/pkg/chart" @@ -14,7 +13,6 @@ import ( "helm.sh/helm/v3/pkg/engine" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -54,6 +52,7 @@ type Helm struct { lsCachedClient client.Client hostUncachedClient client.Client hostCachedClient client.Client + 
lsRestConfig *rest.Config Configuration helmv1alpha1.Configuration @@ -63,13 +62,13 @@ type Helm struct { ProviderConfiguration *helmv1alpha1.ProviderConfiguration ProviderStatus *helmv1alpha1.ProviderStatus - TargetKubeClient client.Client - TargetRestConfig *rest.Config - TargetClientSet kubernetes.Interface + targetAccess *lib.TargetAccess } // New creates a new internal helm item -func New(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, +func New( + lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, + lsRestConfig *rest.Config, helmconfig helmv1alpha1.Configuration, item *lsv1alpha1.DeployItem, rt *lsv1alpha1.ResolvedTarget, @@ -99,6 +98,7 @@ func New(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient } return &Helm{ + lsRestConfig: lsRestConfig, lsUncachedClient: lsUncachedClient, lsCachedClient: lsCachedClient, hostUncachedClient: hostUncachedClient, @@ -112,15 +112,21 @@ func New(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient }, nil } +func (h *Helm) ensureTargetAccess(ctx context.Context) (err error) { + if h.targetAccess == nil { + h.targetAccess, err = lib.NewTargetAccess(ctx, h.Target, h.lsUncachedClient, h.lsRestConfig) + } + return err +} + // Template loads the specified helm chart // and templates it with the given values. 
func (h *Helm) Template(ctx context.Context) (map[string]string, map[string]string, map[string]interface{}, *chart.Chart, lserrors.LsError) { currOp := "TemplateChart" - restConfig, _, _, err := h.TargetClient(ctx) - if err != nil { - return nil, nil, nil, nil, lserrors.NewWrappedError(err, currOp, "GetTargetClient", err.Error()) + if err := h.ensureTargetAccess(ctx); err != nil { + return nil, nil, nil, nil, lserrors.NewWrappedError(err, currOp, "ensureTargetAccess", err.Error()) } // download chart @@ -168,7 +174,7 @@ func (h *Helm) Template(ctx context.Context) (map[string]string, map[string]stri crdsForManifestDeployer := map[string]string{} shouldUseRealHelmDeployer := ptr.Deref[bool](h.ProviderConfiguration.HelmDeployment, true) if !shouldUseRealHelmDeployer { - filesForManifestDeployer, err = engine.RenderWithClient(ch, values, restConfig) + filesForManifestDeployer, err = engine.RenderWithClient(ch, values, h.targetAccess.TargetRestConfig()) if err != nil { return nil, nil, nil, nil, lserrors.NewWrappedError( err, currOp, "RenderHelmValues", err.Error(), lsv1alpha1.ErrorConfigurationProblem) @@ -182,25 +188,6 @@ func (h *Helm) Template(ctx context.Context) (map[string]string, map[string]stri return filesForManifestDeployer, crdsForManifestDeployer, values, ch, nil } -func (h *Helm) TargetClient(ctx context.Context) (*rest.Config, client.Client, kubernetes.Interface, error) { - if h.TargetKubeClient != nil { - return h.TargetRestConfig, h.TargetKubeClient, h.TargetClientSet, nil - } - if h.Target != nil { - restConfig, kubeClient, clientset, err := lib.GetRestConfigAndClientAndClientSet(ctx, h.Target, h.lsUncachedClient) - if err != nil { - return nil, nil, nil, err - } - - h.TargetRestConfig = restConfig - h.TargetKubeClient = kubeClient - h.TargetClientSet = clientset - - return restConfig, kubeClient, clientset, nil - } - return nil, nil, nil, errors.New("neither a target nor kubeconfig are defined") -} - func (h *Helm) isDownloadInfoError(err error) bool 
{ msg := err.Error() return strings.Contains(msg, "no chart name found") || diff --git a/pkg/deployer/helm/helm_suite_test.go b/pkg/deployer/helm/helm_suite_test.go index c945bd55d..78124b711 100644 --- a/pkg/deployer/helm/helm_suite_test.go +++ b/pkg/deployer/helm/helm_suite_test.go @@ -114,7 +114,7 @@ var _ = Describe("Template", func() { lsCtx := &lsv1alpha1.Context{} lsCtx.Name = lsv1alpha1.DefaultContextName lsCtx.Namespace = item.Namespace - h, err := helm.New(testenv.Client, testenv.Client, testenv.Client, testenv.Client, helmv1alpha1.Configuration{}, item, rt, lsCtx) + h, err := helm.New(testenv.Client, testenv.Client, testenv.Client, testenv.Client, nil, helmv1alpha1.Configuration{}, item, rt, lsCtx) Expect(err).ToNot(HaveOccurred()) files, crds, _, _, err := h.Template(ctx) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/deployer/helm/realhelmdeployer/real_helm_deployer.go b/pkg/deployer/helm/realhelmdeployer/real_helm_deployer.go index 712dc77a4..4902b88a4 100644 --- a/pkg/deployer/helm/realhelmdeployer/real_helm_deployer.go +++ b/pkg/deployer/helm/realhelmdeployer/real_helm_deployer.go @@ -13,6 +13,8 @@ import ( "strings" "sync" + "github.com/gardener/landscaper/pkg/deployer/lib" + "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/kube" @@ -64,8 +66,8 @@ type RealHelmDeployer struct { mutex sync.RWMutex } -func NewRealHelmDeployer(ch *chart.Chart, providerConfig *helmv1alpha1.ProviderConfiguration, targetRestConfig *rest.Config, - clientset kubernetes.Interface, di *lsv1alpha1.DeployItem) *RealHelmDeployer { +func NewRealHelmDeployer(ch *chart.Chart, providerConfig *helmv1alpha1.ProviderConfiguration, + targetAccess *lib.TargetAccess, di *lsv1alpha1.DeployItem) *RealHelmDeployer { return &RealHelmDeployer{ chart: ch, @@ -75,8 +77,8 @@ func NewRealHelmDeployer(ch *chart.Chart, providerConfig *helmv1alpha1.ProviderC rawValues: providerConfig.Values, helmConfig: providerConfig.HelmDeploymentConfig, createNamespace: 
providerConfig.CreateNamespace, - targetRestConfig: targetRestConfig, - apiResourceHandler: resourcemanager.CreateApiResourceHandler(clientset), + targetRestConfig: targetAccess.TargetRestConfig(), + apiResourceHandler: resourcemanager.CreateApiResourceHandler(targetAccess.TargetClientSet()), helmSecretManager: nil, di: di, messages: make([]string, 0), diff --git a/pkg/deployer/helm/test/e2e_test.go b/pkg/deployer/helm/test/e2e_test.go index 979383c4e..874a52d99 100644 --- a/pkg/deployer/helm/test/e2e_test.go +++ b/pkg/deployer/helm/test/e2e_test.go @@ -53,13 +53,11 @@ var _ = Describe("Helm Deployer", func() { ctx := context.Background() defer ctx.Done() - deployer, err := helmctrl.NewDeployer(testenv.Client, testenv.Client, testenv.Client, testenv.Client, - logging.Discard(), - helmv1alpha1.Configuration{}, - ) + deployer, err := helmctrl.NewDeployer(testenv.Client, testenv.Client, testenv.Client, testenv.Client, nil, + logging.Discard(), helmv1alpha1.Configuration{}) Expect(err).ToNot(HaveOccurred()) - ctrl := deployerlib.NewController( + ctrl := deployerlib.NewController(nil, testenv.Client, testenv.Client, testenv.Client, testenv.Client, utils.NewFinishedObjectCache(), api.LandscaperScheme, diff --git a/pkg/deployer/lib/controller.go b/pkg/deployer/lib/controller.go index 2f691bae1..9b5371766 100644 --- a/pkg/deployer/lib/controller.go +++ b/pkg/deployer/lib/controller.go @@ -18,6 +18,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -98,7 +99,8 @@ func Add(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient if err := args.Validate(); err != nil { return err } - con := NewController(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, + con := NewController(lsMgr.GetConfig(), + lsUncachedClient, 
lsCachedClient, hostUncachedClient, hostCachedClient, finishedObjectCache, lsMgr.GetScheme(), lsMgr.GetEventRecorderFor(args.Name), @@ -119,6 +121,7 @@ func Add(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient // controller reconciles deployitems and delegates the business logic to the configured Deployer. type controller struct { + lsRestConfig *rest.Config lsUncachedClient client.Client lsCachedClient client.Client hostUncachedClient client.Client @@ -143,7 +146,8 @@ type controller struct { } // NewController creates a new generic deployitem controller. -func NewController(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, +func NewController(lsRestConfig *rest.Config, + lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, finishedObjectCache *lsutil.FinishedObjectCache, lsScheme *runtime.Scheme, lsEventRecorder record.EventRecorder, @@ -156,6 +160,7 @@ func NewController(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCac wc := lsutil.NewWorkerCounter(maxNumberOfWorkers) return &controller{ + lsRestConfig: lsRestConfig, lsUncachedClient: lsUncachedClient, lsCachedClient: lsCachedClient, hostUncachedClient: hostUncachedClient, diff --git a/pkg/deployer/lib/readinesscheck/customreadinesscheck.go b/pkg/deployer/lib/readinesscheck/customreadinesscheck.go index dbee280bf..4400afdf6 100644 --- a/pkg/deployer/lib/readinesscheck/customreadinesscheck.go +++ b/pkg/deployer/lib/readinesscheck/customreadinesscheck.go @@ -11,6 +11,8 @@ import ( "reflect" "strings" + "k8s.io/client-go/rest" + "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -39,6 +41,7 @@ type CustomReadinessCheck struct { InterruptionChecker interruption.InterruptionChecker LsClient client.Client DeployItem *lsv1alpha1.DeployItem + LsRestConfig *rest.Config } // CheckResourcesReady starts a custom readiness check by checking the 
readiness of the submitted resources @@ -48,7 +51,7 @@ func (c *CustomReadinessCheck) CheckResourcesReady(ctx context.Context) error { return nil } - targetClient, err := lib.GetTargetClient(ctx, c.Client, c.LsClient, c.DeployItem, c.Configuration.TargetName) + targetClient, err := lib.GetTargetClientConsideringSecondaryTarget(ctx, c.Client, c.LsClient, c.DeployItem, c.Configuration.TargetName, c.LsRestConfig) if err != nil { return err } diff --git a/pkg/deployer/lib/resourcemanager/deletiongroup.go b/pkg/deployer/lib/resourcemanager/deletiongroup.go index 343cde2cc..70c15ee2e 100644 --- a/pkg/deployer/lib/resourcemanager/deletiongroup.go +++ b/pkg/deployer/lib/resourcemanager/deletiongroup.go @@ -12,15 +12,15 @@ import ( apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/gardener/landscaper/pkg/deployer/lib" - lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" "github.com/gardener/landscaper/apis/deployer/utils/managedresource" kutil "github.com/gardener/landscaper/controller-utils/pkg/kubernetes" "github.com/gardener/landscaper/controller-utils/pkg/logging" lc "github.com/gardener/landscaper/controller-utils/pkg/logging/constants" + "github.com/gardener/landscaper/pkg/deployer/lib" "github.com/gardener/landscaper/pkg/deployer/lib/interruption" "github.com/gardener/landscaper/pkg/deployer/lib/timeout" ) @@ -43,6 +43,7 @@ func NewDeletionGroup( deployItem *lsv1alpha1.DeployItem, primaryTargetClient client.Client, interruptionChecker interruption.InterruptionChecker, + lsRestConfig *rest.Config, ) (group *DeletionGroup, err error) { if definition.IsPredefined() && definition.IsCustom() { return nil, fmt.Errorf("invalid deletion group: predefinedResourceGroup and customResourceGroup must not both be set") @@ -60,7 +61,7 @@ func NewDeletionGroup( targetClient := primaryTargetClient if 
isCustomWithSecondaryTarget { - targetClient, err = lib.GetTargetClient(ctx, primaryTargetClient, lsUncachedClient, deployItem, definition.CustomResourceGroup.TargetName) + targetClient, err = lib.GetTargetClientConsideringSecondaryTarget(ctx, primaryTargetClient, lsUncachedClient, deployItem, definition.CustomResourceGroup.TargetName, lsRestConfig) if err != nil { return nil, err } diff --git a/pkg/deployer/lib/resourcemanager/deletionmanager.go b/pkg/deployer/lib/resourcemanager/deletionmanager.go index bf12c71f5..8918b76d2 100644 --- a/pkg/deployer/lib/resourcemanager/deletionmanager.go +++ b/pkg/deployer/lib/resourcemanager/deletionmanager.go @@ -3,6 +3,7 @@ package resourcemanager import ( "context" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" @@ -18,6 +19,7 @@ func DeleteManagedResources( targetClient client.Client, deployItem *lsv1alpha1.DeployItem, interruptionChecker interruption.InterruptionChecker, + lsRestConfig *rest.Config, ) error { if len(managedResources) == 0 { return nil @@ -32,7 +34,7 @@ func DeleteManagedResources( groups := make([]*DeletionGroup, len(groupDefinitions)) for i := range groupDefinitions { var err error - groups[i], err = NewDeletionGroup(ctx, lsUncachedClient, groupDefinitions[i], deployItem, targetClient, interruptionChecker) + groups[i], err = NewDeletionGroup(ctx, lsUncachedClient, groupDefinitions[i], deployItem, targetClient, interruptionChecker, lsRestConfig) if err != nil { return err } diff --git a/pkg/deployer/lib/resourcemanager/exporter.go b/pkg/deployer/lib/resourcemanager/exporter.go index bce62af05..028b6d6d9 100644 --- a/pkg/deployer/lib/resourcemanager/exporter.go +++ b/pkg/deployer/lib/resourcemanager/exporter.go @@ -10,6 +10,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" @@ -32,6 +33,7 
@@ type ExporterOptions struct { InterruptionChecker interruption.InterruptionChecker LsClient client.Client DeployItem *lsv1alpha1.DeployItem + LsRestConfig *rest.Config } // Exporter defines the export of data from manifests. @@ -40,6 +42,7 @@ type Exporter struct { interruptionChecker interruption.InterruptionChecker lsClient client.Client deployItem *lsv1alpha1.DeployItem + lsRestConfig *rest.Config } // NewExporter creates a new exporter. @@ -49,6 +52,7 @@ func NewExporter(opts ExporterOptions) *Exporter { interruptionChecker: opts.InterruptionChecker, lsClient: opts.LsClient, deployItem: opts.DeployItem, + lsRestConfig: opts.LsRestConfig, } if exporter.interruptionChecker == nil { @@ -107,7 +111,7 @@ func (e *Exporter) Export(ctx context.Context, exports *managedresource.Exports) } func (e *Exporter) doExport(ctx context.Context, export managedresource.Export) (map[string]interface{}, error) { - targetClient, err := lib.GetTargetClient(ctx, e.kubeClient, e.lsClient, e.deployItem, export.TargetName) + targetClient, err := lib.GetTargetClientConsideringSecondaryTarget(ctx, e.kubeClient, e.lsClient, e.deployItem, export.TargetName, e.lsRestConfig) if err != nil { return nil, err } diff --git a/pkg/deployer/lib/resourcemanager/objectapplier.go b/pkg/deployer/lib/resourcemanager/objectapplier.go index 4adf98c3d..8e72bd07b 100644 --- a/pkg/deployer/lib/resourcemanager/objectapplier.go +++ b/pkg/deployer/lib/resourcemanager/objectapplier.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/types" apimacherrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" @@ -72,6 +73,7 @@ type ManifestApplierOptions struct { InterruptionChecker interruption.InterruptionChecker LsUncachedClient client.Client + LsRestConfig *rest.Config } // ManifestApplier creates or updated manifest based on their definition. 
@@ -89,6 +91,7 @@ type ManifestApplier struct { deletionGroupsDuringUpdate []managedresource.DeletionGroupDefinition interruptionChecker interruption.InterruptionChecker lsUncachedClient client.Client + lsRestConfig *rest.Config // properties created during runtime @@ -140,6 +143,7 @@ func NewManifestApplier(opts ManifestApplierOptions) *ManifestApplier { interruptionChecker: opts.InterruptionChecker, apiResourceHandler: CreateApiResourceHandler(opts.Clientset), lsUncachedClient: opts.LsUncachedClient, + lsRestConfig: opts.LsRestConfig, } } @@ -449,6 +453,7 @@ func (a *ManifestApplier) cleanupOrphanedResourcesInGroups(ctx context.Context, a.kubeClient, a.deployItem, a.interruptionChecker, + a.lsRestConfig, ) } diff --git a/pkg/deployer/lib/target_access.go b/pkg/deployer/lib/target_access.go new file mode 100644 index 000000000..1d21ef14e --- /dev/null +++ b/pkg/deployer/lib/target_access.go @@ -0,0 +1,168 @@ +package lib + +import ( + "context" + "errors" + "fmt" + + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + "github.com/gardener/landscaper/apis/core/v1alpha1/targettypes" +) + +// defaultExpirationSeconds is the default expiration duration for tokens generated for OIDC Targets and Self Targets. +const defaultExpirationSeconds = 86400 // = 1 day + +// TargetAccess bundles the various objects to access a target cluster. 
+type TargetAccess struct { + targetClient client.Client + targetRestConfig *rest.Config + targetClientSet kubernetes.Interface +} + +func (ta *TargetAccess) TargetClient() client.Client { + return ta.targetClient +} + +func (ta *TargetAccess) TargetRestConfig() *rest.Config { + return ta.targetRestConfig +} + +func (ta *TargetAccess) TargetClientSet() kubernetes.Interface { + return ta.targetClientSet +} + +// NewTargetAccess constructs a TargetAccess, handling the different subtypes of kubernetes-cluster Targets, namely: +// - Targets with a kubeconfig, +// - OIDC Targets, and +// - Self Targets, i.e. Targets pointing to the resource cluster watched by the Landscaper. +func NewTargetAccess(ctx context.Context, resolvedTarget *lsv1alpha1.ResolvedTarget, + lsUncachedClient client.Client, lsRestConfig *rest.Config) (_ *TargetAccess, err error) { + + if resolvedTarget == nil { + return nil, errors.New("no target defined") + } + + if resolvedTarget.Target == nil { + return nil, fmt.Errorf("resolved target does not contain the original target") + } + + targetConfig := &targettypes.KubernetesClusterTargetConfig{} + if err := yaml.Unmarshal([]byte(resolvedTarget.Content), targetConfig); err != nil { + return nil, fmt.Errorf("unable to parse target configuration: %w", err) + } + + var restConfig *rest.Config + if targetConfig.Kubeconfig.StrVal != nil { + kubeconfigBytes := []byte(*targetConfig.Kubeconfig.StrVal) + restConfig, err = clientcmd.RESTConfigFromKubeConfig(kubeconfigBytes) + if err != nil { + return nil, fmt.Errorf("unable to create rest config from kubeconfig: %w", err) + } + + } else if targetConfig.OIDCConfig != nil { + restConfig, err = getRestConfigForOIDCTarget(ctx, targetConfig.OIDCConfig, resolvedTarget, lsUncachedClient) + if err != nil { + return nil, err + } + + } else if targetConfig.SelfConfig != nil { + restConfig, err = getRestConfigForSelfTarget(ctx, targetConfig.SelfConfig, resolvedTarget, lsUncachedClient, lsRestConfig) + if err != nil { + 
return nil, err + } + + } else { + return nil, fmt.Errorf("target contains neither kubeconfig, nor oidc config, nor self config") + } + + targetClient, err := client.New(restConfig, client.Options{}) + if err != nil { + return nil, err + } + + targetClientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, err + } + + return &TargetAccess{ + targetClient: targetClient, + targetRestConfig: restConfig, + targetClientSet: targetClientSet, + }, nil +} + +func getRestConfigForOIDCTarget(ctx context.Context, oidcConfig *targettypes.OIDCConfig, resolvedTarget *lsv1alpha1.ResolvedTarget, lsUncachedClient client.Client) (*rest.Config, error) { + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: resolvedTarget.Namespace, + Name: oidcConfig.ServiceAccount.Name, + }, + } + + expirationSeconds := oidcConfig.ExpirationSeconds + if expirationSeconds == nil { + expirationSeconds = ptr.To[int64](defaultExpirationSeconds) + } + + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: oidcConfig.Audience, + ExpirationSeconds: expirationSeconds, + }, + } + + if err := lsUncachedClient.SubResource("token").Create(ctx, serviceAccount, tokenRequest); err != nil { + return nil, fmt.Errorf("unable to create token for oidc target: %w", err) + } + + return &rest.Config{ + Host: oidcConfig.Server, + BearerToken: tokenRequest.Status.Token, + TLSClientConfig: rest.TLSClientConfig{ + CAData: oidcConfig.CAData, + }, + }, nil +} + +func getRestConfigForSelfTarget(ctx context.Context, selfConfig *targettypes.SelfConfig, + resolvedTarget *lsv1alpha1.ResolvedTarget, lsUncachedClient client.Client, lsRestConfig *rest.Config) (*rest.Config, error) { + + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: resolvedTarget.Namespace, + Name: selfConfig.ServiceAccount.Name, + }, + } + + expirationSeconds := selfConfig.ExpirationSeconds + if expirationSeconds 
== nil { + expirationSeconds = ptr.To[int64](defaultExpirationSeconds) + } + + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + ExpirationSeconds: expirationSeconds, + }, + } + + if err := lsUncachedClient.SubResource("token").Create(ctx, serviceAccount, tokenRequest); err != nil { + return nil, fmt.Errorf("unable to create token for self target: %w", err) + } + + return &rest.Config{ + Host: lsRestConfig.Host, + BearerToken: tokenRequest.Status.Token, + TLSClientConfig: lsRestConfig.TLSClientConfig, + }, nil +} diff --git a/pkg/deployer/lib/target_client_provider.go b/pkg/deployer/lib/target_client_provider.go index fd73b1848..d58e467e4 100644 --- a/pkg/deployer/lib/target_client_provider.go +++ b/pkg/deployer/lib/target_client_provider.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" @@ -11,16 +12,17 @@ import ( "github.com/gardener/landscaper/pkg/utils/read_write_layer" ) -// GetTargetClient is used to determine the client to read resources for custom readiness checks, export collection, and deletion groups. -// Usually it is the client obtained from the Target of the DeployItem. +// GetTargetClientConsideringSecondaryTarget is used to determine the client to read resources for custom readiness checks, +// export collection, and deletion groups. Usually it is the client obtained from the Target of the DeployItem. // In some scenarios however, the Landscaper deploys an installer on a primary target cluster, and the installer // deploys the actual application on a secondary target cluster. 
-func GetTargetClient( +func GetTargetClientConsideringSecondaryTarget( ctx context.Context, primaryTargetClient client.Client, lsClient client.Client, deployItem *lsv1alpha1.DeployItem, - secondaryTargetName *string) (targetClient client.Client, err error) { + secondaryTargetName *string, + lsRestConfig *rest.Config) (targetClient client.Client, err error) { if secondaryTargetName == nil { return primaryTargetClient, nil @@ -49,10 +51,10 @@ func GetTargetClient( return nil, fmt.Errorf("unable to resolve secondary target %s: %w", *secondaryTargetName, err) } - _, targetClient, _, err = GetRestConfigAndClientAndClientSet(ctx, resolvedTarget, lsClient) + targetAccess, err := NewTargetAccess(ctx, resolvedTarget, lsClient, lsRestConfig) if err != nil { return nil, fmt.Errorf("unable to get secondary target client %s: %w", *secondaryTargetName, err) } - return targetClient, nil + return targetAccess.TargetClient(), nil } diff --git a/pkg/deployer/lib/utils.go b/pkg/deployer/lib/utils.go index 415b282c5..626915cda 100644 --- a/pkg/deployer/lib/utils.go +++ b/pkg/deployer/lib/utils.go @@ -7,27 +7,18 @@ package lib import ( "context" "encoding/json" - "errors" "fmt" "reflect" - authenticationv1 "k8s.io/api/authentication/v1" - "k8s.io/utils/ptr" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/yaml" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" lsv1alpha1helper "github.com/gardener/landscaper/apis/core/v1alpha1/helper" - "github.com/gardener/landscaper/apis/core/v1alpha1/targettypes" lserrors "github.com/gardener/landscaper/apis/errors" kutil "github.com/gardener/landscaper/controller-utils/pkg/kubernetes" 
"github.com/gardener/landscaper/controller-utils/pkg/landscaper/targetresolver" @@ -39,92 +30,6 @@ import ( "github.com/gardener/landscaper/pkg/utils/read_write_layer" ) -func GetRestConfigAndClientAndClientSet(ctx context.Context, resolvedTarget *lsv1alpha1.ResolvedTarget, lsUncachedClient client.Client) (_ *rest.Config, _ client.Client, _ kubernetes.Interface, err error) { - var restConfig *rest.Config - - if resolvedTarget.Target == nil { - return nil, nil, nil, fmt.Errorf("resolved target does not contain the original target") - } - - targetConfig := &targettypes.KubernetesClusterTargetConfig{} - if err := yaml.Unmarshal([]byte(resolvedTarget.Content), targetConfig); err != nil { - return nil, nil, nil, fmt.Errorf("unable to parse target confĂ­guration: %w", err) - } - - if targetConfig.Kubeconfig.StrVal != nil { - kubeconfigBytes, err := GetKubeconfigFromTargetConfig(targetConfig) - if err != nil { - return nil, nil, nil, err - } - - kubeconfig, err := clientcmd.NewClientConfigFromBytes(kubeconfigBytes) - if err != nil { - return nil, nil, nil, err - } - - restConfig, err = kubeconfig.ClientConfig() - if err != nil { - return nil, nil, nil, err - } - - } else if targetConfig.OIDCConfig != nil { - serviceAccount := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: resolvedTarget.Namespace, - Name: targetConfig.OIDCConfig.ServiceAccount.Name, - }, - } - - expirationSeconds := targetConfig.OIDCConfig.ExpirationSeconds - if expirationSeconds == nil { - // use 1 day as default - expirationSeconds = ptr.To[int64](86400) - } - - tokenRequest := &authenticationv1.TokenRequest{ - Spec: authenticationv1.TokenRequestSpec{ - Audiences: targetConfig.OIDCConfig.Audience, - ExpirationSeconds: expirationSeconds, - }, - } - - if err = lsUncachedClient.SubResource("token").Create(ctx, serviceAccount, tokenRequest); err != nil { - return nil, nil, nil, fmt.Errorf("unable to create token: %w", err) - } - - restConfig = &rest.Config{ - Host: 
targetConfig.OIDCConfig.Server, - BearerToken: tokenRequest.Status.Token, - TLSClientConfig: rest.TLSClientConfig{ - CAData: targetConfig.OIDCConfig.CAData, - }, - } - - } else { - return nil, nil, nil, fmt.Errorf("unable build rest config from resolved target") - } - - kubeClient, err := client.New(restConfig, client.Options{}) - if err != nil { - return nil, nil, nil, err - } - clientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return nil, nil, nil, err - } - - return restConfig, kubeClient, clientset, nil -} - -// GetKubeconfigFromTargetConfig fetches the kubeconfig from a given config. -// If the config defines the target from a secret that secret is read from all provided clients. -func GetKubeconfigFromTargetConfig(config *targettypes.KubernetesClusterTargetConfig) ([]byte, error) { - if config.Kubeconfig.StrVal != nil { - return []byte(*config.Kubeconfig.StrVal), nil - } - return nil, errors.New("kubeconfig not defined") -} - // CreateOrUpdateExport creates or updates the export of a deploy item. 
func CreateOrUpdateExport(ctx context.Context, kubeWriter *read_write_layer.Writer, kubeClient client.Client, deployItem *lsv1alpha1.DeployItem, values interface{}) error { if values == nil { diff --git a/pkg/deployer/manifest/add.go b/pkg/deployer/manifest/add.go index 9cafa64eb..187e97f53 100644 --- a/pkg/deployer/manifest/add.go +++ b/pkg/deployer/manifest/add.go @@ -39,7 +39,7 @@ func AddDeployerToManager(lsUncachedClient, lsCachedClient, hostUncachedClient, } log.Info("access to critical problems allowed") - d, err := NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, + d, err := NewDeployer(lsMgr.GetConfig(), lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, log, config, ) diff --git a/pkg/deployer/manifest/controller.go b/pkg/deployer/manifest/controller.go index 29a48a0c2..56cdba1c5 100644 --- a/pkg/deployer/manifest/controller.go +++ b/pkg/deployer/manifest/controller.go @@ -8,13 +8,13 @@ import ( "context" "time" - manifestv1alpha2 "github.com/gardener/landscaper/apis/deployer/manifest/v1alpha2" - "github.com/gardener/landscaper/controller-utils/pkg/logging" - + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + manifestv1alpha2 "github.com/gardener/landscaper/apis/deployer/manifest/v1alpha2" crval "github.com/gardener/landscaper/apis/deployer/utils/continuousreconcile/validation" + "github.com/gardener/landscaper/controller-utils/pkg/logging" deployerlib "github.com/gardener/landscaper/pkg/deployer/lib" cr "github.com/gardener/landscaper/pkg/deployer/lib/continuousreconcile" "github.com/gardener/landscaper/pkg/deployer/lib/extension" @@ -30,11 +30,13 @@ const ( ) // NewDeployer creates a new deployer that reconciles deploy items of type helm. 
-func NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, +func NewDeployer(lsRestConfig *rest.Config, + lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient client.Client, log logging.Logger, config manifestv1alpha2.Configuration) (deployerlib.Deployer, error) { dep := &deployer{ + lsRestConfig: lsRestConfig, lsUncachedClient: lsUncachedClient, lsCachedClient: lsCachedClient, hostUncachedClient: hostUncachedClient, @@ -48,6 +50,7 @@ func NewDeployer(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCache } type deployer struct { + lsRestConfig *rest.Config lsUncachedClient client.Client lsCachedClient client.Client hostUncachedClient client.Client @@ -62,6 +65,7 @@ func (d *deployer) Reconcile(ctx context.Context, _ *lsv1alpha1.Context, di *lsv if err != nil { return err } + manifest.SetLsRestConfig(d.lsRestConfig) return manifest.Reconcile(ctx) } @@ -70,6 +74,7 @@ func (d deployer) Delete(ctx context.Context, _ *lsv1alpha1.Context, di *lsv1alp if err != nil { return err } + manifest.SetLsRestConfig(d.lsRestConfig) return manifest.Delete(ctx) } @@ -87,6 +92,7 @@ func (d *deployer) NextReconcile(ctx context.Context, last time.Time, di *lsv1al if err != nil { return nil, err } + manifest.SetLsRestConfig(d.lsRestConfig) if crval.ContinuousReconcileSpecIsEmpty(manifest.ProviderConfiguration.ContinuousReconcile) { // no continuous reconciliation configured return nil, nil diff --git a/pkg/deployer/manifest/ensure.go b/pkg/deployer/manifest/ensure.go index 9f5fcfcf4..16334c7b5 100644 --- a/pkg/deployer/manifest/ensure.go +++ b/pkg/deployer/manifest/ensure.go @@ -39,10 +39,8 @@ func (m *Manifest) Reconcile(ctx context.Context) error { m.DeployItem.Status.Phase = lsv1alpha1.DeployItemPhases.Progressing - _, targetClient, targetClientSet, err := m.TargetClient(ctx) - if err != nil { - return lserrors.NewWrappedError(err, - currOp, "TargetClusterClient", err.Error()) + if err := 
m.ensureTargetAccess(ctx); err != nil { + return lserrors.NewWrappedError(err, currOp, "ensureTargetAccess", err.Error()) } if m.ProviderStatus == nil { @@ -57,8 +55,8 @@ func (m *Manifest) Reconcile(ctx context.Context) error { applier := resourcemanager.NewManifestApplier(resourcemanager.ManifestApplierOptions{ Decoder: serializer.NewCodecFactory(Scheme).UniversalDecoder(), - KubeClient: targetClient, - Clientset: targetClientSet, + KubeClient: m.targetAccess.TargetClient(), + Clientset: m.targetAccess.TargetClientSet(), DeployItemName: m.DeployItem.Name, DeployItem: m.DeployItem, UpdateStrategy: m.ProviderConfiguration.UpdateStrategy, @@ -70,6 +68,7 @@ func (m *Manifest) Reconcile(ctx context.Context) error { DeletionGroupsDuringUpdate: m.ProviderConfiguration.DeletionGroupsDuringUpdate, InterruptionChecker: interruption.NewStandardInterruptionChecker(m.DeployItem, m.lsUncachedClient), LsUncachedClient: m.lsUncachedClient, + LsRestConfig: m.lsRestConfig, }) patchInfos, err := applier.Apply(ctx) @@ -97,7 +96,7 @@ func (m *Manifest) Reconcile(ctx context.Context) error { return err } - if err := m.CheckResourcesReady(ctx, targetClient); err != nil { + if err := m.CheckResourcesReady(ctx, m.targetAccess.TargetClient()); err != nil { return err } @@ -107,10 +106,11 @@ func (m *Manifest) Reconcile(ctx context.Context) error { } opts := resourcemanager.ExporterOptions{ - KubeClient: targetClient, + KubeClient: m.targetAccess.TargetClient(), InterruptionChecker: interruption.NewStandardInterruptionChecker(m.DeployItem, m.lsUncachedClient), LsClient: m.lsUncachedClient, DeployItem: m.DeployItem, + LsRestConfig: m.lsRestConfig, } exporter := resourcemanager.NewExporter(opts) @@ -175,6 +175,7 @@ func (m *Manifest) CheckResourcesReady(ctx context.Context, client client.Client InterruptionChecker: interruption.NewStandardInterruptionChecker(m.DeployItem, m.lsUncachedClient), LsClient: m.lsUncachedClient, DeployItem: m.DeployItem, + LsRestConfig: m.lsRestConfig, } err := 
customReadinessCheck.CheckResourcesReady(ctx) if err != nil { @@ -205,9 +206,8 @@ func (m *Manifest) deleteManifestsInGroups(ctx context.Context) error { return err } - _, targetClient, _, err := m.TargetClient(ctx) - if err != nil { - return lserrors.NewWrappedError(err, op, "TargetClusterClient", err.Error()) + if err := m.ensureTargetAccess(ctx); err != nil { + return lserrors.NewWrappedError(err, op, "ensureTargetAccess", err.Error()) } managedResources := []managedresource.ManagedResourceStatus{} @@ -219,7 +219,7 @@ func (m *Manifest) deleteManifestsInGroups(ctx context.Context) error { lc.KeyResourceKind, mr.Resource.Kind) mrLogger.Debug("Checking resource") - ok, err := resourcemanager.FilterByPolicy(mrCtx, mr, targetClient, m.DeployItem.Name) + ok, err := resourcemanager.FilterByPolicy(mrCtx, mr, m.targetAccess.TargetClient(), m.DeployItem.Name) if err != nil { return err } @@ -227,7 +227,7 @@ func (m *Manifest) deleteManifestsInGroups(ctx context.Context) error { continue } - notFound, err := resourcemanager.AnnotateAndPatchBeforeDelete(ctx, mr, targetClient) + notFound, err := resourcemanager.AnnotateAndPatchBeforeDelete(ctx, mr, m.targetAccess.TargetClient()) if err != nil { return err } @@ -241,14 +241,15 @@ func (m *Manifest) deleteManifestsInGroups(ctx context.Context) error { interruptionChecker := interruption.NewStandardInterruptionChecker(m.DeployItem, m.lsUncachedClient) - err = resourcemanager.DeleteManagedResources( + err := resourcemanager.DeleteManagedResources( ctx, m.lsUncachedClient, managedResources, m.ProviderConfiguration.DeletionGroups, - targetClient, + m.targetAccess.TargetClient(), m.DeployItem, interruptionChecker, + m.lsRestConfig, ) if err != nil { return fmt.Errorf("failed deleting managed resources: %w", err) diff --git a/pkg/deployer/manifest/manifest.go b/pkg/deployer/manifest/manifest.go index 7a20f767c..736b66cf4 100644 --- a/pkg/deployer/manifest/manifest.go +++ b/pkg/deployer/manifest/manifest.go @@ -6,25 +6,19 @@ package 
manifest import ( "context" - "errors" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - lserrors "github.com/gardener/landscaper/apis/errors" - - "github.com/gardener/landscaper/pkg/deployer/lib" - - "github.com/gardener/landscaper/pkg/utils" - lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" manifestinstall "github.com/gardener/landscaper/apis/deployer/manifest/install" manifestv1alpha2 "github.com/gardener/landscaper/apis/deployer/manifest/v1alpha2" - manifestvalidation "github.com/gardener/landscaper/apis/deployer/manifest/validation" + lserrors "github.com/gardener/landscaper/apis/errors" "github.com/gardener/landscaper/pkg/api" + "github.com/gardener/landscaper/pkg/deployer/lib" + "github.com/gardener/landscaper/pkg/utils" ) const ( @@ -40,6 +34,7 @@ func init() { // Manifest is the internal representation of a DeployItem of Type Manifest type Manifest struct { + lsRestConfig *rest.Config lsUncachedClient client.Client hostUncachedClient client.Client @@ -50,9 +45,7 @@ type Manifest struct { ProviderConfiguration *manifestv1alpha2.ProviderConfiguration ProviderStatus *manifestv1alpha2.ProviderStatus - TargetKubeClient client.Client - TargetRestConfig *rest.Config - TargetClientSet kubernetes.Interface + targetAccess *lib.TargetAccess } // NewDeployItemBuilder creates a new deployitem builder for manifest deployitems @@ -101,21 +94,13 @@ func New(lsUncachedClient client.Client, hostUncachedClient client.Client, }, nil } -func (m *Manifest) TargetClient(ctx context.Context) (*rest.Config, client.Client, kubernetes.Interface, error) { - if m.TargetKubeClient != nil { - return m.TargetRestConfig, m.TargetKubeClient, m.TargetClientSet, nil - } - if m.Target != nil { - restConfig, kubeClient, clientset, err := lib.GetRestConfigAndClientAndClientSet(ctx, m.Target, m.lsUncachedClient) - if err != nil { - return nil, nil, nil, err - } - - m.TargetRestConfig = restConfig 
- m.TargetKubeClient = kubeClient - m.TargetClientSet = clientset +func (m *Manifest) SetLsRestConfig(lsRestConfig *rest.Config) { + m.lsRestConfig = lsRestConfig +} - return restConfig, kubeClient, clientset, nil +func (m *Manifest) ensureTargetAccess(ctx context.Context) (err error) { + if m.targetAccess == nil { + m.targetAccess, err = lib.NewTargetAccess(ctx, m.Target, m.lsUncachedClient, m.lsRestConfig) } - return nil, nil, nil, errors.New("neither a target nor kubeconfig are defined") + return err } diff --git a/pkg/deployer/manifest/test/e2e_test.go b/pkg/deployer/manifest/test/e2e_test.go index b83b813df..7e3a6ea2d 100644 --- a/pkg/deployer/manifest/test/e2e_test.go +++ b/pkg/deployer/manifest/test/e2e_test.go @@ -52,13 +52,13 @@ var _ = Describe("Manifest Deployer", func() { state, err = testenv.InitState(context.TODO()) Expect(err).ToNot(HaveOccurred()) - deployer, err := manifestctlr.NewDeployer(testenv.Client, testenv.Client, testenv.Client, testenv.Client, + deployer, err := manifestctlr.NewDeployer(nil, testenv.Client, testenv.Client, testenv.Client, testenv.Client, logging.Discard(), manifestv1alpha2.Configuration{}, ) Expect(err).ToNot(HaveOccurred()) - ctrl = deployerlib.NewController( + ctrl = deployerlib.NewController(nil, testenv.Client, testenv.Client, testenv.Client, testenv.Client, utils.NewFinishedObjectCache(), api.LandscaperScheme, diff --git a/pkg/deployer/mock/add.go b/pkg/deployer/mock/add.go index 0a16168dc..fd23f1bdb 100644 --- a/pkg/deployer/mock/add.go +++ b/pkg/deployer/mock/add.go @@ -71,7 +71,8 @@ func NewController(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCac return nil, err } - return deployerlib.NewController(lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, + return deployerlib.NewController(nil, + lsUncachedClient, lsCachedClient, hostUncachedClient, hostCachedClient, finishedObjectCache, scheme, eventRecorder, scheme, deployerlib.DeployerArgs{ diff --git 
a/test/integration/suite_test.go b/test/integration/suite_test.go index d05939178..ebf348b1e 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -9,8 +9,14 @@ import ( "flag" "testing" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/gardener/landscaper/hack/testcluster/pkg/utils" + "github.com/gardener/landscaper/test/framework" "github.com/gardener/landscaper/test/integration/core" "github.com/gardener/landscaper/test/integration/dependencies" + "github.com/gardener/landscaper/test/integration/deployers" "github.com/gardener/landscaper/test/integration/deployitems" "github.com/gardener/landscaper/test/integration/executions" "github.com/gardener/landscaper/test/integration/importexport" @@ -21,13 +27,6 @@ import ( "github.com/gardener/landscaper/test/integration/targets" "github.com/gardener/landscaper/test/integration/tutorial" "github.com/gardener/landscaper/test/integration/webhook" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/gardener/landscaper/hack/testcluster/pkg/utils" - "github.com/gardener/landscaper/test/framework" - "github.com/gardener/landscaper/test/integration/deployers" ) var opts *framework.Options diff --git a/test/integration/targets/oidc_targets.go b/test/integration/targets/oidc_targets.go index fcadc5711..8b779ee13 100644 --- a/test/integration/targets/oidc_targets.go +++ b/test/integration/targets/oidc_targets.go @@ -2,9 +2,7 @@ package targets import ( "context" - "encoding/json" - "fmt" - "path" + "encoding/base64" "path/filepath" "time" @@ -14,10 +12,8 @@ import ( v12 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/ptr" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" - "github.com/gardener/landscaper/apis/core/v1alpha1/targettypes" kutil "github.com/gardener/landscaper/controller-utils/pkg/kubernetes" lsutils 
"github.com/gardener/landscaper/pkg/utils/landscaper" "github.com/gardener/landscaper/test/framework" @@ -28,156 +24,75 @@ func OIDCTargetTests(ctx context.Context, f *framework.Framework) { Describe("OIDC Targets", func() { - const ( - openIDConnectApiVersion = "authentication.gardener.cloud/v1alpha1" - openIDConnectKind = "OpenIDConnect" - ) - var ( testdataDir = filepath.Join(f.RootPath, "test", "integration", "testdata", "targets", "oidc-targets") state = f.Register() ) - createOpenIDConnect := func(name, clientID, issuerURL, prefix string) (*unstructured.Unstructured, error) { - unstr := &unstructured.Unstructured{} - unstr.SetUnstructuredContent(map[string]interface{}{ - "spec": map[string]interface{}{ - "clientID": clientID, - "issuerURL": issuerURL, - "supportedSigningAlgs": []string{"RS256"}, - "usernameClaim": "sub", - "usernamePrefix": prefix, - }, - }) - unstr.SetAPIVersion(openIDConnectApiVersion) - unstr.SetKind(openIDConnectKind) - unstr.SetName(name) - err := f.Client.Create(ctx, unstr) - return unstr, err - } - - deleteOpenIDConnect := func(name string) error { - unstr := &unstructured.Unstructured{} - unstr.SetAPIVersion(openIDConnectApiVersion) - unstr.SetKind(openIDConnectKind) - unstr.SetName(name) - return f.Client.Delete(ctx, unstr) - } - - createAdminClusterRoleBinding := func(name, saName, saNamespace, prefix string) error { - b := &v12.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{}, - Subjects: nil, - RoleRef: v12.RoleRef{}, - } - b.SetName(name) - b.RoleRef = v12.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "cluster-admin", - } - b.Subjects = []v12.Subject{ - { - APIGroup: "rbac.authorization.k8s.io", - Kind: "User", - Name: fmt.Sprintf("%ssystem:serviceaccount:%s:%s", prefix, saNamespace, saName), - }, - } - - return f.Client.Create(ctx, b) - } - - createOIDCTarget := func(ctx context.Context, name, namespace, saName, audience string) (*lsv1alpha1.Target, error) { - config := 
&targettypes.KubernetesClusterTargetConfig{ - OIDCConfig: &targettypes.OIDCConfig{ - Server: f.RestConfig.Host, - CAData: f.RestConfig.CAData, - ServiceAccount: v1.LocalObjectReference{ - Name: saName, - }, - Audience: []string{audience}, - ExpirationSeconds: ptr.To[int64](86400), - }, - } - configRaw, err := json.Marshal(config) - if err != nil { - return nil, err - } - - t := &lsv1alpha1.Target{} - t.SetName(name) - t.SetNamespace(namespace) - t.Spec = lsv1alpha1.TargetSpec{ - Type: targettypes.KubernetesClusterTargetType, - Configuration: &lsv1alpha1.AnyJSON{ - RawMessage: configRaw, - }, - } - - if err := state.Create(ctx, t); err != nil { - return nil, err - } - return t, nil - } - It("should use an oidc target", func() { const ( - openIDConnectName = "resource-cluster-oidc" - targetName = "my-cluster-oidc" - serviceAccountName = "service-account-oidc" - bindingName = "binding-oidc" - audience = "target-cluster-oidc" - prefix = "resource-cluster-oidc:" + audience = "oidc-target-cluster" + configMapName = "oidc-target-test" ) + settings := map[string]any{ + "namespace": state.Namespace, + "openIDConnectName": "landscaper-integration-test-oidc-targets", + "clusterRoleBindingName": "landscaper:integration-test:oidc-targets", + "serviceAccountName": "oidc-serviceaccount", + "targetName": "oidc-target", + "installationName": "oidc-inst", + "configMapName": configMapName, + "audience": audience, + "clientID": audience, + "issuerURL": f.OIDCIssuerURL, + "server": f.RestConfig.Host, + "caData": base64.StdEncoding.EncodeToString(f.RestConfig.CAData), + "prefix": "resource-cluster-oidc:", + } + By("Create OpenIDConnect resource so that the target cluster trusts the resource cluster") - _, err := createOpenIDConnect(openIDConnectName, audience, f.OIDCIssuerURL, prefix) - Expect(err).NotTo(HaveOccurred()) + openIDConnect := &unstructured.Unstructured{} + Expect(utils.CreateClientObjectFromTemplate(ctx, f.Client, filepath.Join(testdataDir, "openidconnect.yaml"), settings, 
openIDConnect)).To(Succeed()) - By("Create ClusterRoleBinding on target cluster for ServiceAccount on resource cluster") - err = createAdminClusterRoleBinding(bindingName, serviceAccountName, state.Namespace, prefix) - Expect(err).NotTo(HaveOccurred()) + By("Create ClusterRoleBinding on resource cluster") + clusterRoleBinding := &v12.ClusterRoleBinding{} + Expect(utils.CreateClientObjectFromTemplate(ctx, f.Client, filepath.Join(testdataDir, "clusterrolebinding.yaml"), settings, clusterRoleBinding)).To(Succeed()) By("Create ServiceAccount on resource cluster") - _, err = utils.CreateServiceAccount(ctx, state.State, serviceAccountName, state.Namespace) - Expect(err).NotTo(HaveOccurred()) + serviceAccount := &v1.ServiceAccount{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "serviceaccount.yaml"), settings, serviceAccount)).To(Succeed()) - By("Create oidc target on resource cluster") - target, err := createOIDCTarget(ctx, targetName, state.Namespace, serviceAccountName, audience) - Expect(err).NotTo(HaveOccurred()) - - By("Create DataObject for namespace import") - doNamespace := &lsv1alpha1.DataObject{} - utils.ExpectNoError(utils.CreateNamespaceDataObjectFromFile(ctx, state.State, doNamespace, path.Join(testdataDir, "import-do-namespace.yaml"))) + By("Create OIDC Target on resource cluster") + target := &lsv1alpha1.Target{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "target.yaml"), settings, target)).To(Succeed()) By("Create Installation") inst := &lsv1alpha1.Installation{} - utils.ExpectNoError(utils.CreateInstallationFromFile(ctx, state.State, inst, path.Join(testdataDir, "installation.yaml"))) + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "installation.yaml"), settings, inst)).To(Succeed()) By("Wait for Installation to finish") utils.ExpectNoError(lsutils.WaitForInstallationToFinish(ctx, f.Client, inst, 
lsv1alpha1.InstallationPhases.Succeeded, 2*time.Minute)) By("Check deployed configmaps") - cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm-1", Namespace: state.Namespace}} - key := kutil.ObjectKeyFromObject(cm) - Expect(f.Client.Get(ctx, key, cm)).To(Succeed()) + cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: configMapName, Namespace: state.Namespace}} + Expect(f.Client.Get(ctx, kutil.ObjectKeyFromObject(cm), cm)).To(Succeed()) By("Delete installation") Expect(state.Client.Delete(ctx, inst)).To(Succeed()) Expect(lsutils.WaitForInstallationToBeDeleted(ctx, f.Client, inst, 2*time.Minute)).To(Succeed()) - By("Delete DataObject") - Expect(state.Client.Delete(ctx, doNamespace)).To(Succeed()) - By("Delete Target") Expect(state.Client.Delete(ctx, target)).To(Succeed()) By("Delete ServiceAccount") - Expect(utils.DeleteServiceAccount(ctx, state.State, serviceAccountName, state.Namespace)).To(Succeed()) + Expect(state.Client.Delete(ctx, serviceAccount)).To(Succeed()) + + By("Delete ClusterRoleBinding") + Expect(f.Client.Delete(ctx, clusterRoleBinding)).To(Succeed()) By("Delete OpenIDConnect resource") - Expect(deleteOpenIDConnect(openIDConnectName)).To(Succeed()) + Expect(f.Client.Delete(ctx, openIDConnect)).To(Succeed()) }) - }) } diff --git a/test/integration/targets/register.go b/test/integration/targets/register.go index c4915036d..f7c440d6b 100644 --- a/test/integration/targets/register.go +++ b/test/integration/targets/register.go @@ -18,4 +18,5 @@ func RegisterTests(f *framework.Framework) { TargetTests(f) TargetMapTests(ctx, f) OIDCTargetTests(ctx, f) + SelfTargetTests(ctx, f) } diff --git a/test/integration/targets/self_targets.go b/test/integration/targets/self_targets.go new file mode 100644 index 000000000..dbb74bd8c --- /dev/null +++ b/test/integration/targets/self_targets.go @@ -0,0 +1,81 @@ +package targets + +import ( + "context" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + v12 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + kutil "github.com/gardener/landscaper/controller-utils/pkg/kubernetes" + lsutils "github.com/gardener/landscaper/pkg/utils/landscaper" + "github.com/gardener/landscaper/test/framework" + "github.com/gardener/landscaper/test/utils" +) + +func SelfTargetTests(ctx context.Context, f *framework.Framework) { + + Describe("Self Targets", func() { + + var ( + testdataDir = filepath.Join(f.RootPath, "test", "integration", "testdata", "targets", "self-targets") + state = f.Register() + ) + + It("should use a self target", func() { + const ( + configMapName = "self-target-test" + ) + + settings := map[string]any{ + "namespace": state.Namespace, + "configMapName": configMapName, + "clusterRoleBindingName": "landscaper:integration-test:self-targets", + "installationName": "self-inst", + "serviceAccountName": "self-serviceaccount", + "targetName": "self-target", + } + + By("Create ServiceAccount on resource cluster") + serviceAccount := &v1.ServiceAccount{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "serviceaccount.yaml"), settings, serviceAccount)).To(Succeed()) + + By("Create ClusterRoleBinding on resource cluster") + clusterRoleBinding := &v12.ClusterRoleBinding{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "clusterrolebinding.yaml"), settings, clusterRoleBinding)).To(Succeed()) + + By("Create Self Target") + target := &lsv1alpha1.Target{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "target.yaml"), settings, target)).To(Succeed()) + + By("Create Installation") + inst := &lsv1alpha1.Installation{} + Expect(utils.CreateStateObjectFromTemplate(ctx, state.State, filepath.Join(testdataDir, "installation.yaml"), settings, inst)).To(Succeed()) + + 
By("Wait for Installation to finish") + utils.ExpectNoError(lsutils.WaitForInstallationToFinish(ctx, f.Client, inst, lsv1alpha1.InstallationPhases.Succeeded, 2*time.Minute)) + + By("Check deployed configmaps") + cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: configMapName, Namespace: state.Namespace}} + Expect(f.Client.Get(ctx, kutil.ObjectKeyFromObject(cm), cm)).To(Succeed()) + + By("Delete installation") + Expect(state.Client.Delete(ctx, inst)).To(Succeed()) + Expect(lsutils.WaitForInstallationToBeDeleted(ctx, f.Client, inst, 2*time.Minute)).To(Succeed()) + + By("Delete Target") + Expect(state.Client.Delete(ctx, target)).To(Succeed()) + + By("Delete ServiceAccount") + Expect(state.Client.Delete(ctx, serviceAccount)).To(Succeed()) + + By("Delete ClusterRoleBinding") + Expect(state.Client.Delete(ctx, clusterRoleBinding)).To(Succeed()) + }) + }) +} diff --git a/test/integration/testdata/targets/oidc-targets/clusterrolebinding.yaml b/test/integration/testdata/targets/oidc-targets/clusterrolebinding.yaml new file mode 100644 index 000000000..76844400a --- /dev/null +++ b/test/integration/testdata/targets/oidc-targets/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .clusterRoleBindingName }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - apiGroup: "rbac.authorization.k8s.io" + kind: User + name: {{ .prefix }}system:serviceaccount:{{ .namespace }}:{{ .serviceAccountName }} diff --git a/test/integration/testdata/targets/oidc-targets/import-do-namespace.yaml b/test/integration/testdata/targets/oidc-targets/import-do-namespace.yaml deleted file mode 100644 index 267f46c6d..000000000 --- a/test/integration/testdata/targets/oidc-targets/import-do-namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: landscaper.gardener.cloud/v1alpha1 -kind: DataObject -metadata: - name: do-namespace - namespace: example -data: example diff 
--git a/test/integration/testdata/targets/oidc-targets/installation.yaml b/test/integration/testdata/targets/oidc-targets/installation.yaml index 44f333b36..3de6bdd21 100644 --- a/test/integration/testdata/targets/oidc-targets/installation.yaml +++ b/test/integration/testdata/targets/oidc-targets/installation.yaml @@ -3,19 +3,13 @@ kind: Installation metadata: annotations: landscaper.gardener.cloud/operation: reconcile - name: oidc-1 - namespace: example - + name: {{ .installationName }} + namespace: {{ .namespace }} spec: - imports: targets: - name: cluster - target: my-cluster-oidc - data: - - name: namespace - dataRef: do-namespace - + target: {{ .targetName }} blueprint: inline: filesystem: @@ -23,15 +17,9 @@ spec: apiVersion: landscaper.gardener.cloud/v1alpha1 kind: Blueprint jsonSchema: "https://json-schema.org/draft/2019-09/schema" - imports: - name: cluster targetType: landscaper.gardener.cloud/kubernetes-cluster - - name: namespace - type: data - schema: - type: string - deployExecutions: - name: default type: GoTemplate @@ -51,7 +39,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: cm-1 - namespace: {{ .imports.namespace }} + name: {{ .configMapName }} + namespace: {{ .namespace }} data: foo: bar diff --git a/test/integration/testdata/targets/oidc-targets/openidconnect.yaml b/test/integration/testdata/targets/oidc-targets/openidconnect.yaml new file mode 100644 index 000000000..a5db88473 --- /dev/null +++ b/test/integration/testdata/targets/oidc-targets/openidconnect.yaml @@ -0,0 +1,11 @@ +apiVersion: authentication.gardener.cloud/v1alpha1 +kind: OpenIDConnect +metadata: + name: {{ .openIDConnectName }} +spec: + clientID: {{ .clientID }} + issuerURL: {{ .issuerURL }} + supportedSigningAlgs: + - RS256 + usernameClaim: sub + usernamePrefix: '{{ .prefix }}' diff --git a/test/integration/testdata/targets/oidc-targets/serviceaccount.yaml b/test/integration/testdata/targets/oidc-targets/serviceaccount.yaml new file mode 100644 index 
000000000..af06bfdc6 --- /dev/null +++ b/test/integration/testdata/targets/oidc-targets/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .serviceAccountName }} + namespace: {{ .namespace }} diff --git a/test/integration/testdata/targets/oidc-targets/target.yaml b/test/integration/testdata/targets/oidc-targets/target.yaml new file mode 100644 index 000000000..ca983d026 --- /dev/null +++ b/test/integration/testdata/targets/oidc-targets/target.yaml @@ -0,0 +1,16 @@ +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Target +metadata: + name: {{ .targetName }} + namespace: {{ .namespace }} +spec: + config: + oidcConfig: + server: {{ .server }} + caData: {{ .caData }} + audience: + - {{ .audience }} + serviceAccount: + name: {{ .serviceAccountName }} + expirationSeconds: 3600 + type: landscaper.gardener.cloud/kubernetes-cluster diff --git a/test/integration/testdata/targets/self-targets/clusterrolebinding.yaml b/test/integration/testdata/targets/self-targets/clusterrolebinding.yaml new file mode 100644 index 000000000..d410a22c7 --- /dev/null +++ b/test/integration/testdata/targets/self-targets/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .clusterRoleBindingName }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: {{ .serviceAccountName }} + namespace: {{ .namespace }} diff --git a/test/integration/testdata/targets/self-targets/installation.yaml b/test/integration/testdata/targets/self-targets/installation.yaml new file mode 100644 index 000000000..3de6bdd21 --- /dev/null +++ b/test/integration/testdata/targets/self-targets/installation.yaml @@ -0,0 +1,45 @@ +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Installation +metadata: + annotations: + landscaper.gardener.cloud/operation: reconcile + name: {{ .installationName }} + namespace: {{ 
.namespace }} +spec: + imports: + targets: + - name: cluster + target: {{ .targetName }} + blueprint: + inline: + filesystem: + blueprint.yaml: | + apiVersion: landscaper.gardener.cloud/v1alpha1 + kind: Blueprint + jsonSchema: "https://json-schema.org/draft/2019-09/schema" + imports: + - name: cluster + targetType: landscaper.gardener.cloud/kubernetes-cluster + deployExecutions: + - name: default + type: GoTemplate + template: | + deployItems: + - name: item-1 + type: landscaper.gardener.cloud/kubernetes-manifest + target: + import: cluster + config: + apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2 + kind: ProviderConfiguration + updateStrategy: update + manifests: + - policy: manage + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ .configMapName }} + namespace: {{ .namespace }} + data: + foo: bar diff --git a/test/integration/testdata/targets/self-targets/serviceaccount.yaml b/test/integration/testdata/targets/self-targets/serviceaccount.yaml new file mode 100644 index 000000000..af06bfdc6 --- /dev/null +++ b/test/integration/testdata/targets/self-targets/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .serviceAccountName }} + namespace: {{ .namespace }} diff --git a/test/integration/testdata/targets/self-targets/target.yaml b/test/integration/testdata/targets/self-targets/target.yaml new file mode 100644 index 000000000..6a71fa3fe --- /dev/null +++ b/test/integration/testdata/targets/self-targets/target.yaml @@ -0,0 +1,12 @@ +apiVersion: landscaper.gardener.cloud/v1alpha1 +kind: Target +metadata: + name: {{ .targetName }} + namespace: {{ .namespace }} +spec: + config: + selfConfig: + serviceAccount: + name: {{ .serviceAccountName }} + expirationSeconds: 3600 + type: landscaper.gardener.cloud/kubernetes-cluster diff --git a/test/utils/builder.go b/test/utils/builder.go index ee6614aab..a27db415a 100644 --- a/test/utils/builder.go +++ b/test/utils/builder.go @@ -1,16 +1,67 @@ 
package utils import ( + "bytes" "context" "fmt" + "os" + "text/template" k8sv1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" "github.com/gardener/landscaper/test/utils/envtest" ) +func BuildObjectFromTemplate(filePath string, settings map[string]any, obj client.Object) error { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + + tmpl, err := template.New("tmpl").Parse(string(data)) + if err != nil { + return err + } + + var w bytes.Buffer + if err := tmpl.Execute(&w, settings); err != nil { + return err + } + + if err := yaml.Unmarshal(w.Bytes(), obj); err != nil { + return err + } + + return nil +} + +func CreateStateObjectFromTemplate(ctx context.Context, state *envtest.State, filePath string, settings map[string]any, obj client.Object) error { + if err := BuildObjectFromTemplate(filePath, settings, obj); err != nil { + return err + } + + if err := state.Create(ctx, obj); err != nil { + return err + } + + return nil +} + +func CreateClientObjectFromTemplate(ctx context.Context, cl client.Client, filePath string, settings map[string]any, obj client.Object) error { + if err := BuildObjectFromTemplate(filePath, settings, obj); err != nil { + return err + } + + if err := cl.Create(ctx, obj); err != nil { + return err + } + + return nil +} + func CreateDataObjectFromFile(ctx context.Context, state *envtest.State, do *lsv1alpha1.DataObject, path string) error { if err := ReadResourceFromFile(do, path); err != nil { return err