diff --git a/.github/workflows/e2e-long.yaml b/.github/workflows/e2e-long.yaml
index c169bae5..ba651073 100644
--- a/.github/workflows/e2e-long.yaml
+++ b/.github/workflows/e2e-long.yaml
@@ -15,7 +15,7 @@ jobs:
       test_name: Import via GitOps
       run_azure_janitor: false
       artifact_name: artifacts_import_gitops
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
   e2e_import_gitops_v3:
     uses: ./.github/workflows/run-e2e-suite.yaml
@@ -24,7 +24,7 @@ jobs:
       test_name: Import via GitOps [v3]
       run_azure_janitor: false
       artifact_name: artifacts_import_gitops_v3
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
   e2e_v2prov:
     uses: ./.github/workflows/run-e2e-suite.yaml
@@ -33,7 +33,7 @@ jobs:
      test_name: v2 provisioning
       run_azure_janitor: true
       artifact_name: artifacts_v2prov
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
   e2e_update_labels:
     uses: ./.github/workflows/run-e2e-suite.yaml
@@ -42,7 +42,7 @@ jobs:
       test_name: Update labels
       run_azure_janitor: true
       artifact_name: artifacts_update_labels
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
   e2e_embedded_capi_disabled:
     uses: ./.github/workflows/run-e2e-suite.yaml
@@ -51,7 +51,7 @@ jobs:
       test_name: Embedded CAPI disabled
       run_azure_janitor: false
       artifact_name: artifacts_embedded_capi
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
   e2e_embedded_capi_disabled_v3:
     uses: ./.github/workflows/run-e2e-suite.yaml
@@ -60,5 +60,5 @@ jobs:
       test_name: Embedded CAPI disabled [v3]
       run_azure_janitor: false
       artifact_name: artifacts_embedded_capi_v3
-      management_cluster_infrastructure: eks
+      MANAGEMENT_CLUSTER_ENVIRONMENT: eks
     secrets: inherit
diff --git a/.github/workflows/e2e-short-test.yaml b/.github/workflows/e2e-short-test.yaml
index 73749855..e799b6c0 100644
--- a/.github/workflows/e2e-short-test.yaml
+++ b/.github/workflows/e2e-short-test.yaml
@@ -4,7 +4,7 @@ on:
   workflow_dispatch:
 
 env:
-  MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind"
+  MANAGEMENT_CLUSTER_ENVIRONMENT: "isolated-kind"
   GINKGO_LABEL_FILTER: "short"
 
 jobs:
diff --git a/.github/workflows/e2e-short.yaml b/.github/workflows/e2e-short.yaml
index a9d73c4c..f5e1617d 100644
--- a/.github/workflows/e2e-short.yaml
+++ b/.github/workflows/e2e-short.yaml
@@ -5,7 +5,7 @@ on:
     types: [opened, edited, synchronize, reopened, labeled, unlabeled]
 
 env:
-  MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind"
+  MANAGEMENT_CLUSTER_ENVIRONMENT: "isolated-kind"
   GINKGO_LABEL_FILTER: "short"
 
 jobs:
diff --git a/.github/workflows/run-e2e-suite.yaml b/.github/workflows/run-e2e-suite.yaml
index 1359ff26..410fe3cf 100644
--- a/.github/workflows/run-e2e-suite.yaml
+++ b/.github/workflows/run-e2e-suite.yaml
@@ -1,7 +1,7 @@
 on:
   workflow_call:
     inputs:
-      management_cluster_infrastructure:
+      MANAGEMENT_CLUSTER_ENVIRONMENT:
        description: "The infrastructure to use for the management cluster: eks, kind or isolated-kind"
         type: string
         required: true
@@ -43,7 +43,7 @@ env:
   AWS_REGION: eu-west-2
   AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
   AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-  MANAGEMENT_CLUSTER_INFRASTRUCTURE: ${{ inputs.management_cluster_infrastructure }}
+  MANAGEMENT_CLUSTER_ENVIRONMENT: ${{ inputs.MANAGEMENT_CLUSTER_ENVIRONMENT }}
   GINKGO_LABEL_FILTER: full
   GINKGO_TESTS: ${{ github.workspace }}/${{ inputs.test_suite }}
 
@@ -84,7 +84,7 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: build and push e2e image
-        if: ${{ inputs.management_cluster_infrastructure == 'eks' }}
+        if: ${{ inputs.MANAGEMENT_CLUSTER_ENVIRONMENT == 'eks' }}
         run: make e2e-image-push
       - name: Run e2e tests
         run: make test-e2e
diff --git a/Makefile b/Makefile
index e6996f77..a9e3b4b7 100644
--- a/Makefile
+++ b/Makefile
@@ -80,8 +80,15 @@ GINKGO_NOCOLOR ?= false
 GINKGO_LABEL_FILTER ?= short
 GINKGO_TESTS ?= $(ROOT_DIR)/$(TEST_DIR)/e2e/suites/...
 
-MANAGEMENT_CLUSTER_INFRASTRUCTURE ?= eks
-E2ECONFIG_VARS ?= MANAGEMENT_CLUSTER_INFRASTRUCTURE=$(MANAGEMENT_CLUSTER_INFRASTRUCTURE)
+MANAGEMENT_CLUSTER_ENVIRONMENT ?= eks
+
+E2ECONFIG_VARS ?= MANAGEMENT_CLUSTER_ENVIRONMENT=$(MANAGEMENT_CLUSTER_ENVIRONMENT) \
+ARTIFACTS=$(ARTIFACTS) \
+HELM_BINARY_PATH=$(HELM) \
+CLUSTERCTL_BINARY_PATH=$(CLUSTERCTL) \
+SKIP_RESOURCE_CLEANUP=$(SKIP_RESOURCE_CLEANUP) \
+USE_EXISTING_CLUSTER=$(USE_EXISTING_CLUSTER) \
+TURTLES_PATH=$(ROOT_DIR)/$(CHART_RELEASE_DIR)
 
 # to set multiple ginkgo skip flags, if any
 ifneq ($(strip $(GINKGO_SKIP)),)
@@ -535,14 +542,7 @@ test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-en
 		-poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" --label-filter="$(GINKGO_LABEL_FILTER)" \
 		$(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \
 		--output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) $(GINKGO_TESTS) -- \
-		-e2e.artifacts-folder="$(ARTIFACTS)" \
-		-e2e.config="$(E2E_CONF_FILE)" \
-		-e2e.helm-binary-path=$(HELM) \
-		-e2e.clusterctl-binary-path=$(CLUSTERCTL) \
-		-e2e.chart-path=$(ROOT_DIR)/$(CHART_RELEASE_DIR) \
-		-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \
-		-e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \
-		-e2e.gitea-custom-ingress=$(GITEA_CUSTOM_INGRESS)
+		-e2e.config="$(E2E_CONF_FILE)"
 
 .PHONY: e2e-image
 e2e-image: ## Build the image for e2e tests
diff --git a/test/e2e/README.md b/test/e2e/README.md
index 52492e28..d2bac849 100644
--- a/test/e2e/README.md
+++ b/test/e2e/README.md
@@ -99,6 +99,7 @@ variables:
   RANCHER_HOSTNAME: "localhost" # Your ngrok domain
   NGROK_API_KEY: "" # Key and token values for establishing ingress
   NGROK_AUTHTOKEN: ""
+  MANAGEMENT_CLUSTER_ENVIRONMENT: "isolated-kind" # Environment to run the tests in: eks, isolated-kind, kind.
 ```
 
 ## Testdata
@@ -113,7 +114,19 @@ While all the tests are based on the combination of [ginkgo](https://github.com/
 
 ## Cluster configuration
 
-[Kind](https://kind.sigs.k8s.io/) is used to set up a cluster for e2e tests. All required components like rancher, rancher-turtles and [cluster-api-operator](https://github.com/kubernetes-sigs/cluster-api-operator) (which provisions cluster-api with required providers) are installed using [helm](https://kind.sigs.k8s.io/) charts.
+### Kind
+
+[Kind](https://kind.sigs.k8s.io/) is used to set up a cluster for e2e tests. All required components like rancher, rancher-turtles and [cluster-api-operator](https://github.com/kubernetes-sigs/cluster-api-operator) (which provisions cluster-api with required providers) are installed using [helm](https://helm.sh/) charts. This option can be enabled by setting `MANAGEMENT_CLUSTER_ENVIRONMENT` to `kind`. It is also required to set the `NGROK_API_KEY`, `NGROK_AUTHTOKEN` and `RANCHER_HOSTNAME` environment variables.
+
+### Isolated Kind
+
+This is similar to Kind, but instead of a public endpoint for Rancher, it uses the internal IP of the control-plane node. This setup can be used to test providers that run in the same network as Rancher. This option can be enabled by setting `MANAGEMENT_CLUSTER_ENVIRONMENT` to `isolated-kind`.
+
+### EKS
+
+EKS is used to set up a cluster for e2e tests. In this setup, an nginx ingress is deployed to provide a public endpoint for Rancher. This option can be enabled by setting `MANAGEMENT_CLUSTER_ENVIRONMENT` to `eks`.
+
+### Customizing the cluster
 
 To configure individual components, a series of `server-side-apply` patches are being issued. All required patch manifests are located under `test/e2e/resources/config`. Under circumstances each manifest could have a limited environment based configuration with `envsubst` (for example: setting `RANCHER_HOSTNAME` value in ingress configuration).
diff --git a/test/e2e/config/operator.yaml b/test/e2e/config/operator.yaml
index 78893df9..ed452902 100644
--- a/test/e2e/config/operator.yaml
+++ b/test/e2e/config/operator.yaml
@@ -27,22 +27,32 @@ intervals:
   default/wait-turtles-uninstall: ["10m", "30s"]
 
 variables:
-  MANAGEMENT_CLUSTER_INFRASTRUCTURE: "isolated-kind" # supported options are eks, isolated-kind, kind
-  RANCHER_VERSION: "v2.8.1"
+  MANAGEMENT_CLUSTER_ENVIRONMENT: "isolated-kind" # supported options are eks, isolated-kind, kind
   RANCHER_ALPHA_VERSION: "v2.9.1-alpha1"
+  CLUSTERCTL_BINARY_PATH: ""
+  USE_EXISTING_CLUSTER: "false"
+  SKIP_RESOURCE_CLEANUP: "false"
+  ARTIFACTS_FOLDER: "_artifacts"
+  HELM_BINARY_PATH: "helm"
+  HELM_EXTRA_VALUES_FOLDER: "/tmp"
   KUBERNETES_VERSION: "v1.28.6"
   KUBERNETES_MANAGEMENT_VERSION: "v1.27.0"
+  RKE2_VERSION: "v1.28.1+rke2r1"
   KUBERNETES_MANAGEMENT_AWS_REGION: "eu-west-2"
   RANCHER_HOSTNAME: "localhost"
   RANCHER_FEATURES: ""
   RANCHER_PATH: "rancher-latest/rancher"
   RANCHER_ALPHA_PATH: "rancher-alpha/rancher"
+  RANCHER_ALPHA_URL: "https://releases.rancher.com/server-charts/alpha"
+  RANCHER_VERSION: "v2.8.1"
+  TURTLES_VERSION: "v0.0.1"
+  TURTLES_PATH: "turtles/rancher-turtles"
+  TURTLES_REPO_NAME: "turtles"
+  TURTLES_URL: https://rancher.github.io/turtles
   CPI_IMAGE_K8S_VERSION: "v1.27.0"
-  RKE2_VERSION: "v1.28.1+rke2r1"
   RANCHER_REPO_NAME: "rancher-latest"
   RANCHER_ALPHA_REPO_NAME: "rancher-alpha"
   RANCHER_URL: "https://releases.rancher.com/server-charts/latest"
-  RANCHER_ALPHA_URL: "https://releases.rancher.com/server-charts/alpha"
   CERT_MANAGER_URL: "https://charts.jetstack.io"
   CERT_MANAGER_REPO_NAME: "jetstack"
   CERT_MANAGER_PATH: "jetstack/cert-manager"
diff --git a/test/e2e/const.go b/test/e2e/const.go
index 253aadbb..f238cf11 100644
--- a/test/e2e/const.go
+++ b/test/e2e/const.go
@@ -98,16 +98,16 @@ const (
 	NginxIngressDeployment = "ingress-nginx-controller"
 )
 
-type ManagementClusterInfrastuctureType string
+type ManagementClusterEnvironmentType string
 
 const (
-	ManagementClusterInfrastuctureEKS ManagementClusterInfrastuctureType = "eks"
-	ManagementClusterInfrastuctureIsolatedKind ManagementClusterInfrastuctureType = "isolated-kind"
-	ManagementClusterInfrastuctureKind ManagementClusterInfrastuctureType = "kind"
+	ManagementClusterEnvironmentEKS ManagementClusterEnvironmentType = "eks"
+	ManagementClusterEnvironmentIsolatedKind ManagementClusterEnvironmentType = "isolated-kind"
+	ManagementClusterEnvironmentKind ManagementClusterEnvironmentType = "kind"
 )
 
 const (
-	ManagementClusterInfrastucture = "MANAGEMENT_CLUSTER_INFRASTRUCTURE"
+	ManagementClusterEnvironmentVar = "MANAGEMENT_CLUSTER_ENVIRONMENT"
 
 	KubernetesManagementVersionVar = "KUBERNETES_MANAGEMENT_VERSION"
 
@@ -141,6 +141,17 @@ const (
 	GiteaUserNameVar = "GITEA_USER_NAME"
GiteaUserPasswordVar = "GITEA_USER_PWD" + ArtifactsFolderVar = "ARTIFACTS_FOLDER" + UseExistingClusterVar = "USE_EXISTING_CLUSTER" + HelmBinaryPathVar = "HELM_BINARY_PATH" + HelmExtraValuesFolderVar = "HELM_EXTRA_VALUES_FOLDER" + TurtlesVersionVar = "TURTLES_VERSION" + TurtlesPathVar = "TURTLES_PATH" + TurtlesUrlVar = "TURTLES_URL" + TurtlesRepoNameVar = "TURTLES_REPO_NAME" + SkipResourceCleanupVar = "SKIP_RESOURCE_CLEANUP" + ClusterctlBinaryPathVar = "CLUSTERCTL_BINARY_PATH" + RKE2VersionVar = "RKE2_VERSION" CapaEncodedCredentialsVar = "CAPA_ENCODED_CREDS" diff --git a/test/e2e/flags.go b/test/e2e/flags.go index 3be038b8..8f8ea341 100644 --- a/test/e2e/flags.go +++ b/test/e2e/flags.go @@ -26,41 +26,9 @@ import ( type FlagValues struct { // ConfigPath is the path to the e2e config file. ConfigPath string - - // UseExistingCluster instructs the test to use the current cluster instead of creating a new one (default discovery rules apply). - UseExistingCluster bool - - // ArtifactFolder is the folder to store e2e test artifacts. - ArtifactFolder string - - // SkipCleanup prevents cleanup of test resources e.g. for debug purposes. - SkipCleanup bool - - // HelmBinaryPath is the path to the helm binary. - HelmBinaryPath string - - // HelmExtraValuesDir is the location where extra values files will be stored. - HelmExtraValuesDir string - - // ChartPath is the path to the operator chart. - ChartPath string - - // ClusterctlBinaryPath is the path to the clusterctl binary to use. - ClusterctlBinaryPath string - - // GiteaCustomIngress is the flag to enable custom ingress for Gitea. - GiteaCustomIngress bool } // InitFlags is used to specify the standard flags for the e2e tests. func InitFlags(values *FlagValues) { flag.StringVar(&values.ConfigPath, "e2e.config", "config/operator.yaml", "path to the e2e config file") - flag.StringVar(&values.ArtifactFolder, "e2e.artifacts-folder", "_artifacts", "folder where e2e test artifact should be stored") - flag.BoolVar(&values.SkipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") - flag.BoolVar(&values.UseExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") - flag.StringVar(&values.HelmBinaryPath, "e2e.helm-binary-path", "helm", "path to the helm binary") - flag.StringVar(&values.HelmExtraValuesDir, "e2e.helm-extra-values-path", "/tmp", "path to the extra values file") - flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary") - flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart") - flag.BoolVar(&values.GiteaCustomIngress, "e2e.gitea-custom-ingress", false, "if true, the test will use a custom ingress for Gitea") } diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go index 625ef02a..39223800 100644 --- a/test/e2e/helpers.go +++ b/test/e2e/helpers.go @@ -25,6 +25,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" . "github.com/onsi/gomega" @@ -115,3 +116,15 @@ func CreateClusterctlLocalRepository(ctx context.Context, config *clusterctl.E2E Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exists in the local repository %s", repositoryFolder) return clusterctlConfig } + +func ValidateE2EConfig(config *clusterctl.E2EConfig) { + Expect(os.MkdirAll(config.GetVariable(ArtifactsFolderVar), 0o755)).To(Succeed(), "Invalid test suite argument. 
Can't create artifacts folder %q", config.GetVariable(ArtifactsFolderVar)) + Expect(config.GetVariable(HelmBinaryPathVar)).To(BeAnExistingFile(), "Invalid test suite argument. HELM_BINARY_PATH should be an existing file.") + Expect(config.GetVariable(TurtlesPathVar)).To(BeAnExistingFile(), "Invalid test suite argument. TURTLES_PATH should be an existing file.") + + _, err := strconv.ParseBool(config.GetVariable(UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Invalid test suite argument. Can't parse USE_EXISTING_CLUSTER %q", config.GetVariable(UseExistingClusterVar)) + + _, err = strconv.ParseBool(config.GetVariable(SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Invalid test suite argument. Can't parse SKIP_RESOURCE_CLEANUP %q", config.GetVariable(SkipResourceCleanupVar)) +} diff --git a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go index 1517e8a0..3afa83e7 100644 --- a/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go +++ b/test/e2e/specs/migrate_gitops_provv1_mgmtv3.go @@ -353,6 +353,7 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateT testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{ HelmBinaryPath: input.HelmBinaryPath, ChartsPath: input.ChartPath, + ChartVersion: input.E2EConfig.GetVariable(e2e.TurtlesVersionVar), BootstrapClusterProxy: input.BootstrapClusterProxy, WaitInterval: input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"), }) @@ -362,7 +363,7 @@ func MigrateToV3UsingGitOpsSpec(ctx context.Context, inputGetter func() MigrateT HelmBinaryPath: input.HelmBinaryPath, Namespace: turtlesframework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: input.E2EConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: input.E2EConfig.GetIntervals(input.BootstrapClusterProxy.GetName(), "wait-controllers"), SkipCleanup: true, AdditionalValues: map[string]string{}, diff --git a/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go b/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go index d2390fea..57f56e91 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go @@ -40,8 +40,8 @@ var _ = Describe("[AWS] [EKS] [management.cattle.io/v3] Create and delete CAPI c E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAwsEKSMMP, ClusterName: "highlander-e2e-cluster1", ControlPlaneMachineCount: ptr.To[int](1), diff --git a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go index 93cf3c4c..3cd5c0cd 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go @@ -22,9 +22,9 @@ package embedded_capi_disabled_v3 import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -54,6 +54,8 @@ var ( // hostName is the host name for the Rancher Manager server. 
 	hostName string
 
+	artifactsFolder string
+
 	ctx = context.Background()
 
 	setupClusterResult *testenv.SetupTestClusterResult
@@ -74,34 +76,36 @@ func TestE2E(t *testing.T) {
 }
 
 var _ = BeforeSuite(func() {
-	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
-	Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder)
-	Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.")
-	Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.")
-
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
+	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
+	e2e.ValidateE2EConfig(e2eConfig)
+
+	artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar)
 
 	preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig)
 
-	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
-	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
+	By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder))
+	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository"))
+
+	useExistingCluster, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar))
+	Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable")
 
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster: flagVals.UseExistingCluster,
+		UseExistingCluster: useExistingCluster,
 		E2EConfig: e2eConfig,
 		ClusterctlConfigPath: clusterctlConfigPath,
 		Scheme: e2e.InitScheme(),
-		ArtifactFolder: flagVals.ArtifactFolder,
+		ArtifactFolder: artifactsFolder,
 		KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		CustomClusterProvider: preSetupOutput.CustomClusterProvider,
 	})
 
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"),
 		IngressType: preSetupOutput.IngressType,
 		CustomIngress: e2e.NginxIngress,
 		CustomIngressNamespace: e2e.NginxIngressNamespace,
@@ -119,8 +123,8 @@ var _ = BeforeSuite(func() {
 	// and the deploy Rancher Turtles.
 	rancherInput := testenv.DeployRancherInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"),
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"),
 		InstallCertManager: true,
 		CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar),
 		CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar),
@@ -153,12 +157,12 @@ var _ = BeforeSuite(func() {
 
 	rtInput := testenv.DeployRancherTurtlesInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		ChartPath: flagVals.ChartPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar),
 		CAPIProvidersYAML: e2e.CapiProviders,
 		Namespace: framework.DefaultRancherTurtlesNamespace,
 		Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-		Tag: "v0.0.1",
+		Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar),
 		WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
 		AdditionalValues: map[string]string{
 			"cluster-api-operator.cert-manager.enabled": "false",
@@ -200,7 +204,7 @@ var _ = BeforeSuite(func() {
 
 	giteaInput := testenv.DeployGiteaInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar),
 		ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar),
 		ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar),
@@ -227,13 +231,16 @@ var _ = BeforeSuite(func() {
 var _ = AfterSuite(func() {
 	testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"),
 	})
 
+	skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar))
+	Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable")
+
 	testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{
 		SetupTestClusterResult: *setupClusterResult,
-		SkipCleanup: flagVals.SkipCleanup,
-		ArtifactFolder: flagVals.ArtifactFolder,
+		SkipCleanup: skipCleanup,
+		ArtifactFolder: artifactsFolder,
 	})
 })
diff --git a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go
index 50ebace9..e488e965 100644
--- a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go
+++ b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go
@@ -40,8 +40,8 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul
 			E2EConfig: e2eConfig,
 			BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 			ClusterctlConfigPath: flagVals.ConfigPath,
-			ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath,
-			ArtifactFolder: flagVals.ArtifactFolder,
+			ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar),
+			ArtifactFolder: artifactsFolder,
 			ClusterTemplate: e2e.CAPIAwsEKSMMP,
 			ClusterName: "highlander-e2e-cluster1",
 			ControlPlaneMachineCount: ptr.To[int](1),
diff --git a/test/e2e/suites/embedded-capi-disabled/suite_test.go b/test/e2e/suites/embedded-capi-disabled/suite_test.go
index 4f38d3a7..eaca143b 100644
--- a/test/e2e/suites/embedded-capi-disabled/suite_test.go
+++ b/test/e2e/suites/embedded-capi-disabled/suite_test.go
@@ -22,9 +22,9 @@ package embedded_capi_disabled
 import (
 	"context"
 	"fmt"
-	"os"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"testing"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -55,6 +55,8 @@ var (
 	// hostName is the host name for the Rancher Manager server.
 	hostName string
 
+	artifactsFolder string
+
 	ctx = context.Background()
 
 	setupClusterResult *testenv.SetupTestClusterResult
@@ -75,34 +77,36 @@ func TestE2E(t *testing.T) {
 }
 
 var _ = BeforeSuite(func() {
-	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
-	Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder)
-	Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.")
-	Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.")
-
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
+	Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.")
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
+	e2e.ValidateE2EConfig(e2eConfig)
+
+	artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar)
 
 	preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig)
 
-	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
-	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
+	By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder))
+	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository"))
+
+	useExistingCluster, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar))
+	Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable")
 
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster: flagVals.UseExistingCluster,
+		UseExistingCluster: useExistingCluster,
 		E2EConfig: e2eConfig,
 		ClusterctlConfigPath: clusterctlConfigPath,
 		Scheme: e2e.InitScheme(),
-		ArtifactFolder: flagVals.ArtifactFolder,
+		ArtifactFolder: artifactsFolder,
 		KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		CustomClusterProvider: preSetupOutput.CustomClusterProvider,
 	})
 
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"),
 		IngressType: preSetupOutput.IngressType,
 		CustomIngress: e2e.NginxIngress,
 		CustomIngressNamespace: e2e.NginxIngressNamespace,
@@ -120,8 +124,8 @@ var _ = BeforeSuite(func() {
 	// and the deploy Rancher Turtles.
 	rancherInput := testenv.DeployRancherInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"),
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"),
 		InstallCertManager: true,
 		CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar),
 		CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar),
@@ -154,12 +158,12 @@ var _ = BeforeSuite(func() {
 
 	rtInput := testenv.DeployRancherTurtlesInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
-		ChartPath: flagVals.ChartPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
+		TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar),
 		CAPIProvidersYAML: e2e.CapiProviders,
 		Namespace: framework.DefaultRancherTurtlesNamespace,
 		Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH),
-		Tag: "v0.0.1",
+		Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar),
 		WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"),
 		AdditionalValues: map[string]string{
 			"cluster-api-operator.cert-manager.enabled": "false",
@@ -202,7 +206,7 @@ var _ = BeforeSuite(func() {
 
 	giteaInput := testenv.DeployGiteaInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar),
 		ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar),
 		ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar),
@@ -229,13 +233,16 @@ var _ = BeforeSuite(func() {
 var _ = AfterSuite(func() {
 	testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
-		HelmBinaryPath: flagVals.HelmBinaryPath,
+		HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar),
 		DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"),
 	})
 
+	skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar))
+	Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable")
+
 	testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{
 		SetupTestClusterResult: *setupClusterResult,
-		SkipCleanup: flagVals.SkipCleanup,
-		ArtifactFolder: flagVals.ArtifactFolder,
+		SkipCleanup: skipCleanup,
+		ArtifactFolder: artifactsFolder,
 	})
 })
diff --git a/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go b/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go
index b4852dd8..6fd02295 100644
--- a/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go
+++ b/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go
@@ -40,8 +40,8 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele
 			E2EConfig: e2eConfig,
 			BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 			ClusterctlConfigPath: flagVals.ConfigPath,
-			ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath,
-			ArtifactFolder: flagVals.ArtifactFolder,
+			ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar),
+			ArtifactFolder: artifactsFolder,
 			ClusterTemplate: e2e.CAPIDockerKubeadm,
"clusterv3-auto-import-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), @@ -73,8 +73,8 @@ var _ = Describe("[Docker] [RKE2] - [management.cattle.io/v3] Create and delete E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIDockerRKE2, ClusterName: "clusterv3-auto-import-rke2", ControlPlaneMachineCount: ptr.To[int](1), @@ -106,7 +106,7 @@ var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CA E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAzureAKSMMP, ClusterName: "highlander-e2e-clusterv3-2", ControlPlaneMachineCount: ptr.To[int](1), @@ -137,8 +137,8 @@ var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAwsEKSMMP, ClusterName: "clusterv3-eks", ControlPlaneMachineCount: ptr.To[int](1), diff --git a/test/e2e/suites/import-gitops-v3/suite_test.go b/test/e2e/suites/import-gitops-v3/suite_test.go index 0c202512..ec4d96fb 100644 --- a/test/e2e/suites/import-gitops-v3/suite_test.go +++ b/test/e2e/suites/import-gitops-v3/suite_test.go @@ -22,9 +22,9 @@ package import_gitops_v3 import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -58,6 +58,8 @@ var ( // hostName is the host name for the Rancher Manager server. hostName string + artifactsFolder string + ctx = context.Background() setupClusterResult *testenv.SetupTestClusterResult @@ -78,34 +80,36 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) - Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, + UseExistingCluster: useExistingCluter, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, @@ -121,8 +125,8 @@ var _ = BeforeSuite(func() { rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), InstallCertManager: true, CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), @@ -154,12 +158,12 @@ var _ = BeforeSuite(func() { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), CAPIProvidersYAML: e2e.CapiProviders, Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{ "rancherTurtles.features.addon-provider-fleet.enabled": "true", @@ -213,7 +217,7 @@ var _ = BeforeSuite(func() { giteaInput := 
testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), @@ -240,14 +244,17 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), }) + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: flagVals.SkipCleanup, - ArtifactFolder: flagVals.ArtifactFolder, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, }) }) diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 84293a3b..4f8429f3 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -43,8 +43,8 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIDockerKubeadm, ClusterName: "clusterv1-docker-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), @@ -73,8 +73,8 @@ var _ = Describe("[Docker] [RKE2] Create and delete CAPI cluster functionality s E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIDockerRKE2, ClusterName: "clusterv1-docker-rke2", ControlPlaneMachineCount: ptr.To[int](1), @@ -103,8 +103,8 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAwsEKSMMP, ClusterName: "clusterv1-eks", ControlPlaneMachineCount: ptr.To[int](1), @@ -132,7 +132,7 @@ var _ = Describe("[Azure] [AKS] Create and delete CAPI cluster functionality sho E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAzureAKSMMP, ClusterName: 
"highlander-e2e-cluster4", ControlPlaneMachineCount: ptr.To[int](1), @@ -160,8 +160,8 @@ var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionali E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIvSphereKubeadm, ClusterName: "cluster-vsphere-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), @@ -193,8 +193,8 @@ var _ = Describe("[vSphere] [RKE2] Create and delete CAPI cluster functionality E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIvSphereRKE2, ClusterName: "cluster-vsphere-rke2", ControlPlaneMachineCount: ptr.To[int](1), diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index 0668fb8e..40b2691a 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -22,9 +22,9 @@ package import_gitops import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -57,6 +57,8 @@ var ( // hostName is the host name for the Rancher Manager server. hostName string + artifactsFolder string + ctx = context.Background() setupClusterResult *testenv.SetupTestClusterResult @@ -77,34 +79,36 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) - Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, + UseExistingCluster: useExistingCluter, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, @@ -120,8 +124,8 @@ var _ = BeforeSuite(func() { rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), InstallCertManager: true, CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), @@ -154,8 +158,8 @@ var _ = BeforeSuite(func() { if shortTestOnly() { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: "https://rancher.github.io/turtles", + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", @@ -165,18 +169,19 @@ var _ = BeforeSuite(func() { testenv.DeployRancherTurtles(ctx, rtInput) testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{ - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartsPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + ChartVersion: 
e2eConfig.GetVariable(e2e.TurtlesVersionVar), BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), }) upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: rtInput.AdditionalValues, PostUpgradeSteps: []func(){}, @@ -205,12 +210,12 @@ var _ = BeforeSuite(func() { } else { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), CAPIProvidersYAML: e2e.CapiProviders, Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } @@ -269,7 +274,7 @@ var _ = BeforeSuite(func() { giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), @@ -296,21 +301,24 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), }) testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: flagVals.SkipCleanup, - ArtifactFolder: flagVals.ArtifactFolder, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, }) }) diff --git a/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go index 43a8ca51..a8929575 100644 --- 
a/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go +++ b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go @@ -39,13 +39,13 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to m specs.MigrateToV3UsingGitOpsSpec(ctx, func() specs.MigrateToV3UsingGitOpsSpecInput { return specs.MigrateToV3UsingGitOpsSpecInput{ - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, - ClusterctlBinaryPath: flagVals.ClusterctlBinaryPath, - ArtifactFolder: flagVals.ArtifactFolder, + ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), + ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIDockerKubeadm, ClusterName: "clusterv3-migrated", ControlPlaneMachineCount: ptr.To(1), diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go index f0b421db..a0fab8a6 100644 --- a/test/e2e/suites/migrate-gitops/suite_test.go +++ b/test/e2e/suites/migrate-gitops/suite_test.go @@ -22,9 +22,9 @@ package migrate_gitops import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -58,6 +58,8 @@ var ( // hostName is the host name for the Rancher Manager server. hostName string + artifactsFolder string + ctx = context.Background() setupClusterResult *testenv.SetupTestClusterResult @@ -78,34 +80,36 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) - Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, + UseExistingCluster: useExistingCluter, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, @@ -121,8 +125,8 @@ var _ = BeforeSuite(func() { rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), InstallCertManager: true, CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), @@ -155,8 +159,8 @@ var _ = BeforeSuite(func() { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: "https://rancher.github.io/turtles", + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProviders, Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", @@ -167,18 +171,19 @@ var _ = BeforeSuite(func() { testenv.DeployRancherTurtles(ctx, rtInput) testenv.DeployChartMuseum(ctx, testenv.DeployChartMuseumInput{ - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartsPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + ChartVersion: 
e2eConfig.GetVariable(e2e.TurtlesVersionVar), BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), }) upgradeInput := testenv.UpgradeRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: rtInput.AdditionalValues, } @@ -206,7 +211,7 @@ var _ = BeforeSuite(func() { giteaInput := testenv.DeployGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), @@ -233,21 +238,24 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), }) testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), Namespace: framework.DefaultRancherTurtlesNamespace, DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: flagVals.SkipCleanup, - ArtifactFolder: flagVals.ArtifactFolder, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, }) }) diff --git a/test/e2e/suites/update-labels/suite_test.go b/test/e2e/suites/update-labels/suite_test.go index 8384ddf9..43c22662 100644 --- a/test/e2e/suites/update-labels/suite_test.go +++ b/test/e2e/suites/update-labels/suite_test.go @@ -22,9 +22,9 @@ package update_labels import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -55,6 +55,8 @@ var ( // hostName is the host name for the Rancher Manager server. hostName string + artifactsFolder string + ctx = context.Background() setupClusterResult *testenv.SetupTestClusterResult @@ -74,34 +76,36 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. 
Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) - Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluster, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, + UseExistingCluster: useExistingCluster, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, @@ -117,8 +121,8 @@ var _ = BeforeSuite(func() { rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), InstallCertManager: true, CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), @@ -150,12 +154,12 @@ var _ = BeforeSuite(func() { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), CAPIProvidersYAML: 
e2e.CapiProviders, Namespace: framework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{ "cluster-api-operator.cluster-api.version": "v1.6.0", @@ -175,9 +179,12 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: flagVals.SkipCleanup, - ArtifactFolder: flagVals.ArtifactFolder, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, }) }) diff --git a/test/e2e/suites/update-labels/update_labels_test.go b/test/e2e/suites/update-labels/update_labels_test.go index 4ee14f77..72de3d40 100644 --- a/test/e2e/suites/update-labels/update_labels_test.go +++ b/test/e2e/suites/update-labels/update_labels_test.go @@ -169,12 +169,12 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w }) AfterEach(func() { - err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(flagVals.ArtifactFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) + err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the bootstrap cluster: %v\n", err) } - err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(flagVals.ArtifactFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) + err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) } diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go index e9022a89..17c636c0 100644 --- a/test/e2e/suites/v2prov/suite_test.go +++ b/test/e2e/suites/v2prov/suite_test.go @@ -22,9 +22,9 @@ package v2prov import ( "context" "fmt" - "os" "path/filepath" "runtime" + "strconv" "testing" . "github.com/onsi/ginkgo/v2" @@ -57,6 +57,8 @@ var ( ctx = context.Background() + artifactsFolder string + setupClusterResult *testenv.SetupTestClusterResult ) @@ -74,34 +76,36 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - Expect(os.MkdirAll(flagVals.ArtifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", flagVals.ArtifactFolder) - Expect(flagVals.HelmBinaryPath).To(BeAnExistingFile(), "Invalid test suite argument. helm-binary-path should be an existing file.") - Expect(flagVals.ChartPath).To(BeAnExistingFile(), "Invalid test suite argument. 
chart-path should be an existing file.") - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluster, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, + UseExistingCluster: useExistingCluster, E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, + ArtifactFolder: artifactsFolder, KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: flagVals.HelmBinaryPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), IngressType: preSetupOutput.IngressType, CustomIngress: e2e.NginxIngress, CustomIngressNamespace: e2e.NginxIngressNamespace, @@ -117,8 +121,8 @@ var _ = BeforeSuite(func() { rancherInput := testenv.DeployRancherInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher.yaml"), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), InstallCertManager: true, CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), @@ -152,12 +156,12 @@ var _ = BeforeSuite(func() { rtInput := testenv.DeployRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: flagVals.HelmBinaryPath, - ChartPath: flagVals.ChartPath, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), CAPIProvidersYAML: e2e.CapiProviders, Namespace: turtlesframework.DefaultRancherTurtlesNamespace, Image: fmt.Sprintf("ghcr.io/rancher/turtles-e2e-%s", runtime.GOARCH), - Tag: "v0.0.1", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } @@ -174,9 +178,12 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ SetupTestClusterResult: *setupClusterResult, - SkipCleanup: flagVals.SkipCleanup, - ArtifactFolder: flagVals.ArtifactFolder, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, }) }) diff --git a/test/e2e/suites/v2prov/v2prov_test.go b/test/e2e/suites/v2prov/v2prov_test.go index 70e96183..048ec333 100644 --- a/test/e2e/suites/v2prov/v2prov_test.go +++ b/test/e2e/suites/v2prov/v2prov_test.go @@ -168,12 +168,12 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w }) AfterEach(func() { - err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(flagVals.ArtifactFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) + err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the bootstrap cluster: %v\n", err) } - err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(flagVals.ArtifactFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) + err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) } diff --git a/test/framework/apply_template_helper.go b/test/framework/apply_template_helper.go index 98ccc060..f5ec54a3 100644 --- a/test/framework/apply_template_helper.go +++ b/test/framework/apply_template_helper.go @@ -25,13 +25,21 @@ import ( "sigs.k8s.io/cluster-api/test/framework" ) -// ApplyFromTemplateInput is the input to ApplyFromTemplate. +// ApplyFromTemplateInput represents the input parameters for applying a template. type ApplyFromTemplateInput struct { - Getter func(key string) string - Template []byte + // Getter is a function that retrieves a value based on a given key. + Getter func(key string) string + + // Template is the content of the template to be applied. + Template []byte + + // AddtionalEnvironmentVariables is a map of additional environment variables to be set during template application. AddtionalEnvironmentVariables map[string]string - Proxy framework.ClusterProxy + // Proxy is the cluster proxy used for applying the template. + Proxy framework.ClusterProxy + + // OutputFilePath is the path where the output of the template application will be stored. OutputFilePath string } diff --git a/test/framework/chartmuseum_helper.go b/test/framework/chartmuseum_helper.go index 9f3cf6fa..c4bf47d0 100644 --- a/test/framework/chartmuseum_helper.go +++ b/test/framework/chartmuseum_helper.go @@ -30,16 +30,34 @@ import ( "sigs.k8s.io/cluster-api/test/framework" ) -// ChartMuseumInput is the input to DeployChartMuseum. +// ChartMuseumInput represents the input parameters for interacting with ChartMuseum. 
type ChartMuseumInput struct { - HelmBinaryPath string - ChartsPath string + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // ChartsPath is the path to the charts. + ChartsPath string + + // ChartVersion is the version of the chart. + ChartVersion string + + // ChartMuseumManifests are the ChartMuseum manifests. ChartMuseumManifests []byte - DeploymentName string - ServiceName string - PortName string - Proxy framework.ClusterProxy - WaitInterval []interface{} + + // DeploymentName is the name of the deployment. + DeploymentName string + + // ServiceName is the name of the service. + ServiceName string + + // PortName is the name of the port. + PortName string + + // Proxy is the cluster proxy. + Proxy framework.ClusterProxy + + // WaitInterval is the wait interval. + WaitInterval []interface{} } // DeployChartMuseum deploys ChartMuseum and pushes the charts to the local repository. @@ -101,7 +119,7 @@ func DeployChartMuseum(ctx context.Context, input ChartMuseumInput) string { exec.Command( input.HelmBinaryPath, "cm-push", input.ChartsPath, - "rancher-turtles-local", "-a", "v0.0.1", + "rancher-turtles-local", "-a", input.ChartVersion, "--kubeconfig", input.Proxy.GetKubeconfigPath(), ).CombinedOutput() diff --git a/test/framework/clusterctl_helper.go b/test/framework/clusterctl_helper.go index 830a64f3..355e43f0 100644 --- a/test/framework/clusterctl_helper.go +++ b/test/framework/clusterctl_helper.go @@ -26,12 +26,21 @@ import ( . "github.com/onsi/gomega" ) -// ClusterctlGenerateFromTemplateInput is the input to ClusterctlGenerateFromTemplate. +// ClusterctlGenerateFromTemplateInput represents the input parameters for generating from a template. type ClusterctlGenerateFromTemplateInput struct { - ClusterName string - TemplatePath string - OutputFilePath string + // ClusterName is the name of the cluster. + ClusterName string + + // TemplatePath is the path to the template. + TemplatePath string + + // OutputFilePath is the path to the output file. + OutputFilePath string + + // ClusterCtlBinaryPath is the path to the ClusterCtl binary. ClusterCtlBinaryPath string + + // EnvironmentVariables are the environment variables to be set. EnvironmentVariables map[string]string } diff --git a/test/framework/command_helper.go b/test/framework/command_helper.go index bba1f0cc..ccd38f9e 100644 --- a/test/framework/command_helper.go +++ b/test/framework/command_helper.go @@ -25,19 +25,31 @@ import ( . "github.com/onsi/gomega" ) -// RunCommandInput is the input to RunCommand. +// RunCommandInput represents the input parameters for running a command. type RunCommandInput struct { - Command string - Args []string + // Command is the command to be executed. + Command string + + // Args are the arguments to be passed to the command. + Args []string + + // EnvironmentVariables are the environment variables to be set for the command. EnvironmentVariables map[string]string } -// RunCommandResult is the result of RunCommand. +// RunCommandResult represents the result of running a command. type RunCommandResult struct { + // ExitCode is the exit code of the command. ExitCode int - Stdout []byte - Stderr []byte - Error error + + // Stdout is the standard output of the command. + Stdout []byte + + // Stderr is the standard error of the command. + Stderr []byte + + // Error is the error that occurred while running the command. + Error error } // RunCommand will run a command with the given args and environment variables. 
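// Illustrative usage sketch (not part of the patch): how the RunCommandInput and
// RunCommandResult types above are typically exercised. The result-pointer calling
// convention is inferred from the CreateECRCreds hunk further down
// ("cmdPwdRes := &turtlesframework.RunCommandResult{}"); the command, arguments,
// and environment variable shown here are hypothetical.
//
//	res := &turtlesframework.RunCommandResult{}
//	turtlesframework.RunCommand(ctx, turtlesframework.RunCommandInput{
//		Command:              "helm",
//		Args:                 []string{"version", "--short"},
//		EnvironmentVariables: map[string]string{"HELM_NAMESPACE": "default"},
//	}, res)
//	Expect(res.Error).ToNot(HaveOccurred(), "Running the command should succeed")
//	Expect(res.ExitCode).To(Equal(0), "Command should exit cleanly")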
diff --git a/test/framework/fleet_helper.go b/test/framework/fleet_helper.go index 6c74ed0c..23b4cfe8 100644 --- a/test/framework/fleet_helper.go +++ b/test/framework/fleet_helper.go @@ -32,16 +32,31 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// FleetCreateGitRepoInput is the input to FleetCreateGitRepo. +// FleetCreateGitRepoInput represents the input parameters for creating a Git repository in Fleet. type FleetCreateGitRepoInput struct { - Name string - Namespace string - Repo string - Branch string - Paths []string - FleetGeneration int + // Name is the name of the Git repository. + Name string + + // Namespace is the namespace in which the Git repository will be created. + Namespace string + + // Repo is the URL of the Git repository. + Repo string + + // Branch is the branch of the Git repository to use. + Branch string + + // Paths are the paths within the Git repository to sync. + Paths []string + + // FleetGeneration is the generation of the Fleet instance. + FleetGeneration int + + // ClientSecretName is the name of the client secret to use for authentication. ClientSecretName string - ClusterProxy framework.ClusterProxy + + // ClusterProxy is the ClusterProxy instance for interacting with the cluster. + ClusterProxy framework.ClusterProxy } // FleetCreateGitRepo will create and apply a GitRepo resource to the cluster. See the Fleet docs @@ -77,10 +92,15 @@ func FleetCreateGitRepo(ctx context.Context, input FleetCreateGitRepoInput) { }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to apply GitRepo") } -// FleetDeleteGitRepoInput is the input to FleetDeleteGitRepo. +// FleetDeleteGitRepoInput represents the input parameters for deleting a Git repository in Fleet. type FleetDeleteGitRepoInput struct { - Name string - Namespace string + // Name is the name of the Git repository to be deleted. + Name string + + // Namespace is the namespace of the Git repository to be deleted. + Namespace string + + // ClusterProxy is the cluster proxy used for interacting with the cluster. ClusterProxy framework.ClusterProxy } @@ -117,10 +137,13 @@ func FleetDeleteGitRepo(ctx context.Context, input FleetDeleteGitRepoInput) { }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to delete GitRepo") } -// FleetCreateFleetFileInput is the input to FleetCreateFleetFile. +// FleetCreateFleetFileInput represents the input parameters for creating a fleet file. type FleetCreateFleetFileInput struct { + // Namespace is the namespace in which the fleet file will be created. Namespace string - FilePath string + + // FilePath is the file path of the fleet file. + FilePath string } // FleetCreateFleetFile will create a fleet.yaml file in the given location. diff --git a/test/framework/git_helper.go b/test/framework/git_helper.go index f9bf6bc1..544532f4 100644 --- a/test/framework/git_helper.go +++ b/test/framework/git_helper.go @@ -30,10 +30,17 @@ import ( // GitCloneRepoInput is the input to GitCloneRepo. type GitCloneRepoInput struct { - Address string + // Address is the URL of the repository to clone. + Address string + + // CloneLocation is the directory where the repository will be cloned. CloneLocation string - Username string - Password string + + // Username is the username for authentication (optional). + Username string + + // Password is the password for authentication (optional). + Password string } // GitCloneRepo will clone a repo to a given location. 
@@ -66,13 +73,22 @@ func GitCloneRepo(ctx context.Context, input GitCloneRepoInput) string { return cloneDir } -// GitCommitAndPushInput is th einput to GitCommitAndPush. +// GitCommitAndPushInput is the input to GitCommitAndPush. type GitCommitAndPushInput struct { + // CloneLocation is the directory where the repository is cloned. CloneLocation string - Username string - Password string + + // Username is the username for authentication (optional). + Username string + + // Password is the password for authentication (optional). + Password string + + // CommitMessage is the message for the commit. CommitMessage string - GitPushWait []interface{} + + // GitPushWait is the wait time for the git push operation. + GitPushWait []interface{} } // GitCommitAndPush will commit the files for a repo and push the changes to the origin. diff --git a/test/framework/gitea_helper.go b/test/framework/gitea_helper.go index 2e72a110..37ecbf20 100644 --- a/test/framework/gitea_helper.go +++ b/test/framework/gitea_helper.go @@ -25,12 +25,19 @@ import ( "code.gitea.io/sdk/gitea" ) -// GiteaCreateRepoInput is the input to GiteaCreateRepo. +// GiteaCreateRepoInput represents the input parameters for creating a repository in Gitea. type GiteaCreateRepoInput struct { + // ServerAddr is the address of the Gitea server. ServerAddr string - RepoName string - Username string - Password string + + // RepoName is the name of the repository to be created. + RepoName string + + // Username is the username of the user creating the repository. + Username string + + // Password is the password of the user creating the repository. + Password string } // GiteaCreateRepo will create a new repo in the Gitea server. diff --git a/test/framework/kube_helper.go b/test/framework/kube_helper.go index 11712b38..5fb51aa9 100644 --- a/test/framework/kube_helper.go +++ b/test/framework/kube_helper.go @@ -36,10 +36,15 @@ const ( retryableOperationTimeout = 99 * time.Minute ) -// GetNodeAddressInput is th einput to GetNodeAddress. +// GetNodeAddressInput represents the input parameters for retrieving a specific node's address. type GetNodeAddressInput struct { - Lister framework.Lister - NodeIndex int + // Lister is an interface used for listing resources. + Lister framework.Lister + + // NodeIndex is the index of the node to retrieve the address from. + NodeIndex int + + // AddressIndex is the index of the address to retrieve from the node. AddressIndex int } @@ -63,16 +68,27 @@ func GetNodeAddress(ctx context.Context, input GetNodeAddressInput) string { return node.Status.Addresses[input.AddressIndex].Address } -// GetServicePortByNameInput is the input to GetServicePortByName. +// GetServicePortByNameInput represents the input parameters for retrieving a service port by name. type GetServicePortByNameInput struct { - GetLister framework.GetLister - ServiceName string + // GetLister is the function used to retrieve a lister. + GetLister framework.GetLister + + // ServiceName is the name of the service. + ServiceName string + + // ServiceNamespace is the namespace of the service. ServiceNamespace string - PortName string + + // PortName is the name of the port. + PortName string } +// GetServicePortByNameOutput represents the output of the GetServicePortByName function. type GetServicePortByNameOutput struct { - Port int32 + // Port is the port number of the service. + Port int32 + + // NodePort is the node port number of the service. 
NodePort int32 } @@ -104,14 +120,27 @@ func GetServicePortByName(ctx context.Context, input GetServicePortByNameInput, } } -// CreateSecretInput is the input to CreateSecret. +// CreateSecretInput represents the input parameters for creating a secret. type CreateSecretInput struct { - Creator framework.Creator - Name string - Namespace string - Type corev1.SecretType - Data map[string]string - Labels map[string]string + // Creator is the framework.Creator responsible for creating the secret. + Creator framework.Creator + + // Name is the name of the secret. + Name string + + // Namespace is the namespace in which the secret will be created. + Namespace string + + // Type is the type of the secret. + Type corev1.SecretType + + // Data is a map of key-value pairs representing the secret data. + Data map[string]string + + // Labels is a map of key-value pairs representing the labels associated with the secret. + Labels map[string]string + + // Annotations is a map of key-value pairs representing the annotations associated with the secret. Annotations map[string]string } @@ -146,11 +175,16 @@ func CreateSecret(ctx context.Context, input CreateSecretInput) { }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create secret %s", klog.KObj(secret)) } -// AddLabelsToNamespaceInput is th einput to AddLabelsToNamespace. +// AddLabelsToNamespaceInput represents the input parameters for adding labels to a namespace. type AddLabelsToNamespaceInput struct { + // ClusterProxy is the cluster proxy object used for interacting with the Kubernetes cluster. ClusterProxy framework.ClusterProxy - Name string - Labels map[string]string + + // Name is the name of the namespace to which labels will be added. + Name string + + // Labels is a map of key-value pairs representing the labels to be added to the namespace. + Labels map[string]string } // AddLabelsToNamespace will add labels to a namespace. @@ -183,13 +217,25 @@ func AddLabelsToNamespace(ctx context.Context, input AddLabelsToNamespaceInput) }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to update namespace %s with new labels", input.Name) } +// CreateDockerRegistrySecretInput represents the input parameters for creating a Docker registry secret. type CreateDockerRegistrySecretInput struct { + // BootstrapClusterProxy is the bootstrap cluster proxy. BootstrapClusterProxy framework.ClusterProxy - Name string - Namespace string - DockerServer string - DockerUsername string - DockerPassword string + + // Name is the name of the secret. + Name string + + // Namespace is the namespace where the secret will be created. + Namespace string + + // DockerServer is the Docker server URL. + DockerServer string + + // DockerUsername is the username for authenticating with the Docker registry. + DockerUsername string + + // DockerPassword is the password for authenticating with the Docker registry. + DockerPassword string } func CreateDockerRegistrySecret(ctx context.Context, input CreateDockerRegistrySecretInput) { @@ -235,11 +281,18 @@ func CreateDockerRegistrySecret(ctx context.Context, input CreateDockerRegistryS Expect(cmdCreateSecret.ExitCode).To(Equal(0), "Creating secret return non-zero exit code") } -// GetIngressHostInput is the input to GetIngressHost. +// GetIngressHostInput represents the input parameters for retrieving the host of an Ingress. 
type GetIngressHostInput struct { - GetLister framework.GetLister - IngressName string + // GetLister is a function that returns a lister for accessing Kubernetes resources. + GetLister framework.GetLister + + // IngressName is the name of the Ingress. + IngressName string + + // IngressNamespace is the namespace of the Ingress. IngressNamespace string + + // IngressRuleIndex is the index of the Ingress rule. IngressRuleIndex int } diff --git a/test/framework/rancher_helpers.go b/test/framework/rancher_helpers.go index fcd602af..4f4c1419 100644 --- a/test/framework/rancher_helpers.go +++ b/test/framework/rancher_helpers.go @@ -38,20 +38,34 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// RancherGetClusterKubeconfigInput is the input to RancherGetClusterKubeconfig. +// RancherGetClusterKubeconfigInput represents the input parameters for getting the kubeconfig of a cluster in Rancher. type RancherGetClusterKubeconfigInput struct { - Getter framework.Getter - SecretName string - Namespace string - ClusterName string + // Getter is the framework getter used to retrieve the kubeconfig. + Getter framework.Getter + + // SecretName is the name of the secret containing the kubeconfig. + SecretName string + + // Namespace is the namespace of the secret containing the kubeconfig. + Namespace string + + // ClusterName is the name of the cluster. + ClusterName string + + // RancherServerURL is the URL of the Rancher server. RancherServerURL string - WriteToTempFile bool + + // WriteToTempFile indicates whether to write the kubeconfig to a temporary file. + WriteToTempFile bool } -// RancherGetClusterKubeconfigResult is the result of RancherGetClusterKubeconfig. +// RancherGetClusterKubeconfigResult represents the result of getting the kubeconfig for a Rancher cluster. type RancherGetClusterKubeconfigResult struct { + // KubeconfigData contains the kubeconfig data as a byte array. KubeconfigData []byte - TempFilePath string + + // TempFilePath is the temporary file path where the kubeconfig is stored. + TempFilePath string } // RancherGetClusterKubeconfig will get the Kubeconfig for a cluster from Rancher. @@ -203,12 +217,18 @@ func fixConfig(ctx context.Context, name string, config *api.Config) { config.Clusters[currentCluster].Server = controlPlaneURL.String() } +// RancherLookupUserInput represents the input for looking up a user in Rancher. type RancherLookupUserInput struct { + // ClusterProxy is the cluster proxy used for communication with Rancher. ClusterProxy framework.ClusterProxy - Username string + + // Username is the username of the user to look up. + Username string } +// RancherLookupUserResult represents the result of a user lookup in Rancher. type RancherLookupUserResult struct { + // User is the username of the user found in Rancher. User string } diff --git a/test/testenv/aws.go b/test/testenv/aws.go index 74b095de..b7725331 100644 --- a/test/testenv/aws.go +++ b/test/testenv/aws.go @@ -28,21 +28,32 @@ import ( turtlesframework "github.com/rancher/turtles/test/framework" ) +// CreateECRCredsInput represents the input parameters for creating ECR credentials. type CreateECRCredsInput struct { + // BootstrapClusterProxy is the cluster proxy used for bootstrapping. BootstrapClusterProxy framework.ClusterProxy - Name string - Account string - Region string - Namespace string + + // Name is the name of the ECR credentials. + Name string + + // Account is the AWS account associated with the ECR credentials. 
+ Account string + + // Region is the AWS region where the ECR credentials are created. + Region string + + // Namespace is the Kubernetes namespace where the ECR credentials are stored. + Namespace string } +// CreateECRCreds is a function that creates ECR credentials for a given input. It expects the required input parameters to be non-nil. func CreateECRCreds(ctx context.Context, input CreateECRCredsInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for CreateECRCreds") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for CreateECRCreds") - Expect(input.Name).ToNot(BeEmpty(), "Name is required for CreatECRCreds") - Expect(input.Namespace).ToNot(BeEmpty(), "Namespace is required for CreatECRCreds") - Expect(input.Account).ToNot(BeEmpty(), "Account is required for CreatECRCreds") - Expect(input.Region).ToNot(BeEmpty(), "Region is required for CreatECRCreds") + Expect(input.Name).ToNot(BeEmpty(), "Name is required for CreateECRCreds") + Expect(input.Namespace).ToNot(BeEmpty(), "Namespace is required for CreateECRCreds") + Expect(input.Account).ToNot(BeEmpty(), "Account is required for CreateECRCreds") + Expect(input.Region).ToNot(BeEmpty(), "Region is required for CreateECRCreds") By("Getting password for ECR") cmdPwdRes := &turtlesframework.RunCommandResult{} diff --git a/test/testenv/bootstrapclusterproviders.go b/test/testenv/bootstrapclusterproviders.go index 704900a5..c50c7f34 100644 --- a/test/testenv/bootstrapclusterproviders.go +++ b/test/testenv/bootstrapclusterproviders.go @@ -26,9 +26,12 @@ import ( "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) +// CustomClusterProvider is a function type that represents a custom cluster provider. +// It takes in a context, an E2EConfig, a cluster name, and a Kubernetes version as parameters. +// It returns a bootstrap.ClusterProvider. type CustomClusterProvider func(ctx context.Context, config *clusterctl.E2EConfig, clusterName, kubernetesVersion string) bootstrap.ClusterProvider -// EKSBootsrapCluster creates a new EKS bootstrap cluster and returns a ClusterProvider +// EKSBootsrapCluster is a function that creates a new EKS bootstrap cluster and returns a bootstrap.ClusterProvider. func EKSBootsrapCluster(ctx context.Context, config *clusterctl.E2EConfig, clusterName, kubernetesVersion string) bootstrap.ClusterProvider { By("Creating a new EKS bootstrap cluster") diff --git a/test/testenv/charmuseum.go b/test/testenv/charmuseum.go index eab0bb68..fdd82239 100644 --- a/test/testenv/charmuseum.go +++ b/test/testenv/charmuseum.go @@ -28,13 +28,26 @@ import ( turtlesframework "github.com/rancher/turtles/test/framework" ) +// DeployChartMuseumInput represents the input parameters for deploying ChartMuseum. type DeployChartMuseumInput struct { - HelmBinaryPath string + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // BootstrapClusterProxy is the cluster proxy for the bootstrap cluster. BootstrapClusterProxy framework.ClusterProxy - ChartsPath string - WaitInterval []interface{} + + // ChartsPath is the path to the charts. + ChartsPath string + + // ChartVersion is the version of the chart. + ChartVersion string + + // WaitInterval is the interval to wait for. + WaitInterval []interface{} } +// DeployChartMuseum installs ChartMuseum to the Kubernetes cluster using the provided input parameters. +// It expects the required input parameters to be non-nil. 
func DeployChartMuseum(ctx context.Context, input DeployChartMuseumInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for DeployChartMuseum") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for DeployChartMuseum") @@ -46,6 +59,7 @@ func DeployChartMuseum(ctx context.Context, input DeployChartMuseumInput) { turtlesframework.DeployChartMuseum(ctx, turtlesframework.ChartMuseumInput{ HelmBinaryPath: input.HelmBinaryPath, ChartsPath: input.ChartsPath, + ChartVersion: input.ChartVersion, Proxy: input.BootstrapClusterProxy, WaitInterval: input.WaitInterval, ChartMuseumManifests: e2e.ChartMuseum, diff --git a/test/testenv/cleanup.go b/test/testenv/cleanup.go index 3ddb599d..cfc15fd1 100644 --- a/test/testenv/cleanup.go +++ b/test/testenv/cleanup.go @@ -30,12 +30,20 @@ import ( "sigs.k8s.io/cluster-api/test/framework" ) +// CleanupTestClusterInput represents the input parameters for cleaning up a test cluster. type CleanupTestClusterInput struct { + // SetupTestClusterResult contains the result of setting up the test cluster. SetupTestClusterResult - SkipCleanup bool + + // SkipCleanup indicates whether to skip the cleanup process. + SkipCleanup bool + + // ArtifactFolder specifies the folder where artifacts are stored. ArtifactFolder string } +// CleanupTestCluster is a function that cleans up the test cluster. +// It expects the required input parameters to be non-nil. func CleanupTestCluster(ctx context.Context, input CleanupTestClusterInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for CleanupTestCluster") Expect(input.SetupTestClusterResult).ToNot(BeNil(), "SetupTestClusterResult is required for CleanupTestCluster") @@ -69,6 +77,8 @@ var secrets = []string{ "AZURE_TENANT_ID", } +// CollectArtifacts collects artifacts using the provided kubeconfig path, name, and additional arguments. +// It returns an error if the kubeconfig path is empty or if there is an error running the kubectl command. func CollectArtifacts(ctx context.Context, kubeconfigPath, name string, args ...string) error { if kubeconfigPath == "" { return fmt.Errorf("Unable to collect artifacts: kubeconfig path is empty") diff --git a/test/testenv/eks.go b/test/testenv/eks.go index 25fc2291..234e9cd5 100644 --- a/test/testenv/eks.go +++ b/test/testenv/eks.go @@ -28,20 +28,27 @@ import ( turtlesframework "github.com/rancher/turtles/test/framework" ) +// CreateEKSBootstrapClusterAndValidateImagesInput represents the input parameters for creating an EKS bootstrap cluster and validating images. type CreateEKSBootstrapClusterAndValidateImagesInput struct { - Name string - Version string - Region string + // Name is the name of the bootstrap cluster. + Name string + // Version is the version of the bootstrap cluster. + Version string + // Region is the AWS region where the bootstrap cluster will be created. + Region string + // NumWorkers is the number of worker nodes in the bootstrap cluster. NumWorkers int - Images []clusterctl.ContainerImage + // Images is a list of container images to be validated. + Images []clusterctl.ContainerImage } type CreateEKSBootstrapClusterAndValidateImagesInputResult struct { // BootstrapClusterProvider manages provisioning of the bootstrap cluster to be used for the e2e tests. - // Please note that provisioning will be skipped if e2e.use-existing-cluster is provided. BootstrapClusterProvider bootstrap.ClusterProvider } +// CreateEKSBootstrapClusterAndValidateImages is a function that creates an EKS bootstrap cluster and validates images. 
+// It expects the required input parameters to be non-nil. func CreateEKSBootstrapClusterAndValidateImages(ctx context.Context, input CreateEKSBootstrapClusterAndValidateImagesInput, res *CreateEKSBootstrapClusterAndValidateImagesInputResult) { Expect(ctx).ToNot(BeNil(), "Context is required for CreateEKSBootstrapClusterAndValidateImages") Expect(input.Name).ToNot(BeEmpty(), "Name is required for CreateEKSBootstrapClusterAndValidateImages") diff --git a/test/testenv/eksctl_provider.go b/test/testenv/eksctl_provider.go index d083d122..e8b0d1e4 100644 --- a/test/testenv/eksctl_provider.go +++ b/test/testenv/eksctl_provider.go @@ -15,6 +15,8 @@ import ( "sigs.k8s.io/cluster-api/test/framework/bootstrap" ) +// NewEKSClusterProvider creates a new instance of EKSClusterProvider. +// It expects the required input parameters to be non-nil. func NewEKSClusterProvider(name, version, region string, numWorkers int) bootstrap.ClusterProvider { Expect(name).ToNot(BeEmpty(), "name is required for NewEKSClusterProvider") Expect(version).ToNot(BeEmpty(), "version is required for NewEKSClusterProvider") @@ -29,15 +31,25 @@ func NewEKSClusterProvider(name, version, region string, numWorkers int) bootstr } } +// EKSClusterProvider represents a provider for managing EKS clusters. type EKSClusterProvider struct { - name string - version string - region string - numWorkers int + // name of the EKS cluster. + name string + // version of the EKS cluster. + version string + // region where the EKS cluster is located. + region string + // number of worker nodes in the EKS cluster. + numWorkers int + // path to the kubeconfig file for the EKS cluster. kubeconfigPath string } -// Create a EKS cluster. +// Create creates an EKS cluster using eksctl. +// It creates a temporary file for kubeconfig and writes the EKS kubeconfig to it. +// The cluster is created with the specified name, version, number of worker nodes, region, and tags. +// The kubeconfig path is set to the path of the temporary file. func (k *EKSClusterProvider) Create(ctx context.Context) { tempFile, err := os.CreateTemp("", "kubeconfig") Expect(err).NotTo(HaveOccurred(), "Failed to create temp file for kubeconfig") diff --git a/test/testenv/gitea.go b/test/testenv/gitea.go index 5b77aee1..4ffab218 100644 --- a/test/testenv/gitea.go +++ b/test/testenv/gitea.go @@ -35,29 +35,71 @@ import ( "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) +// DeployGiteaInput represents the input parameters for deploying Gitea. type DeployGiteaInput struct { + // BootstrapClusterProxy is the cluster proxy for bootstrapping. BootstrapClusterProxy framework.ClusterProxy - HelmBinaryPath string - ChartRepoName string - ChartRepoURL string - ChartName string - ChartVersion string - ValuesFilePath string - Values map[string]string - RolloutWaitInterval []interface{} - ServiceWaitInterval []interface{} - Username string - Password string - AuthSecretName string - CustomIngressConfig []byte - ServiceType corev1.ServiceType - Variables turtlesframework.VariableCollection + + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // ChartRepoName is the name of the chart repository. + ChartRepoName string + + // ChartRepoURL is the URL of the chart repository. + ChartRepoURL string + + // ChartName is the name of the chart. + ChartName string + + // ChartVersion is the version of the chart. + ChartVersion string + + // ValuesFilePath is the path to the values file. 
+ ValuesFilePath string + + // Values are the values for the chart. + Values map[string]string + + // RolloutWaitInterval is the interval to wait between rollouts. + RolloutWaitInterval []interface{} + + // ServiceWaitInterval is the interval to wait for the service. + ServiceWaitInterval []interface{} + + // Username is the username for authentication. + Username string + + // Password is the password for authentication. + Password string + + // AuthSecretName is the name of the authentication secret. + AuthSecretName string + + // CustomIngressConfig is the custom ingress configuration. + CustomIngressConfig []byte + + // ServiceType is the type of the service. + ServiceType corev1.ServiceType + + // Variables is the collection of variables. + Variables turtlesframework.VariableCollection } +// DeployGiteaResult represents the result of deploying Gitea. type DeployGiteaResult struct { + // GitAddress is the address of the deployed Gitea instance. GitAddress string } +// DeployGitea deploys Gitea using the provided input parameters. +// It expects the required input parameters to be non-nil. +// If the service type is ClusterIP, it checks that the custom ingress config is not empty. +// The function then proceeds to install the Gitea chart using Helm. +// It adds the chart repository, updates the chart, and installs the chart with the specified version and flags. +// After the installation, it waits for the Gitea deployment to be available. +// Depending on the service type, it retrieves the Git server address using the node port, load balancer, or custom ingress. +// If a username is provided, it waits for the Gitea endpoint to be available and creates a Gitea secret with the username and password. func DeployGitea(ctx context.Context, input DeployGiteaInput) *DeployGiteaResult { Expect(ctx).NotTo(BeNil(), "ctx is required for DeployGitea") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for DeployGitea") @@ -214,12 +256,20 @@ func DeployGitea(ctx context.Context, input DeployGiteaInput) *DeployGiteaResult return result } +// UninstallGiteaInput represents the input parameters for uninstalling Gitea. type UninstallGiteaInput struct { + // BootstrapClusterProxy is the cluster proxy for the bootstrap cluster. BootstrapClusterProxy framework.ClusterProxy - HelmBinaryPath string - DeleteWaitInterval []interface{} + + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // DeleteWaitInterval is the interval to wait between deleting resources. + DeleteWaitInterval []interface{} } +// UninstallGitea uninstalls Gitea by removing the Gitea Helm Chart. +// It expects the required input parameters to be non-nil. func UninstallGitea(ctx context.Context, input UninstallGiteaInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for UninstallGitea") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for UninstallGitea") @@ -238,15 +288,17 @@ func UninstallGitea(ctx context.Context, input UninstallGiteaInput) { Expect(err).ToNot(HaveOccurred()) } +// PreGiteaInstallHook is a function that sets the service type for the Gitea input based on the management cluster environment type. +// It expects the required input parameters to be non-nil. 
func PreGiteaInstallHook(giteaInput *DeployGiteaInput, e2eConfig *clusterctl.E2EConfig) { - infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture)) + infrastructureType := e2e.ManagementClusterEnvironmentType(e2eConfig.GetVariable(e2e.ManagementClusterEnvironmentVar)) switch infrastructureType { - case e2e.ManagementClusterInfrastuctureEKS: + case e2e.ManagementClusterEnvironmentEKS: giteaInput.ServiceType = corev1.ServiceTypeLoadBalancer - case e2e.ManagementClusterInfrastuctureIsolatedKind: + case e2e.ManagementClusterEnvironmentIsolatedKind: giteaInput.ServiceType = corev1.ServiceTypeNodePort - case e2e.ManagementClusterInfrastuctureKind: + case e2e.ManagementClusterEnvironmentKind: giteaInput.ServiceType = corev1.ServiceTypeClusterIP default: Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType)) diff --git a/test/testenv/operator.go b/test/testenv/operator.go index e5eee166..6e398304 100644 --- a/test/testenv/operator.go +++ b/test/testenv/operator.go @@ -32,14 +32,28 @@ import ( turtlesframework "github.com/rancher/turtles/test/framework" ) +// CAPIOperatorDeployProviderInput represents the input parameters for deploying a CAPI operator provider. type CAPIOperatorDeployProviderInput struct { - E2EConfig *clusterctl.E2EConfig - BootstrapClusterProxy framework.ClusterProxy - CAPIProvidersSecretsYAML [][]byte - CAPIProvidersYAML []byte - TemplateData map[string]string + // E2EConfig is the configuration for end-to-end testing. + E2EConfig *clusterctl.E2EConfig + + // BootstrapClusterProxy is the proxy for the bootstrap cluster. + BootstrapClusterProxy framework.ClusterProxy + + // CAPIProvidersSecretsYAML is the YAML representation of the secrets for the CAPI providers. + CAPIProvidersSecretsYAML [][]byte + + // CAPIProvidersYAML is the YAML representation of the CAPI providers. + CAPIProvidersYAML []byte + + // TemplateData is the data used for templating. + TemplateData map[string]string + + // WaitDeploymentsReadyInterval is the interval for waiting for deployments to be ready. WaitDeploymentsReadyInterval []interface{} - WaitForDeployments []NamespaceName + + // WaitForDeployments is the list of deployments to wait for. + WaitForDeployments []NamespaceName } type NamespaceName struct { @@ -47,6 +61,10 @@ type NamespaceName struct { Namespace string } +// CAPIOperatorDeployProvider deploys the CAPI operator providers. +// It expects the required input parameters to be non-nil. +// It iterates over the CAPIProvidersSecretsYAML and applies them. Then, it applies the CAPI operator providers. +// If there are no deployments to wait for, the function returns. Otherwise, it waits for the provider deployments to be ready. func CAPIOperatorDeployProvider(ctx context.Context, input CAPIOperatorDeployProviderInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for CAPIOperatorDeployProvider") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for CAPIOperatorDeployProvider") diff --git a/test/testenv/rancher.go b/test/testenv/rancher.go index 10a64bf3..cbc4516f 100644 --- a/test/testenv/rancher.go +++ b/test/testenv/rancher.go @@ -39,31 +39,79 @@ import ( "sigs.k8s.io/yaml" ) +// DeployRancherInput represents the input parameters for deploying Rancher. 
type DeployRancherInput struct { - BootstrapClusterProxy framework.ClusterProxy - HelmBinaryPath string - HelmExtraValuesPath string - InstallCertManager bool - CertManagerChartPath string - CertManagerUrl string - CertManagerRepoName string - RancherChartRepoName string - RancherChartURL string - RancherChartPath string - RancherVersion string - RancherImageTag string - RancherNamespace string - RancherHost string - RancherPassword string - RancherFeatures string - RancherPatches [][]byte - RancherWaitInterval []interface{} - ControllerWaitInterval []interface{} - RancherIngressConfig []byte - RancherServicePatch []byte + // BootstrapClusterProxy is the cluster proxy for bootstrapping. + BootstrapClusterProxy framework.ClusterProxy + + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // HelmExtraValuesPath is the path to the Helm extra values file. + HelmExtraValuesPath string + + // InstallCertManager is the flag indicating whether to install Cert Manager. + InstallCertManager bool + + // CertManagerChartPath is the path to the Cert Manager chart. + CertManagerChartPath string + + // CertManagerUrl is the URL for Cert Manager. + CertManagerUrl string + + // CertManagerRepoName is the repository name for Cert Manager. + CertManagerRepoName string + + // RancherChartRepoName is the repository name for Rancher chart. + RancherChartRepoName string + + // RancherChartURL is the URL for Rancher chart. + RancherChartURL string + + // RancherChartPath is the path to the Rancher chart. + RancherChartPath string + + // RancherVersion is the version of Rancher. + RancherVersion string + + // RancherImageTag is the image tag for Rancher. + RancherImageTag string + + // RancherNamespace is the namespace for Rancher. + RancherNamespace string + + // RancherHost is the host for Rancher. + RancherHost string + + // RancherPassword is the password for Rancher. + RancherPassword string + + // RancherFeatures are the features for Rancher. + RancherFeatures string + + // RancherPatches are the patches for Rancher. + RancherPatches [][]byte + + // RancherWaitInterval is the wait interval for Rancher. + RancherWaitInterval []interface{} + + // ControllerWaitInterval is the wait interval for the controller. + ControllerWaitInterval []interface{} + + // RancherIngressConfig is the ingress configuration for Rancher. + RancherIngressConfig []byte + + // RancherServicePatch is the service patch for Rancher. + RancherServicePatch []byte + + // RancherIngressClassName is the ingress class name for Rancher. RancherIngressClassName string - Development bool - Variables turtlesframework.VariableCollection + + // Development is the flag indicating whether it is a development environment. + Development bool + + // Variables is the collection of variables. + Variables turtlesframework.VariableCollection } type deployRancherValuesFile struct { @@ -79,6 +127,15 @@ type deployRancherIngressValuesFile struct { Credentials ngrokCredentials `json:"credentials"` } +// DeployRancher deploys Rancher using the provided input parameters. +// It expects the required input parameters to be non-nil. +// If InstallCertManager is true, the function will install cert-manager. +// The function adds the cert-manager chart repository and the Rancher chart repository. +// It then updates the Rancher chart repository. +// The function generates the extra values file for Rancher and writes it to the Helm extra values path. +// If RancherIngressConfig is provided, the function sets up the ingress for Rancher. 
+// If RancherServicePatch is provided, the function updates the Rancher service. +// The function waits for the Rancher webhook rollout and the fleet controller rollout. func DeployRancher(ctx context.Context, input DeployRancherInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for DeployRancher") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for DeployRancher") @@ -246,12 +303,20 @@ func DeployRancher(ctx context.Context, input DeployRancherInput) { }, input.ControllerWaitInterval...).ShouldNot(HaveOccurred()) } +// RestartRancherInput represents the input parameters for restarting Rancher. type RestartRancherInput struct { + // BootstrapClusterProxy is the cluster proxy for the bootstrap cluster. BootstrapClusterProxy framework.ClusterProxy - RancherNamespace string - RancherWaitInterval []interface{} + + // RancherNamespace is the namespace where Rancher is deployed. + RancherNamespace string + + // RancherWaitInterval is the wait interval for Rancher restart. + RancherWaitInterval []interface{} } +// RestartRancher restarts the Rancher application by killing its pods. +// It expects the required input parameters to be non-nil. func RestartRancher(ctx context.Context, input RestartRancherInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for RestartRancher") Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for RestartRancher") @@ -268,28 +333,74 @@ func RestartRancher(ctx context.Context, input RestartRancherInput) { type IngressType string const ( - CustomIngress IngressType = "custom" - NgrokIngress IngressType = "ngrok" + // CustomIngress represents a custom ingress type. + CustomIngress IngressType = "custom" + + // NgrokIngress represents an ngrok ingress type. + NgrokIngress IngressType = "ngrok" + + // EKSNginxIngress represents an EKS nginx ingress type. EKSNginxIngress IngressType = "eks" ) +// RancherDeployIngressInput represents the input parameters for deploying an ingress in Rancher. type RancherDeployIngressInput struct { - BootstrapClusterProxy framework.ClusterProxy - HelmBinaryPath string - HelmExtraValuesPath string - CustomIngress []byte // TODO: add ability to pass a function that deploys the custom ingress - CustomIngressNamespace string - CustomIngressDeployment string - IngressWaitInterval []interface{} + // BootstrapClusterProxy is the cluster proxy for the bootstrap cluster. + BootstrapClusterProxy framework.ClusterProxy + + // HelmBinaryPath is the path to the Helm binary. + HelmBinaryPath string + + // HelmExtraValuesPath is the path to the Helm extra values file. + HelmExtraValuesPath string + + // CustomIngress is the custom ingress to be deployed. + CustomIngress []byte + + // CustomIngressNamespace is the namespace for the custom ingress. + CustomIngressNamespace string + + // CustomIngressDeployment is the deployment name for the custom ingress. + CustomIngressDeployment string + + // IngressWaitInterval is the wait interval for the ingress deployment. + IngressWaitInterval []interface{} + + // DefaultIngressClassPatch is the default ingress class patch. DefaultIngressClassPatch []byte - IngressType IngressType - NgrokApiKey string - NgrokAuthToken string - NgrokPath string - NgrokRepoName string - NgrokRepoURL string + + // IngressType is the type of ingress to be deployed. + IngressType IngressType + + // NgrokApiKey is the API key for Ngrok. + NgrokApiKey string + + // NgrokAuthToken is the authentication token for Ngrok. 
+	NgrokAuthToken string
+
+	// NgrokPath is the path to the Ngrok binary.
+	NgrokPath string
+
+	// NgrokRepoName is the name of the Ngrok repository.
+	NgrokRepoName string
+
+	// NgrokRepoURL is the URL of the Ngrok repository.
+	NgrokRepoURL string
 }

+// RancherDeployIngress deploys an ingress based on the provided input.
+// It expects the required input parameters to be non-nil.
+// - If the IngressType is CustomIngress:
+// - CustomIngress, CustomIngressNamespace, CustomIngressDeployment, and IngressWaitInterval must not be empty.
+// - deployIsolatedModeIngress is called with the provided context and input.
+//
+// - If the IngressType is NgrokIngress:
+// - NgrokApiKey, NgrokAuthToken, NgrokPath, NgrokRepoName, NgrokRepoURL, and HelmExtraValuesPath must not be empty.
+// - deployNgrokIngress is called with the provided context and input.
+//
+// - If the IngressType is EKSNginxIngress:
+// - IngressWaitInterval must not be nil.
+// - deployEKSIngress is called with the provided input.
 func RancherDeployIngress(ctx context.Context, input RancherDeployIngressInput) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for RancherDeployIngress")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for RancherDeployIngress")
@@ -417,24 +528,44 @@ func deployNgrokIngress(ctx context.Context, input RancherDeployIngressInput) {
 	Expect(input.BootstrapClusterProxy.Apply(ctx, input.DefaultIngressClassPatch, "--server-side")).To(Succeed())
 }

+// PreRancherInstallHookInput represents the input parameters for the pre-Rancher install hook.
 type PreRancherInstallHookInput struct {
-	Ctx context.Context
-	RancherInput *DeployRancherInput
-	PreSetupOutput PreManagementClusterSetupResult
+	// Ctx is the context for the hook execution.
+	Ctx context.Context
+
+	// RancherInput is the input parameters for deploying Rancher.
+	RancherInput *DeployRancherInput
+
+	// PreSetupOutput is the output of the pre-management cluster setup.
+	PreSetupOutput PreManagementClusterSetupResult
+
+	// SetupClusterResult is the result of setting up the test cluster.
 	SetupClusterResult *SetupTestClusterResult
-	E2EConfig *clusterctl.E2EConfig
+
+	// E2EConfig is the E2E configuration for the cluster.
+	E2EConfig *clusterctl.E2EConfig
 }

+// PreRancherInstallHookResult represents the result of a pre-Rancher install hook.
 type PreRancherInstallHookResult struct {
+	// HostName is the hostname of the Rancher installation.
 	HostName string
 }

-// PreRancherInstallHook is a hook that can be used to perform actions before Rancher is installed.
+// PreRancherInstallHook is a function that performs pre-installation tasks for Rancher.
+// The function retrieves the management cluster environment type from the E2E config and performs different actions based on it.
+// If the environment type is e2e.ManagementClusterEnvironmentEKS, it retrieves the ingress hostname and sets it as the Rancher host.
+// It also creates a Docker registry secret with the ghcr.io credentials.
+// If the environment type is e2e.ManagementClusterEnvironmentIsolatedKind, it sets the isolated host name as the Rancher host.
+// If the environment type is e2e.ManagementClusterEnvironmentKind, it sets the Rancher ingress config and service patch based on the provided values.
+// The function returns the host name as part of the PreRancherInstallHookResult.
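// Illustrative sketch: the per-environment host selection performed by the function
// below, reduced to a standalone program with stand-in types. All names here are
// assumptions for illustration; the real hook uses e2e.ManagementClusterEnvironmentType
// and mutates input.RancherInput rather than returning a value.
package main

import "fmt"

type environmentType string

const (
	envEKS          environmentType = "eks"
	envIsolatedKind environmentType = "isolated-kind"
	envKind         environmentType = "kind"
)

// rancherHost mirrors the dispatch: EKS takes the ingress service hostname,
// isolated-kind takes the internal node hostname, kind takes RANCHER_HOSTNAME.
func rancherHost(env environmentType, ingressHost, isolatedHost, configuredHost string) (string, error) {
	switch env {
	case envEKS:
		return ingressHost, nil
	case envIsolatedKind:
		return isolatedHost, nil
	case envKind:
		return configuredHost, nil
	default:
		return "", fmt.Errorf("invalid management cluster environment type %q", env)
	}
}

func main() {
	host, err := rancherHost(envIsolatedKind, "lb.example.com", "172.18.0.2", "my-host.ngrok.dev")
	if err != nil {
		panic(err)
	}
	fmt.Println(host) // prints: 172.18.0.2
}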
 func PreRancherInstallHook(input *PreRancherInstallHookInput) PreRancherInstallHookResult {
 	hostName := ""
-	switch e2e.ManagementClusterInfrastuctureType(input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture)) {
-	case e2e.ManagementClusterInfrastuctureEKS:
+	infrastructureType := e2e.ManagementClusterEnvironmentType(input.E2EConfig.GetVariable(e2e.ManagementClusterEnvironmentVar))
+
+	switch infrastructureType {
+	case e2e.ManagementClusterEnvironmentEKS:
 		By("Getting ingress hostname")
 		svcRes := &WaitForServiceIngressHostnameResult{}
 		WaitForServiceIngressHostname(input.Ctx, WaitForServiceIngressHostnameInput{
@@ -458,17 +589,17 @@ func PreRancherInstallHook(input *PreRancherInstallHookInput) PreRancherInstallH
 		})
 		input.RancherInput.RancherIngressClassName = "nginx"

-	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+	case e2e.ManagementClusterEnvironmentIsolatedKind:
 		hostName = input.SetupClusterResult.IsolatedHostName
 		input.RancherInput.RancherHost = hostName
-	case e2e.ManagementClusterInfrastuctureKind:
+	case e2e.ManagementClusterEnvironmentKind:
 		// i.e. we are using ngrok locally
 		input.RancherInput.RancherIngressConfig = e2e.IngressConfig
 		input.RancherInput.RancherServicePatch = e2e.RancherServicePatch
 		hostName = input.E2EConfig.GetVariable(e2e.RancherHostnameVar)
 		input.RancherInput.RancherHost = hostName
 	default:
-		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", input.E2EConfig.GetVariable(e2e.ManagementClusterInfrastucture)))
+		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType))
 	}

 	return PreRancherInstallHookResult{
diff --git a/test/testenv/service.go b/test/testenv/service.go
index 6c5eea0f..2db8b187 100644
--- a/test/testenv/service.go
+++ b/test/testenv/service.go
@@ -32,17 +32,29 @@ import (
 	turtlesframework "github.com/rancher/turtles/test/framework"
 )

+// WaitForServiceIngressHostnameInput represents the input parameters for waiting for a service ingress hostname.
 type WaitForServiceIngressHostnameInput struct {
+	// BootstrapClusterProxy is the cluster proxy for the bootstrap cluster.
 	BootstrapClusterProxy framework.ClusterProxy
-	ServiceName string
-	ServiceNamespace string
-	IngressWaitInterval []interface{}
+
+	// ServiceName is the name of the service.
+	ServiceName string
+
+	// ServiceNamespace is the namespace of the service.
+	ServiceNamespace string
+
+	// IngressWaitInterval is the interval to wait between ingress checks.
+	IngressWaitInterval []interface{}
 }

+// WaitForServiceIngressHostnameResult represents the result of waiting for the service ingress hostname.
 type WaitForServiceIngressHostnameResult struct {
+	// Hostname is the hostname of the service ingress.
 	Hostname string
 }

+// WaitForServiceIngressHostname waits for a service to have an external IP and retrieves its hostname.
+// It expects the required input parameters to be non-nil.
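// For reference, one plausible way to implement this wait with plain client-go,
// shown as a hedged sketch (the framework helper below uses the ClusterProxy
// client and Gomega-style polling instead; names here are illustrative):
package testenvsketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForIngressHostname polls the Service until its load balancer reports a
// hostname, then returns it. Transient get errors are retried until timeout.
func waitForIngressHostname(ctx context.Context, cs kubernetes.Interface, namespace, name string) (string, error) {
	var hostname string
	err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
		svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // keep polling on transient errors
		}
		for _, ing := range svc.Status.LoadBalancer.Ingress {
			if ing.Hostname != "" {
				hostname = ing.Hostname
				return true, nil
			}
		}
		return false, nil
	})
	return hostname, err
}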
 func WaitForServiceIngressHostname(ctx context.Context, input WaitForServiceIngressHostnameInput, result *WaitForServiceIngressHostnameResult) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForServiceIngressHostname")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for WaitForServiceIngressHostname")
diff --git a/test/testenv/setupcluster.go b/test/testenv/setupcluster.go
index cb4a2043..1640f677 100644
--- a/test/testenv/setupcluster.go
+++ b/test/testenv/setupcluster.go
@@ -36,15 +36,30 @@ import (
 	turtlesframework "github.com/rancher/turtles/test/framework"
 )

+// SetupTestClusterInput represents the input parameters for setting up a test cluster.
 type SetupTestClusterInput struct {
-	UseExistingCluster bool
-	E2EConfig *clusterctl.E2EConfig
+	// UseExistingCluster specifies whether to use an existing cluster or create a new one.
+	UseExistingCluster bool
+
+	// E2EConfig is the configuration for end-to-end testing.
+	E2EConfig *clusterctl.E2EConfig
+
+	// ClusterctlConfigPath is the path to the clusterctl configuration file.
 	ClusterctlConfigPath string
-	Scheme *runtime.Scheme
-	ArtifactFolder string
-	// Hostname string
-	KubernetesVersion string
-	HelmBinaryPath string
+
+	// Scheme is the runtime scheme.
+	Scheme *runtime.Scheme
+
+	// ArtifactFolder is the folder where artifacts are stored.
+	ArtifactFolder string
+
+	// KubernetesVersion is the version of Kubernetes to use.
+	KubernetesVersion string
+
+	// HelmBinaryPath is the path to the Helm binary.
+	HelmBinaryPath string
+
+	// CustomClusterProvider is a custom cluster provider.
 	CustomClusterProvider CustomClusterProvider
 }

@@ -63,6 +78,8 @@ type SetupTestClusterResult struct {
 	IsolatedHostName string
 }

+// SetupTestCluster sets up a test cluster for running tests.
+// It expects the required input parameters to be non-nil.
 func SetupTestCluster(ctx context.Context, input SetupTestClusterInput) *SetupTestClusterResult {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for setupTestCluster")
 	Expect(input.E2EConfig).ToNot(BeNil(), "E2EConfig is required for setupTestCluster")
@@ -120,7 +137,7 @@ func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *run
 	return clusterProvider, proxy
 }

-// configureIsolatedEnvironment gets the internal by setting it to the IP of the first and only node in the boostrap cluster. Labels the node with
+// getInternalClusterHostname gets the internal hostname by setting it to the IP of the first and only node in the bootstrap cluster. It labels the node with
 // "ingress-ready" so that the nginx ingress controller can pick it up, required by kind. See: https://kind.sigs.k8s.io/docs/user/ingress/#create-cluster
 // This hostname can be used in an environment where the cluster is isolated from the outside world and a Rancher hostname is required.
 func getInternalClusterHostname(ctx context.Context, clusterProxy framework.ClusterProxy) string {
@@ -146,22 +163,35 @@ func createClusterName(baseName string) string {
 	return fmt.Sprintf("%s-%s", baseName, util.RandomString(6))
 }

-// PreManagementClusterSetupResult is the output of the preManagementClusterSetupHook.
+// PreManagementClusterSetupResult represents the result of pre-management cluster setup.
 type PreManagementClusterSetupResult struct {
-	IngressType IngressType
-	DockerUsername string
-	DockerPassword string
+	// IngressType specifies the type of ingress for the cluster.
+	IngressType IngressType
+
+	// DockerUsername is the username for accessing the Docker registry.
+	DockerUsername string
+
+	// DockerPassword is the password for accessing the Docker registry.
+	DockerPassword string
+
+	// CustomClusterProvider represents the custom cluster provider for the cluster.
 	CustomClusterProvider CustomClusterProvider
 }

-// PreManagementClusterSetupHook is a hook that can be used to perform actions before the management cluster is setup.
+// PreManagementClusterSetupHook is a function that performs pre-setup tasks for the management cluster.
+// It expects the required input parameters to be non-nil.
+// It checks the environment type and sets the Docker username, Docker password, custom cluster provider, and ingress type accordingly.
+// If the environment type is e2e.ManagementClusterEnvironmentEKS, it expects the GITHUB_USERNAME and GITHUB_TOKEN environment variables to be set.
+// If the environment type is e2e.ManagementClusterEnvironmentIsolatedKind, it sets the ingress type to CustomIngress.
+// If the environment type is e2e.ManagementClusterEnvironmentKind, it sets the ingress type to NgrokIngress.
+// If the environment type is not recognized, it fails with an error message indicating the invalid infrastructure type.
 func PreManagementClusterSetupHook(e2eConfig *clusterctl.E2EConfig) PreManagementClusterSetupResult {
 	output := PreManagementClusterSetupResult{}

-	infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture))
+	infrastructureType := e2e.ManagementClusterEnvironmentType(e2eConfig.GetVariable(e2e.ManagementClusterEnvironmentVar))

 	switch infrastructureType {
-	case e2e.ManagementClusterInfrastuctureEKS:
+	case e2e.ManagementClusterEnvironmentEKS:
 		output.DockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(output.DockerUsername).NotTo(BeEmpty(), "Github username is required")
 		output.DockerPassword = os.Getenv("GITHUB_TOKEN")
@@ -169,9 +199,9 @@ func PreManagementClusterSetupHook(e2eConfig *clusterctl.E2EConfig) PreManagemen
 		output.CustomClusterProvider = EKSBootsrapCluster
 		Expect(output.CustomClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
 		output.IngressType = EKSNginxIngress
-	case e2e.ManagementClusterInfrastuctureIsolatedKind:
+	case e2e.ManagementClusterEnvironmentIsolatedKind:
 		output.IngressType = CustomIngress
-	case e2e.ManagementClusterInfrastuctureKind:
+	case e2e.ManagementClusterEnvironmentKind:
 		output.IngressType = NgrokIngress
 	default:
 		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType))
diff --git a/test/testenv/turtles.go b/test/testenv/turtles.go
index 6ad05c86..fc323e49 100644
--- a/test/testenv/turtles.go
+++ b/test/testenv/turtles.go
@@ -35,32 +35,75 @@ import (
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 )

+// DeployRancherTurtlesInput represents the input parameters for deploying Rancher Turtles.
 type DeployRancherTurtlesInput struct {
-	BootstrapClusterProxy framework.ClusterProxy
-	HelmBinaryPath string
-	ChartPath string
-	CAPIProvidersSecretYAML []byte
-	CAPIProvidersYAML []byte
-	Namespace string
-	Image string
-	Tag string
-	Version string
+	// BootstrapClusterProxy is the cluster proxy for the bootstrap cluster.
+	BootstrapClusterProxy framework.ClusterProxy
+
+	// HelmBinaryPath is the path to the Helm binary.
+	HelmBinaryPath string
+
+	// TurtlesChartUrl is the URL of the Turtles chart.
+	TurtlesChartUrl string
+
+	// TurtlesChartPath is the path to the Turtles chart.
+	TurtlesChartPath string
+
+	// TurtlesChartRepoName is the name of the Turtles chart repository.
+	TurtlesChartRepoName string
+
+	// CAPIProvidersSecretYAML is the YAML content of the CAPI providers secret.
+	CAPIProvidersSecretYAML []byte
+
+	// CAPIProvidersYAML is the YAML content of the CAPI providers.
+	CAPIProvidersYAML []byte
+
+	// Namespace is the namespace for deploying Rancher Turtles.
+	Namespace string
+
+	// Image is the image for Rancher Turtles.
+	Image string
+
+	// Tag is the tag for Rancher Turtles.
+	Tag string
+
+	// Version is the version of Rancher Turtles.
+	Version string
+
+	// WaitDeploymentsReadyInterval is the interval for waiting for deployments to be ready.
 	WaitDeploymentsReadyInterval []interface{}
-	AdditionalValues map[string]string
+
+	// AdditionalValues are the additional values for Rancher Turtles.
+	AdditionalValues map[string]string
 }

+// UninstallRancherTurtlesInput represents the input parameters for uninstalling Rancher Turtles.
 type UninstallRancherTurtlesInput struct {
+	// BootstrapClusterProxy is the cluster proxy for the bootstrap cluster.
 	BootstrapClusterProxy framework.ClusterProxy
-	HelmBinaryPath string
-	Namespace string
-	DeleteWaitInterval []interface{}
+
+	// HelmBinaryPath is the path to the Helm binary.
+	HelmBinaryPath string
+
+	// Namespace is the namespace where Rancher Turtles is installed.
+	Namespace string
+
+	// DeleteWaitInterval is the wait interval for deleting resources.
+	DeleteWaitInterval []interface{}
 }

+// DeployRancherTurtles deploys Rancher Turtles to the specified Kubernetes cluster.
+// It expects the required input parameters to be non-nil.
+// If the version is specified but the TurtlesChartUrl is empty, it adds an external rancher-turtles chart repo for the chartmuseum use case. If the TurtlesChartUrl is specified, it adds the Turtles chart repo under TurtlesChartRepoName.
+// After adding the necessary chart repos, the function installs the rancher-turtles chart. It sets the additional values for the chart based on the input parameters.
+// If the image and tag are specified, it sets the corresponding values in the chart. If only the version is specified, it adds the version flag to the chart's additional flags.
+// The function then adds the CAPI infrastructure providers and waits for the CAPI deployments to be available. It waits for the capi-controller-manager, capi-kubeadm-bootstrap-controller-manager,
+// capi-kubeadm-control-plane-controller-manager, capd-controller-manager, rke2-bootstrap-controller-manager, and rke2-control-plane-controller-manager deployments to be available.
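// The chart-source branching described above can be condensed into a standalone
// helper for illustration (hypothetical names; the real code drives an
// opframework.HelmChart repo-add and install rather than returning values):
package main

import "fmt"

// turtlesChartPlan returns the helm install ref plus the repo to add, if any:
// a pinned version with no TurtlesChartUrl switches to the external chartmuseum
// repo, while an explicit TurtlesChartUrl is registered under TurtlesChartRepoName.
func turtlesChartPlan(chartPath, chartURL, repoName, version string) (installRef, repoAddName, repoAddPath string) {
	switch {
	case version != "" && chartURL == "":
		return "rancher-turtles-external/rancher-turtles", "rancher-turtles-external", chartPath
	case chartURL != "":
		return chartPath, repoName, chartURL
	default:
		return chartPath, "", ""
	}
}

func main() {
	ref, repo, path := turtlesChartPlan("/tmp/charts/rancher-turtles", "", "", "v0.6.0")
	fmt.Println(ref, repo, path) // rancher-turtles-external/rancher-turtles rancher-turtles-external /tmp/charts/rancher-turtles
}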
 func DeployRancherTurtles(ctx context.Context, input DeployRancherTurtlesInput) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for DeployRancherTurtles")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for DeployRancherTurtles")
 	Expect(input.CAPIProvidersYAML).ToNot(BeNil(), "CAPIProvidersYAML is required for DeployRancherTurtles")
-	Expect(input.ChartPath).ToNot(BeEmpty(), "ChartPath is required for DeployRancherTurtles")
+	Expect(input.TurtlesChartPath).ToNot(BeEmpty(), "TurtlesChartPath is required for DeployRancherTurtles")
 	Expect(input.HelmBinaryPath).ToNot(BeEmpty(), "HelmBinaryPath is required for DeployRancherTurtles")
 	Expect(input.WaitDeploymentsReadyInterval).ToNot(BeNil(), "WaitDeploymentsReadyInterval is required for DeployRancherTurtles")

@@ -74,15 +117,29 @@ func DeployRancherTurtles(ctx context.Context, input DeployRancherTurtlesInput)
 		Expect(input.BootstrapClusterProxy.Apply(ctx, input.CAPIProvidersSecretYAML)).To(Succeed())
 	}

-	chartPath := input.ChartPath
-	if input.Version != "" {
+	chartPath := input.TurtlesChartPath
+	if input.Version != "" && input.TurtlesChartUrl == "" {
 		chartPath = "rancher-turtles-external/rancher-turtles"
 		By("Adding external rancher turtles chart repo")
 		addChart := &opframework.HelmChart{
 			BinaryPath: input.HelmBinaryPath,
 			Name: "rancher-turtles-external",
-			Path: input.ChartPath,
+			Path: input.TurtlesChartPath,
+			Commands: opframework.Commands(opframework.Repo, opframework.Add),
+			AdditionalFlags: opframework.Flags("--force-update"),
+			Kubeconfig: input.BootstrapClusterProxy.GetKubeconfigPath(),
+		}
+		_, err := addChart.Run(nil)
+		Expect(err).ToNot(HaveOccurred())
+	}
+
+	if input.TurtlesChartUrl != "" {
+		By("Adding Turtles chart repo")
+		addChart := &opframework.HelmChart{
+			BinaryPath: input.HelmBinaryPath,
+			Name: input.TurtlesChartRepoName,
+			Path: input.TurtlesChartUrl,
 			Commands: opframework.Commands(opframework.Repo, opframework.Add),
 			AdditionalFlags: opframework.Flags("--force-update"),
 			Kubeconfig: input.BootstrapClusterProxy.GetKubeconfigPath(),
@@ -187,18 +244,42 @@ func DeployRancherTurtles(ctx context.Context, input DeployRancherTurtlesInput)
 	}, input.WaitDeploymentsReadyInterval...)
 }

+// UpgradeRancherTurtlesInput represents the input parameters for upgrading Rancher Turtles.
 type UpgradeRancherTurtlesInput struct {
-	BootstrapClusterProxy framework.ClusterProxy
-	HelmBinaryPath string
-	Namespace string
+	// BootstrapClusterProxy is the cluster proxy for the bootstrap cluster.
+	BootstrapClusterProxy framework.ClusterProxy
+
+	// HelmBinaryPath is the path to the Helm binary.
+	HelmBinaryPath string
+
+	// Namespace is the namespace for the deployment.
+	Namespace string
+
+	// WaitDeploymentsReadyInterval is the interval for waiting until deployments are ready.
 	WaitDeploymentsReadyInterval []interface{}
-	AdditionalValues map[string]string
-	Image string
-	Tag string
-	PostUpgradeSteps []func()
-	SkipCleanup bool
+
+	// AdditionalValues are the additional values for the Helm chart.
+	AdditionalValues map[string]string
+
+	// Image is the image for the deployment.
+	Image string
+
+	// Tag is the tag for the deployment.
+	Tag string
+
+	// PostUpgradeSteps are the post-upgrade steps to be executed.
+	PostUpgradeSteps []func()
+
+	// SkipCleanup indicates whether to skip the cleanup after the upgrade.
+	SkipCleanup bool
 }

+// UpgradeRancherTurtles upgrades the rancher-turtles chart.
+// It expects the required input parameters to be non-nil.
+// The function performs the following steps:
+// 1. Validates the input parameters to ensure they are not empty or nil.
+// 2. Upgrades the rancher-turtles chart by executing the necessary helm commands.
+// 3. Executes any post-upgrade steps provided in the input.
 func UpgradeRancherTurtles(ctx context.Context, input UpgradeRancherTurtlesInput) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeRancherTurtles")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for UpgradeRancherTurtles")
@@ -267,6 +348,8 @@ func UpgradeRancherTurtles(ctx context.Context, input UpgradeRancherTurtlesInput
 	}
 }

+// UninstallRancherTurtles uninstalls the Rancher Turtles chart.
+// It expects the required input parameters to be non-nil.
 func UninstallRancherTurtles(ctx context.Context, input UninstallRancherTurtlesInput) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for UninstallRancherTurtles")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for UninstallRancherTurtles")
@@ -294,35 +377,43 @@ func UninstallRancherTurtles(ctx context.Context, input UninstallRancherTurtlesI
 	Expect(err).ToNot(HaveOccurred())
 }

-// PreRancherTurtlesInstallHook is a hook that can be used to perform actions before Rancher Turtles is installed.
+// PreRancherTurtlesInstallHook is a function that sets additional values for the Rancher Turtles installation based on the management cluster environment type.
+// If the environment type is e2e.ManagementClusterEnvironmentEKS, the image pull secrets are set to "{regcred}" and the image pull policy is set to "IfNotPresent".
+// If the environment type is e2e.ManagementClusterEnvironmentIsolatedKind or e2e.ManagementClusterEnvironmentKind, the image pull policy is set to "Never".
+// If the environment type is not recognized, the function fails with an error message indicating the invalid infrastructure type.
 func PreRancherTurtlesInstallHook(rtInput *DeployRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) {
-	infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture))
+	infrastructureType := e2e.ManagementClusterEnvironmentType(e2eConfig.GetVariable(e2e.ManagementClusterEnvironmentVar))

 	switch infrastructureType {
-	case e2e.ManagementClusterInfrastuctureEKS:
+	case e2e.ManagementClusterEnvironmentEKS:
 		rtInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
 		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
-	case e2e.ManagementClusterInfrastuctureIsolatedKind:
-		// NOTE: rancher turtles image is loadded into kind manually, we can set the imagePullPolicy to Never
+	case e2e.ManagementClusterEnvironmentIsolatedKind:
+		// NOTE: rancher turtles image is loaded into kind manually, we can set the imagePullPolicy to Never
 		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-	case e2e.ManagementClusterInfrastuctureKind:
+	case e2e.ManagementClusterEnvironmentKind:
 		rtInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
 	default:
 		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType))
 	}
 }

-// PreRancherTurtlesInstallHook is a hook that can be used to perform actions before Rancher Turtles is installed.
+// PreRancherTurtlesUpgradelHook is a function that handles the pre-upgrade hook for Rancher Turtles.
+// If the environment type is e2e.ManagementClusterEnvironmentEKS, it sets the imagePullSecrets and imagePullPolicy values in rtUpgradeInput.
+// If the environment type is e2e.ManagementClusterEnvironmentIsolatedKind, it sets the imagePullPolicy value in rtUpgradeInput to "Never".
+// If the environment type is e2e.ManagementClusterEnvironmentKind, it sets the imagePullPolicy value in rtUpgradeInput to "Never".
+// If the environment type is not recognized, it fails with an error message indicating the invalid infrastructure type.
 func PreRancherTurtlesUpgradelHook(rtUpgradeInput *UpgradeRancherTurtlesInput, e2eConfig *clusterctl.E2EConfig) {
-	infrastructureType := e2e.ManagementClusterInfrastuctureType(e2eConfig.GetVariable(e2e.ManagementClusterInfrastucture))
+	infrastructureType := e2e.ManagementClusterEnvironmentType(e2eConfig.GetVariable(e2e.ManagementClusterEnvironmentVar))
+
 	switch infrastructureType {
-	case e2e.ManagementClusterInfrastuctureEKS:
+	case e2e.ManagementClusterEnvironmentEKS:
 		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullSecrets"] = "{regcred}"
 		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "IfNotPresent"
-	case e2e.ManagementClusterInfrastuctureIsolatedKind:
-		// NOTE: rancher turtles image is loadded into kind manually, we can set the imagePullPolicy to Never
+	case e2e.ManagementClusterEnvironmentIsolatedKind:
+		// NOTE: rancher turtles image is loaded into kind manually, we can set the imagePullPolicy to Never
 		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
-	case e2e.ManagementClusterInfrastuctureKind:
+	case e2e.ManagementClusterEnvironmentKind:
 		rtUpgradeInput.AdditionalValues["rancherTurtles.imagePullPolicy"] = "Never"
 	default:
 		Fail(fmt.Sprintf("Invalid management cluster infrastructure type %q", infrastructureType))
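// As a closing illustration, the AdditionalValues maps set by these hooks end up
// as helm --set flags; a minimal, hypothetical renderer (not the framework's
// actual code) could look like this:
package main

import (
	"fmt"
	"sort"
)

// setFlags renders a values map as deterministic --set flags, sorting keys so
// repeated runs produce identical helm invocations.
func setFlags(values map[string]string) []string {
	keys := make([]string, 0, len(values))
	for k := range values {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	flags := make([]string, 0, len(keys))
	for _, k := range keys {
		flags = append(flags, fmt.Sprintf("--set %s=%s", k, values[k]))
	}
	return flags
}

func main() {
	fmt.Println(setFlags(map[string]string{
		"rancherTurtles.imagePullSecrets": "{regcred}",
		"rancherTurtles.imagePullPolicy":  "IfNotPresent",
	}))
}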