OCM-9891 | test: Automated ids:69143,69144,76344,76345
86254860 authored and openshift-merge-bot[bot] committed Oct 10, 2024
1 parent a2f1b94 commit 6afdebf
Showing 13 changed files with 221 additions and 82 deletions.
4 changes: 2 additions & 2 deletions go.mod
@@ -15,8 +15,8 @@ require (
github.com/hashicorp/terraform-plugin-sdk v1.17.2
github.com/onsi/ginkgo/v2 v2.17.1
github.com/onsi/gomega v1.30.0
-github.com/openshift-online/ocm-common v0.0.10
-github.com/openshift-online/ocm-sdk-go v0.1.438
+github.com/openshift-online/ocm-common v0.0.11
+github.com/openshift-online/ocm-sdk-go v0.1.443
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.3
github.com/thoas/go-funk v0.9.3
8 changes: 4 additions & 4 deletions go.sum
@@ -498,10 +498,10 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/openshift-online/ocm-common v0.0.10 h1:J3QtAOK/ZQlHTPB4uQfshO45vjYdJ9099PLM0HoOUR0=
-github.com/openshift-online/ocm-common v0.0.10/go.mod h1:6MWje2NFNJ3IWpGs7BYj6DWagWXHyp8EnmYY7XFTtI4=
-github.com/openshift-online/ocm-sdk-go v0.1.438 h1:tsLCCUzbLCTL4RZG02y9RuopmGCXp2cjxqhdyCutdes=
-github.com/openshift-online/ocm-sdk-go v0.1.438/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
+github.com/openshift-online/ocm-common v0.0.11 h1:DOj7fB59q0vAUFxSEQpLPp2AkReCCFq3r3NMaoZU20I=
+github.com/openshift-online/ocm-common v0.0.11/go.mod h1:6MWje2NFNJ3IWpGs7BYj6DWagWXHyp8EnmYY7XFTtI4=
+github.com/openshift-online/ocm-sdk-go v0.1.443 h1:wb79sOFAzA2f4hvJMOz2YJ6Q0HTIXY3kbDJoy4/xnBg=
+github.com/openshift-online/ocm-sdk-go v0.1.443/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
80 changes: 9 additions & 71 deletions tests/e2e/classic_machine_pool_test.go
@@ -273,7 +273,7 @@ var _ = Describe("Create MachinePool", ci.Day2, ci.FeatureMachinepool, func() {
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("Attribute 'replicas'\nmust be a non-negative integer"))
helper.ExpectTFErrorContains(err, "Attribute 'replicas' must be a non-negative integer")

By("Create machinepool with invalid instance type")
mpArgs = &exec.MachinePoolArgs{
@@ -284,7 +284,7 @@
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("Machine type\n'%s' is not supported for cloud provider", InvalidInstanceType))
helper.ExpectTFErrorContains(err, "Machine type '"+InvalidInstanceType+"' is not supported for cloud provider")

By("Create machinepool with setting replicas and enable-autoscaling at the same time")
mpArgs = &exec.MachinePoolArgs{
@@ -296,7 +296,7 @@
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("when\nenabling autoscaling, should set value for maxReplicas"))
helper.ExpectTFErrorContains(err, "when enabling autoscaling, should set value for maxReplicas")

By("Create machinepool with setting min-replicas large than max-replicas")
mpArgs = &exec.MachinePoolArgs{
@@ -321,7 +321,7 @@
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("when\ndisabling autoscaling, cannot set min_replicas and/or max_replicas"))
helper.ExpectTFErrorContains(err, "when disabling autoscaling, cannot set min_replicas and/or max_replicas")

By("Create machinepool with setting min-replicas large than max-replicas")
if profileHandler.Profile().IsMultiAZ() {
@@ -336,7 +336,7 @@
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("Multi AZ clusters require that the number of replicas be a\nmultiple of 3"))
helper.ExpectTFErrorContains(err, "Multi AZ clusters require that the number of replicas be a multiple of 3")

mpArgs = &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
@@ -348,7 +348,7 @@
}
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("Multi AZ clusters require that the number of replicas be a\nmultiple of 3"))
helper.ExpectTFErrorContains(err, "Multi AZ clusters require that the number of replicas be a multiple of 3")

}
})
@@ -494,68 +494,6 @@ var _ = Describe("Create MachinePool", ci.Day2, ci.FeatureMachinepool, func() {
Expect(mpResponseBody.Replicas()).To(Equal(4))
})

It("can create machinepool with disk size - [id:69144]", ci.High, func() {
By("Create additional machinepool with disk size specified")
replicas := 3
machineType := "r5.xlarge"
name := helper.GenerateRandomName("ocp-69144", 2)
diskSize := 249
mpArgs := &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
Replicas: helper.IntPointer(replicas),
MachineType: helper.StringPointer(machineType),
Name: helper.StringPointer(name),
DiskSize: helper.IntPointer(diskSize),
}

_, err := mpService.Apply(mpArgs)
Expect(err).ToNot(HaveOccurred())
defer func() {
_, err = mpService.Destroy()
Expect(err).ToNot(HaveOccurred())
}()

By("Verify the parameters of the created machinepool")
mpResponseBody, err := cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.RootVolume().AWS().Size()).To(Equal(diskSize))
Expect(mpResponseBody.InstanceType()).To(Equal(machineType))

By("Update disksize is not allowed ")
mpArgs = &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
Replicas: helper.IntPointer(replicas),
MachineType: helper.StringPointer(machineType),
Name: helper.StringPointer(name),
DiskSize: helper.IntPointer(320),
}
output, err := mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
Expect(output).Should(ContainSubstring("Attribute disk_size, cannot be changed from 249 to 320"))

By("Destroy machinepool")
_, err = mpService.Destroy()
Expect(err).ToNot(HaveOccurred())

By("Create another machinepool without disksize will create another machinepool with default value")
name = helper.GenerateRandomName("ocp-69144", 2)
mpArgs = &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
Replicas: helper.IntPointer(replicas),
MachineType: helper.StringPointer("m5.2xlarge"),
Name: helper.StringPointer(name),
}

_, err = mpService.Apply(mpArgs)
Expect(err).ToNot(HaveOccurred())

By("Verify the parameters of the created machinepool")
mpResponseBody, err = cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.RootVolume().AWS().Size()).To(Equal(300))
Expect(mpResponseBody.InstanceType()).To(Equal("m5.2xlarge"))
})

It("can create machinepool with additional security group - [id:69146]", ci.High, func() {
if !profileHandler.Profile().IsBYOVPC() {
Skip("This case only works for BYOVPC cluster profile")
@@ -852,7 +790,7 @@ var _ = Describe("Edit MachinePool", ci.Day2, ci.FeatureMachinepool, func() {
profileHandler profilehandler.ProfileHandler
dmpService exec.MachinePoolService
mpService exec.MachinePoolService
-defaultMachinePoolNmae = "worker"
+defaultMachinePoolName = "worker"
defaultMachinepoolResponse *cmsv1.MachinePool
originalDefaultMachinepoolArgs *exec.MachinePoolArgs
)
@@ -872,8 +810,8 @@ var _ = Describe("Edit MachinePool", ci.Day2, ci.FeatureMachinepool, func() {
mpService, err = profileHandler.Services().GetMachinePoolsService()
Expect(err).ToNot(HaveOccurred())

-defaultMachinepoolResponse, err = cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, defaultMachinePoolNmae)
-if err != nil && strings.Contains(err.Error(), fmt.Sprintf("Machine pool with id '%s' not found", defaultMachinePoolNmae)) {
+defaultMachinepoolResponse, err = cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, defaultMachinePoolName)
+if err != nil && strings.Contains(err.Error(), fmt.Sprintf("Machine pool with id '%s' not found", defaultMachinePoolName)) {
Skip("The default machinepool does not exist")
}

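Note: throughout this file, raw Expect(err.Error()).Should(ContainSubstring(...)) assertions, which had to hard-code the exact points where Terraform wraps its diagnostics (hence the embedded \n), were replaced with helper.ExpectTFErrorContains. The helper's implementation is not part of this diff; a minimal sketch of the idea, assuming it simply whitespace-normalizes both sides before matching, could look like this:

package helper

import (
	"regexp"

	. "github.com/onsi/gomega"
)

// wrapRE matches runs of whitespace, including the newlines Terraform
// inserts when it hard-wraps diagnostic text.
var wrapRE = regexp.MustCompile(`\s+`)

// ExpectTFErrorContains asserts that err's message contains substr after
// whitespace normalization, so expectations can be written as single-line
// strings regardless of where Terraform wraps them.
func ExpectTFErrorContains(err error, substr string) {
	normalized := wrapRE.ReplaceAllString(err.Error(), " ")
	Expect(normalized).To(ContainSubstring(wrapRE.ReplaceAllString(substr, " ")))
}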
164 changes: 164 additions & 0 deletions tests/e2e/machine_pool_test.go
@@ -0,0 +1,164 @@
package e2e

import (
"fmt"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/ci"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/cms"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/constants"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/exec"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/helper"
"github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/profilehandler"
)

var _ = Describe("Create Classic or HCP MachinePool", ci.Day2, ci.FeatureMachinepool, func() {
defer GinkgoRecover()

var (
mpService exec.MachinePoolService
vpcOutput *exec.VPCOutput
profileHandler profilehandler.ProfileHandler
)

BeforeEach(func() {
var err error
profileHandler, err = profilehandler.NewProfileHandlerFromYamlFile()
Expect(err).ToNot(HaveOccurred())

mpService, err = profileHandler.Services().GetMachinePoolsService()
Expect(err).ToNot(HaveOccurred())

if profileHandler.Profile().IsHCP() {
By("Get vpc output")
vpcService, err := profileHandler.Services().GetVPCService()
Expect(err).ToNot(HaveOccurred())
vpcOutput, err = vpcService.Output()
Expect(err).ToNot(HaveOccurred())
}
})

AfterEach(func() {
mpService.Destroy()
})

getDefaultMPArgs := func(name string, isHCP bool) *exec.MachinePoolArgs {
replicas := 3
machineType := "m5.2xlarge"
mpArgs := &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
Replicas: helper.IntPointer(replicas),
MachineType: helper.StringPointer(machineType),
Name: helper.StringPointer(name),
}

if isHCP {
subnetId := vpcOutput.PrivateSubnets[0]
mpArgs.AutoscalingEnabled = helper.BoolPointer(false)
mpArgs.SubnetID = helper.StringPointer(subnetId)
mpArgs.AutoRepair = helper.BoolPointer(true)
}
return mpArgs
}

It("can create machinepool with disk size - [id:69144]", ci.Critical, func() {
By("Create additional machinepool with disk size specified")
replicas := 3
machineType := "r5.xlarge"
name := helper.GenerateRandomName("mp-69144", 2)
diskSize := 249
mpArgs := &exec.MachinePoolArgs{
Cluster: helper.StringPointer(clusterID),
Replicas: helper.IntPointer(replicas),
MachineType: helper.StringPointer(machineType),
Name: helper.StringPointer(name),
DiskSize: helper.IntPointer(diskSize),
}

if profileHandler.Profile().IsHCP() {
subnetId := vpcOutput.PrivateSubnets[0]
mpArgs.AutoscalingEnabled = helper.BoolPointer(false)
mpArgs.SubnetID = helper.StringPointer(subnetId)
mpArgs.AutoRepair = helper.BoolPointer(true)
}

_, err := mpService.Apply(mpArgs)
Expect(err).ToNot(HaveOccurred())

By("Verify the parameters of the created machinepool")
if profileHandler.Profile().IsHCP() {
mpResponseBody, err := cms.RetrieveClusterNodePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.AWSNodePool().RootVolume().Size()).To(Equal(diskSize))
Expect(mpResponseBody.AWSNodePool().InstanceType()).To(Equal(machineType))
} else {
mpResponseBody, err := cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.RootVolume().AWS().Size()).To(Equal(diskSize))
Expect(mpResponseBody.InstanceType()).To(Equal(machineType))
}

By("Destroy machinepool")
_, err = mpService.Destroy()
Expect(err).ToNot(HaveOccurred())

By("Create another machinepool without disksize set will be created with default value")
name = helper.GenerateRandomName("mp-69144", 2)
mpArgs = getDefaultMPArgs(name, profileHandler.Profile().IsHCP())

_, err = mpService.Apply(mpArgs)
Expect(err).ToNot(HaveOccurred())

By("Verify the parameters of the created machinepool")
if profileHandler.Profile().IsHCP() {
mpResponseBody, err := cms.RetrieveClusterNodePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.AWSNodePool().RootVolume().Size()).To(Equal(300))
Expect(mpResponseBody.AWSNodePool().InstanceType()).To(Equal("m5.2xlarge"))
} else {
mpResponseBody, err := cms.RetrieveClusterMachinePool(cms.RHCSConnection, clusterID, name)
Expect(err).ToNot(HaveOccurred())
Expect(mpResponseBody.RootVolume().AWS().Size()).To(Equal(300))
Expect(mpResponseBody.InstanceType()).To(Equal("m5.2xlarge"))
}
})

It("will validate well for worker disk size field - [id:76345]", ci.Low, func() {
By("Try to create a machine pool with invalid worker disk size")
mpName := helper.GenerateRandomName("mp-76345", 2)
mpArgs := getDefaultMPArgs(mpName, profileHandler.Profile().IsHCP())
maxDiskSize := constants.MaxDiskSize
minDiskSize := constants.MinClassicDiskSize
if profileHandler.Profile().IsHCP() {
minDiskSize = constants.MinHCPDiskSize
}

errMsg := fmt.Sprintf("Must be between %d GiB and %d GiB", minDiskSize, maxDiskSize)

mpArgs.DiskSize = helper.IntPointer(minDiskSize - 1)
_, err := mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
helper.ExpectTFErrorContains(err, errMsg)

mpArgs.DiskSize = helper.IntPointer(maxDiskSize + 1)
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
helper.ExpectTFErrorContains(err, errMsg)

// TODO OCM-11521 terraform plan doesn't have validation

By("Create a successful machine pool with disk size specified")
mpName = helper.GenerateRandomName("mp-76345", 2)
mpArgs = getDefaultMPArgs(mpName, profileHandler.Profile().IsHCP())
mpArgs.DiskSize = helper.IntPointer(249)

_, err = mpService.Apply(mpArgs)
Expect(err).ToNot(HaveOccurred())

By("Update disk size of the created machine pool is not allowed")
mpArgs.DiskSize = helper.IntPointer(320)
_, err = mpService.Apply(mpArgs)
Expect(err).To(HaveOccurred())
helper.ExpectTFErrorContains(err, "disk_size, cannot be changed from 249 to 320")
})
})
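Note: the new tests reference disk-size bounds from tests/utils/constants (MinClassicDiskSize, MinHCPDiskSize, MaxDiskSize) whose definitions are not shown in this diff. A hypothetical sketch, with values assumed from ROSA's documented worker root-volume limits rather than taken from this commit:

package constants

// Assumed values for illustration only; the committed definitions in
// tests/utils/constants are authoritative.
const (
	MinClassicDiskSize = 128   // GiB; assumed Classic worker minimum
	MinHCPDiskSize     = 75    // GiB; assumed HCP worker minimum
	MaxDiskSize        = 16384 // GiB; assumed shared maximum (16 TiB)
)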
21 changes: 21 additions & 0 deletions tests/e2e/negative_day_one_test.go
@@ -541,6 +541,27 @@ var _ = Describe("Negative Tests", Ordered, ContinueOnFailure, func() {
})
})

Describe("Create Classic or HCP cluster", ci.Day1Negative, func() {
It("validate worker disk size - [id:76344]", ci.Low, func() {
maxDiskSize := constants.MaxDiskSize
minDiskSize := constants.MinClassicDiskSize
if profileHandler.Profile().IsHCP() {
minDiskSize = constants.MinHCPDiskSize
}

By("Create cluster with invalid worker disk size")
errMsg := fmt.Sprintf("Must be between %d GiB and %d GiB", minDiskSize, maxDiskSize)
validateClusterArgAgainstErrorSubstrings(func(args *exec.ClusterArgs) {
args.WorkerDiskSize = helper.IntPointer(minDiskSize - 1)
}, errMsg)
validateClusterArgAgainstErrorSubstrings(func(args *exec.ClusterArgs) {
args.WorkerDiskSize = helper.IntPointer(maxDiskSize + 1)
}, errMsg)

// TODO OCM-11521 terraform plan doesn't have validation
})
})

Describe("The EOL OCP version validation", ci.Day1Negative, func() {
It("version validation - [id:64095]", ci.Medium, func() {
if profileHandler.Profile().GetAdditionalSGNumber() > 0 {
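Note: the added case leans on validateClusterArgAgainstErrorSubstrings, which is defined elsewhere in this test file and is not shown in the diff. A hypothetical sketch of the pattern, where baseClusterArgs and clusterService are stand-ins for whatever the file actually uses:

// Sketch only: mutate a copy of the day-one cluster args, apply them, and
// expect every given substring in the resulting error.
validateClusterArgAgainstErrorSubstrings := func(mutate func(args *exec.ClusterArgs), errSubstrings ...string) {
	args := baseClusterArgs() // assumed helper returning the profile's default creation args
	mutate(args)
	_, err := clusterService.Apply(args)
	Expect(err).To(HaveOccurred())
	for _, substring := range errSubstrings {
		helper.ExpectTFErrorContains(err, substring) // wrap-insensitive match
	}
}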
4 changes: 0 additions & 4 deletions tests/e2e/verification_post_day1_test.go
@@ -318,10 +318,6 @@ var _ = Describe("Verify cluster", func() {
It("worker disk size is set correctly - [id:69143]",
ci.Day1Post, ci.Critical,
func() {

-if profile.IsHCP() {
-Skip("Test can run only on Classic cluster")
-}
switch profile.GetWorkerDiskSize() {
case 0:
Expect(cluster.Nodes().ComputeRootVolume().AWS().Size()).To(Equal(300))
1 change: 1 addition & 0 deletions tests/tf-manifests/rhcs/clusters/rosa-hcp/main.tf
@@ -82,6 +82,7 @@ resource "rhcs_cluster_rosa_hcp" "rosa_hcp_cluster" {
wait_for_std_compute_nodes_complete = var.wait_for_cluster
disable_waiting_in_destroy = var.disable_waiting_in_destroy
registry_config = var.registry_config
+worker_disk_size = var.worker_disk_size
}

resource "rhcs_cluster_wait" "rosa_cluster" { # id: 71869
2 changes: 1 addition & 1 deletion tests/tf-manifests/rhcs/clusters/rosa-hcp/output.tf
@@ -16,4 +16,4 @@

output "tags" {
value = rhcs_cluster_rosa_hcp.rosa_hcp_cluster.tags
-}
\ No newline at end of file
+}
5 changes: 5 additions & 0 deletions tests/tf-manifests/rhcs/clusters/rosa-hcp/variables.tf
@@ -215,3 +215,8 @@ variable "registry_config" {
})
default = null
}

variable "worker_disk_size" {
type = number
default = null
}