diff --git a/go.mod b/go.mod
index 42a1f2f..afa043a 100644
--- a/go.mod
+++ b/go.mod
@@ -81,6 +81,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-isatty v0.0.18 // indirect
diff --git a/go.sum b/go.sum
index 07fe2ce..8d8e568 100644
--- a/go.sum
+++ b/go.sum
@@ -436,6 +436,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
+github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go
index 2b4d155..3d54ecf 100644
--- a/pkg/cmd/adm/register_member_test.go
+++ b/pkg/cmd/adm/register_member_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/kubesaw/ksctl/pkg/utils"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -515,15 +514,6 @@ func verifyToolchainClusterSecret(t *testing.T, fakeClient *test.FakeClient, saN
 	require.Equal(t, fmt.Sprintf("token-secret-for-%s", saName), apiConfig.AuthInfos["auth"].Token)
 }
 
-func whenDeploymentThenUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-	return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-		if deployment, ok := obj.(*appsv1.Deployment); ok {
-			checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment)
-		}
-		return fakeClient.Client.Update(ctx, obj, opts...)
-	}
-}
-
 func newFakeClientsFromRestConfig(t *testing.T, initObjs ...runtimeclient.Object) (newClientFromRestConfigFunc, *test.FakeClient) {
 	fakeClient := test.NewFakeClient(t, initObjs...)
 	fakeClient.MockCreate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.CreateOption) error {
diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go
index cc23073..2546061 100644
--- a/pkg/cmd/adm/restart.go
+++ b/pkg/cmd/adm/restart.go
@@ -1,157 +1,227 @@
 package adm
 
 import (
-	"context"
 	"fmt"
+	"os"
 	"time"
 
 	"github.com/kubesaw/ksctl/pkg/client"
-	"github.com/kubesaw/ksctl/pkg/cmd/flags"
 	"github.com/kubesaw/ksctl/pkg/configuration"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	"github.com/kubesaw/ksctl/pkg/ioutils"
 	"github.com/spf13/cobra"
 	appsv1 "k8s.io/api/apps/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+type (
+	RolloutRestartFunc       func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error
+	RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error
+)
+
+// NewRestartCmd() is a function to restart the whole operator. It relies on the target cluster and fetches the cluster config.
+// 1. If the command is run for the host operator, it restarts the whole host operator (it deletes the OLM-based pods (host-operator pods),
+// waits for the new pods to come up, then uses the rollout-restart command for the non-OLM-based deployment - registration-service).
+// 2. If the command is run for the member operator, it restarts the whole member operator (it deletes the OLM-based pods (member-operator pods),
+// waits for the new pods to come up, then uses the rollout-restart command for the non-OLM-based deployments - webhooks).
 func NewRestartCmd() *cobra.Command {
-	var targetCluster string
 	command := &cobra.Command{
-		Use:   "restart -t <cluster-name> <deployment-name>",
-		Short: "Restarts a deployment",
-		Long: `Restarts the deployment with the given name in the operator namespace.
-If no deployment name is provided, then it lists all existing deployments in the namespace.`,
-		Args: cobra.RangeArgs(0, 1),
+		Use:   "restart <cluster-name>",
+		Short: "Restarts an operator",
+		Long: `Restarts the whole operator. It relies on the target cluster and fetches the cluster config.
+	1. If the command is run for the host operator, it restarts the whole host operator
+	(it deletes the OLM-based pods (host-operator pods), waits for the new pods to
+	come up, then uses the rollout-restart command for the non-OLM-based deployments - registration-service).
+	2. If the command is run for the member operator, it restarts the whole member operator
+	(it deletes the OLM-based pods (member-operator pods), waits for the new pods
+	to come up, then uses the rollout-restart command for the non-OLM-based deployments - webhooks).`,
+		Args: cobra.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout)
 			ctx := clicontext.NewCommandContext(term, client.DefaultNewClient)
-			return restart(ctx, targetCluster, args...)
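+			// args[0] is the name of the target cluster as configured in the ksctl config file (e.g. "host" or "member1")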
+			return restart(ctx, args[0])
 		},
 	}
-	command.Flags().StringVarP(&targetCluster, "target-cluster", "t", "", "The target cluster")
-	flags.MustMarkRequired(command, "target-cluster")
 	return command
 }
 
-func restart(ctx *clicontext.CommandContext, clusterName string, deployments ...string) error {
+func restart(ctx *clicontext.CommandContext, clusterName string) error {
+	kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
+	ioStreams := genericiooptions.IOStreams{
+		In:     os.Stdin,
+		Out:    os.Stdout,
+		ErrOut: os.Stderr,
+	}
+	kubeConfigFlags.ClusterName = nil  // `cluster` flag is redefined for our own purpose
+	kubeConfigFlags.AuthInfoName = nil // unused here, so we can hide it
+	kubeConfigFlags.Context = nil      // unused here, so we can hide it
+
 	cfg, err := configuration.LoadClusterConfig(ctx, clusterName)
 	if err != nil {
 		return err
 	}
-	cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI)
+	kubeConfigFlags.Namespace = &cfg.OperatorNamespace
+	kubeConfigFlags.APIServer = &cfg.ServerAPI
+	kubeConfigFlags.BearerToken = &cfg.Token
+	kubeconfig, err := client.EnsureKsctlConfigFile()
 	if err != nil {
 		return err
 	}
-
-	if len(deployments) == 0 {
-		err := printExistingDeployments(ctx.Terminal, cl, cfg.OperatorNamespace)
-		if err != nil {
-			ctx.Terminal.Printlnf("\nERROR: Failed to list existing deployments\n :%s", err.Error())
-		}
-		return fmt.Errorf("at least one deployment name is required, include one or more of the above deployments to restart")
-	}
-	deploymentName := deployments[0]
+	kubeConfigFlags.KubeConfig = &kubeconfig
+	factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags))
 
 	if !ctx.AskForConfirmation(
-		ioutils.WithMessagef("restart the deployment '%s' in namespace '%s'", deploymentName, cfg.OperatorNamespace)) {
+		ioutils.WithMessagef("restart all the deployments in the cluster '%s' and namespace '%s'\n", clusterName, cfg.OperatorNamespace)) {
 		return nil
 	}
-	return restartDeployment(ctx, cl, cfg.OperatorNamespace, deploymentName)
-}
 
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error {
-	namespacedName := types.NamespacedName{
-		Namespace: ns,
-		Name:      deploymentName,
+	cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI)
+	if err != nil {
+		return err
 	}
 
-	originalReplicas, err := scaleToZero(cl, namespacedName)
+	return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+		return checkRolloutStatus(ctx, factory, ioStreams, deployment)
+	}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+		return restartNonOlmDeployments(ctx, deployment, factory, ioStreams)
+	})
+}
+
+// restartDeployments has the whole logic of getting the list of OLM and non-OLM based deployments, and then proceeds with restarting/deleting them accordingly
+func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, checker RolloutStatusCheckerFunc, restarter RolloutRestartFunc) error {
+	ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns)
+	olmDeploymentList, nonOlmDeploymentList, err := getExistingDeployments(ctx, cl, ns)
 	if err != nil {
-		if apierrors.IsNotFound(err) {
-			ctx.Printlnf("\nERROR: The given deployment '%s' wasn't found.", deploymentName)
-			return printExistingDeployments(ctx, cl, ns)
-		}
 		return err
 	}
-	ctx.Println("The deployment was scaled to 0")
-	if err := scaleBack(ctx, cl, namespacedName, originalReplicas); err != nil {
-		ctx.Printlnf("Scaling the deployment '%s' in namespace '%s' back to '%d' replicas wasn't successful", originalReplicas)
-		ctx.Println("Please, try to contact administrators to scale the deployment back manually")
-		return err
+	// if there is no OLM operator deployment, there is no need for a restart
+	if len(olmDeploymentList.Items) == 0 {
+		return fmt.Errorf("no operator deployment found in namespace %s, it is required for the operator deployment to be running so the command can proceed with restarting the KubeSaw components", ns)
+	}
+	// delete the pods of the OLM-based operator deployment and then check the rollout status
+	for _, olmOperatorDeployment := range olmDeploymentList.Items {
+		ctx.Printlnf("Proceeding to delete the Pods of %v", olmOperatorDeployment.Name)
+
+		if err := deleteDeploymentPods(ctx, cl, olmOperatorDeployment); err != nil {
+			return err
+		}
+		// sleep here so that the correct status is returned when it is checked below
+		time.Sleep(1 * time.Second)
+
+		ctx.Printlnf("Checking the status of the deleted pod's deployment %v", olmOperatorDeployment.Name)
+		// check the rollout status
+		if err := checker(ctx, olmOperatorDeployment); err != nil {
+			return err
+		}
+	}
+
+	// non-OLM deployments, like the registration-service, are to be restarted;
+	// if no non-OLM deployment is found, just return with a message
+	if len(nonOlmDeploymentList.Items) == 0 {
+		// if there are no non-OLM deployments
+		ctx.Printlnf("No Non-OLM deployment found in namespace %s, hence no restart happened", ns)
+		return nil
+	}
+	// if a non-OLM deployment is found, use the rollout-restart command
+	for _, nonOlmDeployment := range nonOlmDeploymentList.Items {
+		// rollout-restart should only be used for the deployments which are NOT the autoscaling-buffer
+		if nonOlmDeployment.Name != "autoscaling-buffer" {
+			ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment.Name)
+			// using rollout-restart
+			if err := restarter(ctx, nonOlmDeployment); err != nil {
+				return err
+			}
+			// check the rollout status
+			ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment.Name)
+			if err := checker(ctx, nonOlmDeployment); err != nil {
+				return err
+			}
+			// if the deployment is not the autoscaling-buffer, return from the function instead of
+			// printing the autoscaling-buffer message below; we do not expect more than one non-OLM
+			// deployment for each OLM deployment, hence the return here
+			return nil
+		}
+		// if there is an autoscaling-buffer deployment, it shouldn't be restarted; exit successfully
+		ctx.Printlnf("Found only autoscaling-buffer deployment in namespace %s, which is not required to be restarted", ns)
 	}
-	ctx.Printlnf("The deployment was scaled back to '%d'", originalReplicas)
 	return nil
 }
 
-func restartHostOperator(ctx *clicontext.CommandContext, hostClient runtimeclient.Client, hostNamespace string) error {
-	deployments := &appsv1.DeploymentList{}
-	if err := hostClient.List(context.TODO(), deployments,
-		runtimeclient.InNamespace(hostNamespace),
-		runtimeclient.MatchingLabels{"olm.owner.namespace": "toolchain-host-operator"}); err != nil {
+func deleteDeploymentPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error {
+	// get the pods of the deployment by its label selector
+	pods := corev1.PodList{}
+	selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
+	if err := cl.List(ctx, &pods,
+		runtimeclient.MatchingLabelsSelector{Selector: selector},
+		runtimeclient.InNamespace(deployment.Namespace)); err != nil {
 		return err
 	}
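+	// deleting the pods is enough to restart the operator: the deployment
+	// controller recreates them right away with fresh pods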
-	if len(deployments.Items) != 1 {
-		return fmt.Errorf("there should be a single deployment matching the label olm.owner.namespace=toolchain-host-operator in %s ns, but %d was found. "+
-			"It's not possible to restart the Host Operator deployment", hostNamespace, len(deployments.Items))
+
+	// delete the pods
+	for _, pod := range pods.Items {
+		pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview
+		ctx.Printlnf("Deleting pod: %s", pod.Name)
+		if err := cl.Delete(ctx, &pod); err != nil {
+			return err
+		}
 	}
-	return restartDeployment(ctx, hostClient, hostNamespace, deployments.Items[0].Name)
+	return nil
+}
 
-func printExistingDeployments(term ioutils.Terminal, cl runtimeclient.Client, ns string) error {
-	deployments := &appsv1.DeploymentList{}
-	if err := cl.List(context.TODO(), deployments, runtimeclient.InNamespace(ns)); err != nil {
+func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error {
+	o := kubectlrollout.NewRolloutRestartOptions(ioStreams)
+
+	if err := o.Complete(f, nil, []string{"deployment/" + deployment.Name}); err != nil {
 		return err
 	}
-	deploymentList := "\n"
-	for _, deployment := range deployments.Items {
-		deploymentList += fmt.Sprintf("%s\n", deployment.Name)
+
+	if err := o.Validate(); err != nil {
+		return err
 	}
-	term.PrintContextSeparatorWithBodyf(deploymentList, "Existing deployments in %s namespace", ns)
-	return nil
+	ctx.Printlnf("Running the rollout restart command for non-OLM deployment %v", deployment.Name)
+	return o.RunRestart()
 }
 
-func scaleToZero(cl runtimeclient.Client, namespacedName types.NamespacedName) (int32, error) {
-	// get the deployment
-	deployment := &appsv1.Deployment{}
-	if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil {
-		return 0, err
+func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, deployment appsv1.Deployment) error {
+	cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams)
+
+	if err := cmd.Complete(f, []string{"deployment/" + deployment.Name}); err != nil {
+		return err
 	}
-	// keep original number of replicas so we can bring it back
-	originalReplicas := *deployment.Spec.Replicas
-	zero := int32(0)
-	deployment.Spec.Replicas = &zero
-	// update the deployment so it scales to zero
-	return originalReplicas, cl.Update(context.TODO(), deployment)
+	if err := cmd.Validate(); err != nil {
+		return err
+	}
+	ctx.Printlnf("Running the rollout status to check the status of the deployment")
+	return cmd.Run()
 }
 
-func scaleBack(term ioutils.Terminal, cl runtimeclient.Client, namespacedName types.NamespacedName, originalReplicas int32) error {
-	return wait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, 10*time.Second, false, func(ctx context.Context) (done bool, err error) {
-		term.Println("")
-		term.Printlnf("Trying to scale the deployment back to '%d'", originalReplicas)
-		// get the updated
-		deployment := &appsv1.Deployment{}
-		if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil {
-			return false, err
-		}
-		// check if the replicas number wasn't already reset by a controller
-		if *deployment.Spec.Replicas == originalReplicas {
-			return true, nil
-		}
-		// set the original
-		deployment.Spec.Replicas = &originalReplicas
-		// and update to scale back
-		if err := cl.Update(context.TODO(), deployment); err != nil {
-			term.Printlnf("error updating Deployment '%s': %s. Will retry again...", namespacedName.Name, err.Error())
-			return false, nil
-		}
-		return true, nil
-	})
+func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) {
+	olmDeployments := &appsv1.DeploymentList{}
+	if err := cl.List(ctx, olmDeployments,
+		runtimeclient.InNamespace(ns),
+		runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil {
+		return nil, nil, err
+	}
+
+	nonOlmDeployments := &appsv1.DeploymentList{}
+	if err := cl.List(ctx, nonOlmDeployments,
+		runtimeclient.InNamespace(ns),
+		runtimeclient.MatchingLabels{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}); err != nil {
+		return nil, nil, err
+	}
+
+	return olmDeployments, nonOlmDeployments, nil
 }
diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go
index 32197c3..6292f44 100644
--- a/pkg/cmd/adm/restart_test.go
+++ b/pkg/cmd/adm/restart_test.go
@@ -1,215 +1,342 @@
 package adm
 
 import (
-	"context"
+	"bytes"
 	"fmt"
+	"io"
+	"net/http"
 	"testing"
 
 	"github.com/codeready-toolchain/toolchain-common/pkg/test"
-	"github.com/kubesaw/ksctl/pkg/configuration"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	. "github.com/kubesaw/ksctl/pkg/test"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierror "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	"k8s.io/client-go/rest/fake"
+	cgtesting "k8s.io/client-go/testing"
+	cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
+	"k8s.io/kubectl/pkg/scheme"
 )
 
-func TestRestartDeployment(t *testing.T) {
-	// given
-	SetFileConfig(t, Host(), Member())
+func TestKubectlRolloutFunctionality(t *testing.T) {
-	for _, clusterName := range []string{"host", "member1"} {
-		clusterType := configuration.Host
-		if clusterName != "host" {
-			clusterType = configuration.Member
+	HostNamespacedName := types.NamespacedName{
+		Namespace: "toolchain-host-operator",
+		Name:      "host-operator-controller-manager",
+	}
+	RegNamespacedName := types.NamespacedName{
+		Namespace: "toolchain-host-operator",
+		Name:      "registration-service",
+	}
+	var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"}
+	hostDep := newDeployment(HostNamespacedName, 1)
+	regDep := newDeployment(RegNamespacedName, 1)
+	ns := scheme.Codecs.WithoutConversion()
+	tf := cmdtesting.NewTestFactory().WithNamespace(HostNamespacedName.Namespace)
+	tf.ClientConfigVal = cmdtesting.DefaultClientConfig()
+	info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON)
+	encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder)
+	tf.Client = &fake.RESTClient{
+		GroupVersion:         rolloutGroupVersionEncoder,
+		NegotiatedSerializer: ns,
+		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
+			body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, hostDep))))
+			return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil
+		}),
+	}
+	csCalls := 0
+	tf.FakeDynamicClient.WatchReactionChain = nil
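+	// the fake watch reactor below reports the deployment as fully available,
+	// so the kubectl rollout-status machinery completes without waiting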
+	tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) {
+		csCalls++
+		fw := watch.NewFake()
+		hostDep.Status = appsv1.DeploymentStatus{
+			Replicas:            1,
+			UpdatedReplicas:     1,
+			ReadyReplicas:       1,
+			AvailableReplicas:   1,
+			UnavailableReplicas: 0,
+			Conditions: []appsv1.DeploymentCondition{{
+				Type: appsv1.DeploymentAvailable,
+			}},
 		}
-		namespace := fmt.Sprintf("toolchain-%s-operator", clusterType)
-		namespacedName := types.NamespacedName{
-			Namespace: namespace,
-			Name:      "cool-deployment",
+		c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hostDep.DeepCopyObject())
+		if err != nil {
+			t.Errorf("unexpected err %s", err)
 		}
-		term := NewFakeTerminalWithResponse("Y")
-
-		t.Run("restart is successful for "+clusterName, func(t *testing.T) {
-			// given
-			deployment := newDeployment(namespacedName, 3)
-			newClient, fakeClient := NewFakeClients(t, deployment)
-			numberOfUpdateCalls := 0
-			fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls)
-			ctx := clicontext.NewCommandContext(term, newClient)
-
-			// when
-			err := restart(ctx, clusterName, "cool-deployment")
-
-			// then
-			require.NoError(t, err)
-			AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3)
-			assert.Equal(t, 2, numberOfUpdateCalls)
+		u := &unstructured.Unstructured{}
+		u.SetUnstructuredContent(c)
+		go fw.Add(u)
+		return true, fw, nil
+	})
+
+	streams, _, buf, _ := genericiooptions.NewTestIOStreams()
+	term := NewFakeTerminalWithResponse("Y")
+	pod := newPod(test.NamespacedName(hostDep.Namespace, hostDep.Name))
+	hostDep.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
+	regDep.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}
+
+	t.Run("Rollout Restart and Rollout Status work successfully", func(t *testing.T) {
+		csCalls = 0
+		newClient, fakeClient := NewFakeClients(t, hostDep, regDep, pod)
+		ctx := clicontext.NewCommandContext(term, newClient)
+
+		//when
+		err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+			return checkRolloutStatus(ctx, tf, streams, *hostDep)
+		}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+			return restartNonOlmDeployments(ctx, deployment, tf, streams)
 		})
 
-		t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) {
-			// given
-			deployment := newDeployment(namespacedName, 3)
-			newClient, fakeClient := NewFakeClients(t, deployment)
-			numberOfUpdateCalls := 0
-			fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls)
-			term := NewFakeTerminalWithResponse("Y")
-			ctx := clicontext.NewCommandContext(term, newClient)
-
-			// when
-			err := restart(ctx, clusterName)
-
-			// then
-			require.EqualError(t, err, "at least one deployment name is required, include one or more of the above deployments to restart")
-			AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3)
-			assert.Equal(t, 0, numberOfUpdateCalls)
-			assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType))
-			assert.Contains(t, term.Output(), "cool-deployment")
+		//then
+		require.NoError(t, err)
+		require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment")
+		//checking the output from kubectl for the rollout restart
"deployment.apps/host-operator-controller-manager restarted\n") + //checking the flow for non-operator deployments + require.Contains(t, term.Output(), "Proceeding to restart the non-olm deployment") + require.Contains(t, term.Output(), "Running the rollout restart command for non-Olm deployment") + actual := &appsv1.Deployment{} + AssertObjectHasContent(t, fakeClient, HostNamespacedName, actual, func() { + require.NotNil(t, actual.Spec.Replicas) + assert.Equal(t, int32(1), *actual.Spec.Replicas) + require.NotNil(t, actual.Annotations["restartedAt"]) }) + assert.Equal(t, 2, csCalls) + require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") + require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") + + }) + + t.Run("Error No OLM deployment", func(t *testing.T) { + csCalls = 0 + newClient, fakeClient := NewFakeClients(t, regDep) + ctx := clicontext.NewCommandContext(term, newClient) - t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { - return fmt.Errorf("some error") - } - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.Error(t, err) - fakeClient.MockGet = nil - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) + //when + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, *hostDep) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return restartNonOlmDeployments(ctx, deployment, tf, streams) }) - t.Run("restart fails - deployment not found for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "wrong-deployment") - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") - assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType)) - assert.Contains(t, term.Output(), "cool-deployment") + //then + require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") + assert.Equal(t, 0, csCalls) + + }) + t.Run("No Non-OLM deployment", func(t *testing.T) { + csCalls = 0 + newClient, fakeClient := NewFakeClients(t, hostDep, pod) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx 
+			return checkRolloutStatus(ctx, tf, streams, *hostDep)
+		}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+			return restartNonOlmDeployments(ctx, deployment, tf, streams)
+		})
-	}
 
+		//then
+		require.NoError(t, err)
+		//checking the logic when there is only an operator based deployment and no non-operator based one
+		require.Contains(t, term.Output(), "No Non-OLM deployment found in namespace toolchain-host-operator, hence no restart happened")
+		assert.Equal(t, 1, csCalls)
+	})
 }
 
+func TestRestartDeployment(t *testing.T) {
+	//given
+	SetFileConfig(t, Host(), Member())
 
-func TestRestartDeploymentWithInsufficientPermissions(t *testing.T) {
-	// given
-	SetFileConfig(t, Host(NoToken()), Member(NoToken()))
-	for _, clusterName := range []string{"host", "member1"} {
-		// given
-		clusterType := configuration.Host
-		if clusterName != "host" {
-			clusterType = configuration.Member
-		}
-		namespace := fmt.Sprintf("toolchain-%s-operator", clusterType)
-		namespacedName := types.NamespacedName{
-			Namespace: namespace,
-			Name:      "cool-deployment",
-		}
-		deployment := newDeployment(namespacedName, 3)
-		newClient, fakeClient := NewFakeClients(t, deployment)
-		numberOfUpdateCalls := 0
-		fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls)
-		term := NewFakeTerminalWithResponse("Y")
+	//OLM-deployments
+	//host
+	hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
+	hostDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
+	hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"))
+	extraPod := newPod(test.NamespacedName("toolchain-host-operator", "extra"))
+
+	//Non-OLM deployments
+	//reg-svc
+	regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1)
+	regServDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}
+
+	actualPod := &corev1.Pod{}
+	term := NewFakeTerminalWithResponse("Y")
+
+	t.Run("restart deployment returns an error if no operator based deployment found", func(t *testing.T) {
+		//given
+		newClient, fakeClient := NewFakeClients(t, regServDeployment)
 		ctx := clicontext.NewCommandContext(term, newClient)
 
-		// when
-		err := restart(ctx, clusterName, "cool-deployment")
+		//when
+		err := restartDeployments(ctx, fakeClient, "toolchain-host-operator",
+			func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				require.Equal(t, "host-operator-controller-manager", deployment.Name)
+				return nil
+			}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				require.Equal(t, regServDeployment, deployment)
+				return nil
+			})
+
+		//then
+		require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator, it is required for the operator deployment to be running so the command can proceed with restarting the KubeSaw components")
+	})
 
-		// then
-		require.Error(t, err)
-		assert.Equal(t, 0, numberOfUpdateCalls)
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3)
-	}
-}
+	t.Run("restart deployment works successfully with the whole operator (operator and non-operator deployments)", func(t *testing.T) {
+		//given
+		newClient, fakeClient := NewFakeClients(t, hostDeployment, hostPod, regServDeployment, extraPod)
+		ctx := clicontext.NewCommandContext(term, newClient)
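+
+		// no-op checker and restarter: this subtest verifies only the flow
+		// driven by restartDeployments itself (pod deletion and progress messages)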
-func TestRestartHostOperator(t *testing.T) {
-	// given
-	SetFileConfig(t, Host())
-	term := NewFakeTerminalWithResponse("") // it should not read the input
-	cfg, err := configuration.LoadClusterConfig(term, "host")
-	require.NoError(t, err)
-	namespacedName := types.NamespacedName{
-		Namespace: "toolchain-host-operator",
-		Name:      "host-operator-controller-manager",
-	}
+		//when
+		err := restartDeployments(ctx, fakeClient, "toolchain-host-operator",
+			func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				return nil
+			}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				return nil
+			})
 
-	t.Run("host deployment is present and restart successful", func(t *testing.T) {
-		// given
-		deployment := newDeployment(namespacedName, 1)
-		deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"}
-		newClient, fakeClient := NewFakeClients(t, deployment)
-		numberOfUpdateCalls := 0
-		fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls)
+		//then
+		require.NoError(t, err)
+		//checking the flow for operator deployments
+		require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in toolchain-host-operator namespace")
+		require.Contains(t, term.Output(), "Proceeding to delete the Pods of")
+		require.Contains(t, term.Output(), "Deleting pod: host-operator-controller-manager")
+		err = fakeClient.Get(ctx, test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), actualPod)
+		//the pods are actually deleted
+		require.True(t, apierror.IsNotFound(err))
+		require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment")
+		//checking the flow for non-operator deployments
+		require.Contains(t, term.Output(), "Proceeding to restart the non-OLM deployment")
+		require.Contains(t, term.Output(), "Checking the status of the rolled out deployment")
+	})
+
+	t.Run("restart deployment works successfully when there is only an operator based deployment", func(t *testing.T) {
+		//given
+		newClient, fakeClient := NewFakeClients(t, hostDeployment, hostPod)
 		ctx := clicontext.NewCommandContext(term, newClient)
 
-		// when
-		err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace)
+		//when
+		err := restartDeployments(ctx, fakeClient, "toolchain-host-operator",
+			func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				return nil
+			}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+				return nil
+			})
 
-		// then
+		//then
 		require.NoError(t, err)
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1)
-		assert.Equal(t, 2, numberOfUpdateCalls)
+		require.Contains(t, term.Output(), "No Non-OLM deployment found in namespace toolchain-host-operator, hence no restart happened")
 	})
 
-	t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) {
-		// given
-		deployment := newDeployment(namespacedName, 1)
-		newClient, fakeClient := NewFakeClients(t, deployment)
-		numberOfUpdateCalls := 0
-		fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls)
+	t.Run("rollout restart returns an error", func(t *testing.T) {
+		//given
+		newClient, fakeClient := NewFakeClients(t, hostDeployment, regServDeployment, hostPod)
 		ctx := clicontext.NewCommandContext(term, newClient)
+		expectedErr := fmt.Errorf("could not do rollout restart of the deployment")
 
+		//when
+		err := restartDeployments(ctx, fakeClient, "toolchain-host-operator",
"toolchain-host-operator", + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return nil + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return expectedErr + }) + + //then + require.EqualError(t, err, expectedErr.Error()) + }) + + t.Run("rollout status for the deleted pods(operator) works", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, hostDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return nil + }, nil) + + //then + require.NoError(t, err) + }) + + t.Run("error in rollout status of the deleted pods(operator)", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, hostDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + expectedErr := fmt.Errorf("Could not check the status of the deployment") + //when + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return expectedErr + }, nil) + + //then + require.EqualError(t, err, expectedErr.Error()) + }) + +} + +func TestRestartAutoScalerDeployment(t *testing.T) { + //given + SetFileConfig(t, Host(), Member()) + + //OLM-deployments + //member + memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1) + memberDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} + + //Non-OLM deployments + //autoscaler + autoscalerDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "autoscaling-buffer"), 1) + autoscalerDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"} - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) + term := NewFakeTerminalWithResponse("Y") - // then - require.Error(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 0, numberOfUpdateCalls) + t.Run("autoscalling deployment should not restart", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, memberDeployment, autoscalerDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + //when + err := restartDeployments(ctx, fakeClient, "toolchain-member-operator", + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return nil + }, mockRolloutRestartInterceptor()) + + //then + require.NoError(t, err) + require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") + require.NotContains(t, term.Output(), "Proceeding to restart the non-olm deployment") }) +} - t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - deployment2 := deployment.DeepCopy() - deployment2.Name = "another" - newClient, fakeClient := NewFakeClients(t, deployment, deployment2) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) +func TestRestart(t *testing.T) { + //given + SetFileConfig(t, Host(), Member()) + + t.Run("No restart when users says NO 
+		term := NewFakeTerminalWithResponse("N")
+		//given
+		newClient, _ := NewFakeClients(t)
 		ctx := clicontext.NewCommandContext(term, newClient)
 
+		//when
+		err := restart(ctx, "host")
-		// when
-		err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace)
 
+		//then
+		require.NoError(t, err)
+		require.NotContains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in")
-		// then
-		require.Error(t, err)
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1)
-		assert.Equal(t, 0, numberOfUpdateCalls)
 	})
 }
 
@@ -221,32 +348,34 @@ func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"dummy-key": "controller"}},
 		},
 	}
 }
 
-func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-	return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-		deployment, ok := obj.(*appsv1.Deployment)
-		require.True(t, ok)
-		checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment)
-		return fakeClient.Client.Update(ctx, obj, opts...)
+func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam
+	return &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespacedName.Namespace,
+			Name:      namespacedName.Name,
+			Labels:    map[string]string{"dummy-key": "controller"},
+		},
+		Spec: corev1.PodSpec{},
+		Status: corev1.PodStatus{
+			Phase: "Running",
+		},
 	}
 }
 
-func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) {
-	// on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested")
-	// on the other calls, it's the opposite
-	if *numberOfUpdateCalls == 0 {
-		// check the current deployment's replicas field
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas)
-		// check the requested deployment's replicas field
-		assert.Equal(t, int32(0), *deployment.Spec.Replicas)
-	} else {
-		// check the current deployment's replicas field
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0)
-		// check the requested deployment's replicas field
-		assert.Equal(t, currentReplicas, *deployment.Spec.Replicas)
+func mockRolloutRestartInterceptor() func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+	return func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+		if deployment.Name == "autoscaling-buffer" {
+			return fmt.Errorf("autoscaling deployment found")
+		}
+		return nil
 	}
-	*numberOfUpdateCalls++
 }
diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go
index fd177b7..c082321 100644
--- a/pkg/cmd/adm/unregister_member.go
+++ b/pkg/cmd/adm/unregister_member.go
@@ -14,6 +14,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 )
 
+type restartFunc func(ctx *clicontext.CommandContext, clusterName string) error
+
 func NewUnregisterMemberCmd() *cobra.Command {
 	return &cobra.Command{
 		Use:   "unregister-member <member-name>",
@@ -23,12 +25,12 @@ func NewUnregisterMemberCmd() *cobra.Command {
 		RunE: func(cmd *cobra.Command, args []string) error {
 			term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout)
 			ctx := clicontext.NewCommandContext(term, client.DefaultNewClient)
-			return UnregisterMemberCluster(ctx, args[0])
+			return UnregisterMemberCluster(ctx, args[0], restart)
 		},
 	}
 }
 
-func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) error {
+func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string, restart restartFunc) error {
 	hostClusterConfig, err := configuration.LoadClusterConfig(ctx, configuration.HostName)
 	if err != nil {
 		return err
 	}
@@ -62,5 +64,5 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string)
 	}
 
 	ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered")
-	return restartHostOperator(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace)
+	return restart(ctx, "host")
 }
diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go
index 72c2392..fb7575b 100644
--- a/pkg/cmd/adm/unregister_member_test.go
+++ b/pkg/cmd/adm/unregister_member_test.go
@@ -1,9 +1,9 @@
 package adm
 
 import (
+	"fmt"
 	"testing"
 
-	"github.com/codeready-toolchain/toolchain-common/pkg/test"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	. "github.com/kubesaw/ksctl/pkg/test"
 	"github.com/stretchr/testify/assert"
@@ -13,20 +13,17 @@ import (
 func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
 	// given
 	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
-	hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")
-	deployment := newDeployment(hostDeploymentName, 1)
-	deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"}
-	newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment)
-	numberOfUpdateCalls := 0
-	fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls)
+	newClient, fakeClient := NewFakeClients(t, toolchainCluster)
 
 	SetFileConfig(t, Host(), Member())
 	term := NewFakeTerminalWithResponse("y")
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1")
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return nil
+	})
 
 	// then
 	require.NoError(t, err)
@@ -36,9 +33,46 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
 	assert.Contains(t, term.Output(), "Delete Member cluster stated above from the Host cluster?")
 	assert.Contains(t, term.Output(), "The deletion of the Toolchain member cluster from the Host cluster has been triggered")
 	assert.NotContains(t, term.Output(), "cool-token")
+}
+
+func TestUnregisterMemberWhenRestartError(t *testing.T) {
+	// given
+	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
+
+	newClient, _ := NewFakeClients(t, toolchainCluster)
+
+	SetFileConfig(t, Host(), Member())
+	term := NewFakeTerminalWithResponse("y")
+	ctx := clicontext.NewCommandContext(term, newClient)
 
-	AssertDeploymentHasReplicas(t, fakeClient, hostDeploymentName, 1)
-	assert.Equal(t, 2, numberOfUpdateCalls)
+	// when
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return fmt.Errorf("restart did not happen")
+	})
+
+	// then
+	require.EqualError(t, err, "restart did not happen")
+}
+
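+// TestUnregisterMemberCallsRestart verifies that the restart function injected
+// into UnregisterMemberCluster is invoked exactly once, with the "host" cluster name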
+func TestUnregisterMemberCallsRestart(t *testing.T) {
+	// given
+	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
+
+	newClient, _ := NewFakeClients(t, toolchainCluster)
+
+	SetFileConfig(t, Host(), Member())
+	term := NewFakeTerminalWithResponse("y")
+	ctxAct := clicontext.NewCommandContext(term, newClient)
+	called := 0
+
+	// when
+	err := UnregisterMemberCluster(ctxAct, "member1", func(ctx *clicontext.CommandContext, restartClusterName string) error {
+		called++
+		return mockRestart(ctx, restartClusterName)
+	})
+
+	// then
+	require.NoError(t, err)
+	assert.Equal(t, 1, called)
 }
 
 func TestUnregisterMemberWhenAnswerIsN(t *testing.T) {
@@ -50,7 +84,9 @@ func TestUnregisterMemberWhenAnswerIsN(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1")
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return nil
+	})
 
 	// then
 	require.NoError(t, err)
@@ -71,7 +107,9 @@ func TestUnregisterMemberWhenNotFound(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1")
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return nil
+	})
 
 	// then
 	require.EqualError(t, err, "toolchainclusters.toolchain.dev.openshift.com \"member-cool-server.com\" not found")
@@ -92,7 +130,9 @@ func TestUnregisterMemberWhenUnknownClusterName(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "some")
+	err := UnregisterMemberCluster(ctx, "some", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return nil
+	})
 
 	// then
 	require.Error(t, err)
@@ -115,9 +155,18 @@ func TestUnregisterMemberLacksPermissions(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1")
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
+		return nil
+	})
 
 	// then
 	require.EqualError(t, err, "ksctl command failed: the token in your ksctl.yaml file is missing")
 	AssertToolchainClusterSpec(t, fakeClient, toolchainCluster)
 }
+
+func mockRestart(ctx *clicontext.CommandContext, clusterName string) error {
+	if clusterName == "host" && ctx != nil {
+		return nil
+	}
+	return fmt.Errorf("cluster name is wrong")
+}
diff --git a/resources/roles/host.yaml b/resources/roles/host.yaml
index 876b5df..4eadb41 100644
--- a/resources/roles/host.yaml
+++ b/resources/roles/host.yaml
@@ -20,6 +20,8 @@ objects:
       - "list"
       - "patch"
       - "update"
+      - "watch"
+      - "delete"
 
 - kind: Role
   apiVersion: rbac.authorization.k8s.io/v1
diff --git a/resources/roles/member.yaml b/resources/roles/member.yaml
index 5532c84..735212d 100644
--- a/resources/roles/member.yaml
+++ b/resources/roles/member.yaml
@@ -20,6 +20,8 @@ objects:
       - "list"
       - "patch"
       - "update"
+      - "watch"
+      - "delete"
 
 - kind: Role
   apiVersion: rbac.authorization.k8s.io/v1
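
A minimal usage sketch of the reworked command (assuming it is registered under the `adm` command group, as the `pkg/cmd/adm` package suggests):

    # restart the whole host operator: deletes the OLM-managed pods, waits for the
    # rollout, then rollout-restarts the registration-service
    ksctl adm restart host

    # same flow for a member operator (webhooks instead of registration-service)
    ksctl adm restart member1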