diff --git a/controllers/toolchaincluster/healthchecker.go b/controllers/toolchaincluster/healthchecker.go
index 04fcd3c1..775f69e0 100644
--- a/controllers/toolchaincluster/healthchecker.go
+++ b/controllers/toolchaincluster/healthchecker.go
@@ -4,110 +4,24 @@ import (
 	"context"
 	"strings"
 
-	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
-	"github.com/go-logr/logr"
-	"github.com/pkg/errors"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeclientset "k8s.io/client-go/kubernetes"
-	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 const (
-	healthzOk              = "/healthz responded with ok"
-	healthzNotOk           = "/healthz responded without ok"
-	clusterNotReachableMsg = "cluster is not reachable"
-	clusterReachableMsg    = "cluster is reachable"
+	healthzOk    = "/healthz responded with ok"
+	healthzNotOk = "/healthz responded without ok"
 )
 
-type HealthChecker struct {
-	localClusterClient     client.Client
-	remoteClusterClient    client.Client
-	remoteClusterClientset *kubeclientset.Clientset
-	logger                 logr.Logger
-}
-
-func (hc *HealthChecker) updateIndividualClusterStatus(ctx context.Context, toolchainCluster *toolchainv1alpha1.ToolchainCluster) error {
-
-	currentClusterStatus := hc.getClusterHealthStatus(ctx)
-
-	for index, currentCond := range currentClusterStatus.Conditions {
-		for _, previousCond := range toolchainCluster.Status.Conditions {
-			if currentCond.Type == previousCond.Type && currentCond.Status == previousCond.Status {
-				currentClusterStatus.Conditions[index].LastTransitionTime = previousCond.LastTransitionTime
-			}
-		}
-	}
+// getClusterHealthStatus gets the kubernetes cluster health status by requesting "/healthz"
+func getClusterHealthStatus(ctx context.Context, remoteClusterClientset *kubeclientset.Clientset) (bool, error) {
-	toolchainCluster.Status = *currentClusterStatus
-	if err := hc.localClusterClient.Status().Update(ctx, toolchainCluster); err != nil {
-		return errors.Wrapf(err, "Failed to update the status of cluster %s", toolchainCluster.Name)
-	}
-	return nil
-}
-
-// getClusterHealthStatus gets the kubernetes cluster health status by requesting "/healthz"
-func (hc *HealthChecker) getClusterHealthStatus(ctx context.Context) *toolchainv1alpha1.ToolchainClusterStatus {
-	clusterStatus := toolchainv1alpha1.ToolchainClusterStatus{}
-	body, err := hc.remoteClusterClientset.DiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do(ctx).Raw()
+	lgr := log.FromContext(ctx)
+	body, err := remoteClusterClientset.DiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do(ctx).Raw()
 	if err != nil {
-		hc.logger.Error(err, "Failed to do cluster health check for a ToolchainCluster")
-		clusterStatus.Conditions = append(clusterStatus.Conditions, clusterOfflineCondition())
-	} else {
-		if !strings.EqualFold(string(body), "ok") {
-			clusterStatus.Conditions = append(clusterStatus.Conditions, clusterNotReadyCondition(), clusterNotOfflineCondition())
-		} else {
-			clusterStatus.Conditions = append(clusterStatus.Conditions, clusterReadyCondition())
-		}
-	}
-
-	return &clusterStatus
-}
-
-func clusterReadyCondition() toolchainv1alpha1.Condition {
-	currentTime := metav1.Now()
-	return toolchainv1alpha1.Condition{
-		Type:               toolchainv1alpha1.ConditionReady,
-		Status:             corev1.ConditionTrue,
-		Reason:             toolchainv1alpha1.ToolchainClusterClusterReadyReason,
-		Message:            healthzOk,
-		LastUpdatedTime:    &currentTime,
-		LastTransitionTime: currentTime,
-	}
-}
-
-func clusterNotReadyCondition() toolchainv1alpha1.Condition {
-	currentTime := metav1.Now()
-	return toolchainv1alpha1.Condition{
-		Type:               toolchainv1alpha1.ConditionReady,
-		Status:             corev1.ConditionFalse,
-		Reason:             toolchainv1alpha1.ToolchainClusterClusterNotReadyReason,
-		Message:            healthzNotOk,
-		LastUpdatedTime:    &currentTime,
-		LastTransitionTime: currentTime,
+		lgr.Error(err, "Failed to do cluster health check for a ToolchainCluster")
+		return false, err
 	}
-}
+	return strings.EqualFold(string(body), "ok"), nil
-
-func clusterOfflineCondition() toolchainv1alpha1.Condition {
-	currentTime := metav1.Now()
-	return toolchainv1alpha1.Condition{
-		Type:               toolchainv1alpha1.ToolchainClusterOffline,
-		Status:             corev1.ConditionTrue,
-		Reason:             toolchainv1alpha1.ToolchainClusterClusterNotReachableReason,
-		Message:            clusterNotReachableMsg,
-		LastUpdatedTime:    &currentTime,
-		LastTransitionTime: currentTime,
-	}
-}
-
-func clusterNotOfflineCondition() toolchainv1alpha1.Condition {
-	currentTime := metav1.Now()
-	return toolchainv1alpha1.Condition{
-		Type:               toolchainv1alpha1.ToolchainClusterOffline,
-		Status:             corev1.ConditionFalse,
-		Reason:             toolchainv1alpha1.ToolchainClusterClusterReachableReason,
-		Message:            clusterReachableMsg,
-		LastUpdatedTime:    &currentTime,
-		LastTransitionTime: currentTime,
-	}
 }
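Reviewer note: the probe no longer carries a logr.Logger field; it resolves one from the context via log.FromContext, and inside Reconcile controller-runtime already seeds that context automatically. A minimal sketch of how a caller outside a reconcile loop could do the same (the helper name below is illustrative, not part of this change):

package toolchaincluster

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// contextWithHealthcheckLogger is a hypothetical helper: it stores a named
// logger in the context so that log.FromContext(ctx) inside
// getClusterHealthStatus resolves to it instead of the global default.
func contextWithHealthcheckLogger(ctx context.Context) context.Context {
	return log.IntoContext(ctx, log.Log.WithName("toolchaincluster_healthcheck"))
}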
"stable", - apiendpoint: "http://cluster.com", - clusterconditions: []toolchainv1alpha1.Condition{healthy()}, - status: withStatus(offline()), + "HealthNotOkayButNoError": { + tcType: "unstable", + apiEndPoint: "http://unstable.com", + healthCheck: false, }, - "NotFoundContainsCondition": { - tctype: "not-found", - apiendpoint: "http://not-found.com", - clusterconditions: []toolchainv1alpha1.Condition{offline()}, - status: withStatus(healthy()), - }, - //if the connection cannot be established at beginning, then it should be offline - "OfflineConnectionNotEstablished": { - tctype: "failing", - apiendpoint: "http://failing.com", - clusterconditions: []toolchainv1alpha1.Condition{offline()}, - status: toolchainv1alpha1.ToolchainClusterStatus{}, - }, - //if no zones nor region is retrieved, then keep the current - "NoZoneKeepCurrent": { - tctype: "stable", - apiendpoint: "http://cluster.com", - clusterconditions: []toolchainv1alpha1.Condition{healthy()}, - status: withStatus(offline()), + "ErrorWhileDoingHealth": { + tcType: "Notfound", + apiEndPoint: "http://not-found.com", + healthCheck: false, + err: fmt.Errorf("the server could not find the requested resource"), }, } for k, tc := range tests { t.Run(k, func(t *testing.T) { - tctype, sec := newToolchainCluster(tc.tctype, tcNs, tc.apiendpoint, tc.status) - cl := test.NewFakeClient(t, tctype, sec) - reset := setupCachedClusters(t, cl, tctype) + //given + tcType, sec := newToolchainCluster(tc.tcType, tcNs, tc.apiEndPoint, toolchainv1alpha1.ToolchainClusterStatus{}) + cl := test.NewFakeClient(t, tcType, sec) + reset := setupCachedClusters(t, cl, tcType) defer reset() - cachedtc, found := cluster.GetCachedToolchainCluster(tctype.Name) + cachedTC, found := cluster.GetCachedToolchainCluster(tcType.Name) require.True(t, found) - cacheclient, err := kubeclientset.NewForConfig(cachedtc.RestConfig) + cacheClient, err := kubeclientset.NewForConfig(cachedTC.RestConfig) require.NoError(t, err) - healthChecker := &HealthChecker{ - localClusterClient: cl, - remoteClusterClient: cachedtc.Client, - remoteClusterClientset: cacheclient, - logger: logger, - } - // when - err = healthChecker.updateIndividualClusterStatus(context.TODO(), tctype) - //then - require.NoError(t, err) - assertClusterStatus(t, cl, tc.tctype, tc.clusterconditions...) 
- }) - } -} + //when + healthCheck, err := getClusterHealthStatus(context.TODO(), cacheClient) -func withStatus(conditions ...toolchainv1alpha1.Condition) toolchainv1alpha1.ToolchainClusterStatus { - return toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: conditions, - } -} -func assertClusterStatus(t *testing.T, cl client.Client, clusterName string, clusterConds ...toolchainv1alpha1.Condition) { - tc := &toolchainv1alpha1.ToolchainCluster{} - err := cl.Get(context.TODO(), test.NamespacedName("test-namespace", clusterName), tc) - require.NoError(t, err) - assert.Len(t, tc.Status.Conditions, len(clusterConds)) -ExpConditions: - for _, expCond := range clusterConds { - for _, cond := range tc.Status.Conditions { - if expCond.Type == cond.Type { - assert.Equal(t, expCond.Status, cond.Status) - assert.Equal(t, expCond.Reason, cond.Reason) - assert.Equal(t, expCond.Message, cond.Message) - continue ExpConditions + //then + require.Equal(t, tc.healthCheck, healthCheck) + if tc.err != nil { + require.EqualError(t, err, tc.err.Error()) + } else { + require.NoError(t, err) } - } - assert.Failf(t, "condition not found", "the list of conditions %v doesn't contain the expected condition %v", tc.Status.Conditions, expCond) - } -} -func healthy() toolchainv1alpha1.Condition { - return toolchainv1alpha1.Condition{ - Type: toolchainv1alpha1.ConditionReady, - Status: corev1.ConditionTrue, - Reason: "ClusterReady", - Message: "/healthz responded with ok", - } -} -func unhealthy() toolchainv1alpha1.Condition { - return toolchainv1alpha1.Condition{Type: toolchainv1alpha1.ConditionReady, - Status: corev1.ConditionFalse, - Reason: "ClusterNotReady", - Message: "/healthz responded without ok", - } -} -func offline() toolchainv1alpha1.Condition { - return toolchainv1alpha1.Condition{Type: toolchainv1alpha1.ToolchainClusterOffline, - Status: corev1.ConditionTrue, - Reason: "ClusterNotReachable", - Message: "cluster is not reachable", - } -} -func notOffline() toolchainv1alpha1.Condition { - return toolchainv1alpha1.Condition{Type: toolchainv1alpha1.ToolchainClusterOffline, - Status: corev1.ConditionFalse, - Reason: "ClusterReachable", - Message: "cluster is reachable", + + }) } } diff --git a/controllers/toolchaincluster/toolchaincluster_controller.go b/controllers/toolchaincluster/toolchaincluster_controller.go index 13ed77e7..08f0e6f9 100644 --- a/controllers/toolchaincluster/toolchaincluster_controller.go +++ b/controllers/toolchaincluster/toolchaincluster_controller.go @@ -6,10 +6,12 @@ import ( "fmt" "time" + "github.com/codeready-toolchain/api/api/v1alpha1" toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "github.com/codeready-toolchain/toolchain-common/pkg/cluster" + "github.com/codeready-toolchain/toolchain-common/pkg/condition" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -22,9 +24,10 @@ import ( // Reconciler reconciles a ToolchainCluster object type Reconciler struct { - Client client.Client - Scheme *runtime.Scheme - RequeAfter time.Duration + Client client.Client + Scheme *runtime.Scheme + RequeAfter time.Duration + checkHealth func(context.Context, *kubeclientset.Clientset) (bool, error) } // SetupWithManager sets up the controller with the Manager. @@ -47,7 +50,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. 
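Reviewer note: the table above relies on gock interceptors registered in the unchanged top of TestClusterHealthChecks (only the trailing Reply(404) is visible in the hunk context). For orientation, the setup has roughly this shape; the URLs match the table, but the bodies and helper name are assumptions, not quotes from the file:

package toolchaincluster

import (
	"testing"

	"gopkg.in/h2non/gock.v1"
)

// setupHealthzMocks is an illustrative stand-in for the real registrations:
// "stable" answers ok, "unstable" answers a non-ok body, and "not-found"
// replies 404 so the discovery client surfaces an error.
func setupHealthzMocks(t *testing.T) {
	t.Cleanup(gock.OffAll)
	gock.New("http://cluster.com").Get("/healthz").Persist().Reply(200).BodyString("ok")
	gock.New("http://unstable.com").Get("/healthz").Persist().Reply(200).BodyString("bad")
	gock.New("http://not-found.com").Get("/healthz").Persist().Reply(404)
}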
toolchainCluster := &toolchainv1alpha1.ToolchainCluster{} err := r.Client.Get(ctx, request.NamespacedName, toolchainCluster) if err != nil { - if errors.IsNotFound(err) { + if kerrors.IsNotFound(err) { // Stop monitoring the toolchain cluster as it is deleted return reconcile.Result{}, nil } @@ -58,9 +61,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. cachedCluster, ok := cluster.GetCachedToolchainCluster(toolchainCluster.Name) if !ok { err := fmt.Errorf("cluster %s not found in cache", toolchainCluster.Name) - toolchainCluster.Status.Conditions = []toolchainv1alpha1.Condition{clusterOfflineCondition()} - if err := r.Client.Status().Update(ctx, toolchainCluster); err != nil { - reqLogger.Error(err, "failed to update the status of ToolchainCluster") + if err := r.updateStatus(ctx, toolchainCluster, clusterOfflineCondition(err.Error())); err != nil { + reqLogger.Error(err, "unable to update cluster status of ToolchainCluster") } return reconcile.Result{}, err } @@ -72,23 +74,77 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl. clientSet, err := kubeclientset.NewForConfig(cachedCluster.RestConfig) if err != nil { reqLogger.Error(err, "cannot create ClientSet for the ToolchainCluster") + if err := r.updateStatus(ctx, toolchainCluster, clusterOfflineCondition(err.Error())); err != nil { + reqLogger.Error(err, "unable to update cluster status of ToolchainCluster") + } return reconcile.Result{}, err } - healthChecker := &HealthChecker{ - localClusterClient: r.Client, - remoteClusterClient: cachedCluster.Client, - remoteClusterClientset: clientSet, - logger: reqLogger, - } + + // execute healthcheck + healthCheckResult := r.getClusterHealthCondition(ctx, clientSet) + // update the status of the individual cluster. - if err := healthChecker.updateIndividualClusterStatus(ctx, toolchainCluster); err != nil { + if err := r.updateStatus(ctx, toolchainCluster, healthCheckResult); err != nil { reqLogger.Error(err, "unable to update cluster status of ToolchainCluster") return reconcile.Result{}, err } - return reconcile.Result{RequeueAfter: r.RequeAfter}, nil } +func (r *Reconciler) updateStatus(ctx context.Context, toolchainCluster *toolchainv1alpha1.ToolchainCluster, currentConditions ...toolchainv1alpha1.Condition) error { + toolchainCluster.Status.Conditions = condition.AddOrUpdateStatusConditionsWithLastUpdatedTimestamp(toolchainCluster.Status.Conditions, currentConditions...) 
+ if err := r.Client.Status().Update(ctx, toolchainCluster); err != nil { + return fmt.Errorf("failed to update the status of cluster - %s: %w", toolchainCluster.Name, err) + } + return nil +} + +func (r *Reconciler) getClusterHealthCondition(ctx context.Context, remoteClusterClientset *kubeclientset.Clientset) v1alpha1.Condition { + isHealthy, err := r.getClusterHealth(ctx, remoteClusterClientset) + if err != nil { + return clusterOfflineCondition(err.Error()) + } + if !isHealthy { + return clusterNotReadyCondition() + } + return clusterReadyCondition() + +} + +func (r *Reconciler) getClusterHealth(ctx context.Context, remoteClusterClientset *kubeclientset.Clientset) (bool, error) { + if r.checkHealth != nil { + return r.checkHealth(ctx, remoteClusterClientset) + } + return getClusterHealthStatus(ctx, remoteClusterClientset) +} + +func clusterOfflineCondition(errMsg string) toolchainv1alpha1.Condition { + return toolchainv1alpha1.Condition{ + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionFalse, + Reason: toolchainv1alpha1.ToolchainClusterClusterNotReachableReason, + Message: errMsg, + } +} + +func clusterReadyCondition() toolchainv1alpha1.Condition { + return toolchainv1alpha1.Condition{ + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionTrue, + Reason: toolchainv1alpha1.ToolchainClusterClusterReadyReason, + Message: healthzOk, + } +} + +func clusterNotReadyCondition() toolchainv1alpha1.Condition { + return toolchainv1alpha1.Condition{ + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionFalse, + Reason: toolchainv1alpha1.ToolchainClusterClusterNotReadyReason, + Message: healthzNotOk, + } +} + func (r *Reconciler) migrateSecretToKubeConfig(ctx context.Context, tc *toolchainv1alpha1.ToolchainCluster) error { if len(tc.Spec.SecretRef.Name) == 0 { return nil diff --git a/controllers/toolchaincluster/toolchaincluster_controller_test.go b/controllers/toolchaincluster/toolchaincluster_controller_test.go index 222be729..d5363ed2 100644 --- a/controllers/toolchaincluster/toolchaincluster_controller_test.go +++ b/controllers/toolchaincluster/toolchaincluster_controller_test.go @@ -15,11 +15,12 @@ import ( "gopkg.in/h2non/gock.v1" "gotest.tools/assert/cmp" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -57,11 +58,11 @@ func TestClusterControllerChecks(t *testing.T) { controller, req := prepareReconcile(NotFound, cl, requeAfter) // when - recresult, err := controller.Reconcile(context.TODO(), req) + recResult, err := controller.Reconcile(context.TODO(), req) // then require.NoError(t, err) - require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recresult) + require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recResult) }) t.Run("Error while getting ToolchainCluster", func(t *testing.T) { @@ -69,22 +70,20 @@ func TestClusterControllerChecks(t *testing.T) { tc, sec := newToolchainCluster("tc", tcNs, "http://tc.com", toolchainv1alpha1.ToolchainClusterStatus{}) cl := test.NewFakeClient(t, sec) - cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) 
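Reviewer note: updateStatus delegates the merge to condition.AddOrUpdateStatusConditionsWithLastUpdatedTimestamp from toolchain-common. A sketch of the semantics assumed here (upsert by condition Type, refresh LastUpdatedTime on every call, keep the previous LastTransitionTime unless the Status flips); this is an approximation for review orientation, not the canonical pkg/condition code:

package toolchaincluster

import (
	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// addOrUpdateSketch approximates the helper used above.
func addOrUpdateSketch(conds []toolchainv1alpha1.Condition, newConds ...toolchainv1alpha1.Condition) []toolchainv1alpha1.Condition {
	now := metav1.Now()
	for _, nc := range newConds {
		nc.LastUpdatedTime = &now
		nc.LastTransitionTime = now
		updated := false
		for i, c := range conds {
			if c.Type != nc.Type {
				continue
			}
			if c.Status == nc.Status {
				// no status flip: keep the original transition timestamp
				nc.LastTransitionTime = c.LastTransitionTime
			}
			conds[i] = nc
			updated = true
			break
		}
		if !updated {
			conds = append(conds, nc)
		}
	}
	return conds
}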
diff --git a/controllers/toolchaincluster/toolchaincluster_controller_test.go b/controllers/toolchaincluster/toolchaincluster_controller_test.go
index 222be729..d5363ed2 100644
--- a/controllers/toolchaincluster/toolchaincluster_controller_test.go
+++ b/controllers/toolchaincluster/toolchaincluster_controller_test.go
@@ -15,11 +15,11 @@ import (
 	"gopkg.in/h2non/gock.v1"
 	"gotest.tools/assert/cmp"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
+	kubeclientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -57,11 +58,11 @@ func TestClusterControllerChecks(t *testing.T) {
 		controller, req := prepareReconcile(NotFound, cl, requeAfter)
 
 		// when
-		recresult, err := controller.Reconcile(context.TODO(), req)
+		recResult, err := controller.Reconcile(context.TODO(), req)
 
 		// then
 		require.NoError(t, err)
-		require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recresult)
+		require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recResult)
 	})
 
 	t.Run("Error while getting ToolchainCluster", func(t *testing.T) {
@@ -69,22 +70,20 @@ func TestClusterControllerChecks(t *testing.T) {
 		tc, sec := newToolchainCluster("tc", tcNs, "http://tc.com", toolchainv1alpha1.ToolchainClusterStatus{})
 
 		cl := test.NewFakeClient(t, sec)
-
 		cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error {
 			if _, ok := obj.(*toolchainv1alpha1.ToolchainCluster); ok {
 				return fmt.Errorf("mock error")
 			}
 			return cl.Client.Get(ctx, key, obj, opts...)
 		}
-
 		controller, req := prepareReconcile(tc, cl, requeAfter)
 
 		// when
-		recresult, err := controller.Reconcile(context.TODO(), req)
+		recResult, err := controller.Reconcile(context.TODO(), req)
 
 		// then
 		require.EqualError(t, err, "mock error")
-		require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recresult)
+		require.Equal(t, reconcile.Result{Requeue: false, RequeueAfter: 0}, recResult)
 	})
 
 	t.Run("reconcile successful and requeued", func(t *testing.T) {
@@ -93,35 +92,76 @@ func TestClusterControllerChecks(t *testing.T) {
 		cl := test.NewFakeClient(t, stable, sec)
 		reset := setupCachedClusters(t, cl, stable)
+
 		defer reset()
 		controller, req := prepareReconcile(stable, cl, requeAfter)
 
 		// when
-		recresult, err := controller.Reconcile(context.TODO(), req)
+		recResult, err := controller.Reconcile(context.TODO(), req)
 
 		// then
 		require.NoError(t, err)
-		require.Equal(t, reconcile.Result{RequeueAfter: requeAfter}, recresult)
-		assertClusterStatus(t, cl, "stable", healthy())
+		require.Equal(t, reconcile.Result{RequeueAfter: requeAfter}, recResult)
+		assertClusterStatus(t, cl, "stable", clusterReadyCondition())
 	})
 
 	t.Run("toolchain cluster cache not found", func(t *testing.T) {
+		// given
+		unstable, _ := newToolchainCluster("unstable", tcNs, "http://unstable.com", toolchainv1alpha1.ToolchainClusterStatus{})
+
+		cl := test.NewFakeClient(t, unstable)
+		controller, req := prepareReconcile(unstable, cl, requeAfter)
+
+		// when
+		_, err := controller.Reconcile(context.TODO(), req)
+
+		// then
+		require.EqualError(t, err, "cluster unstable not found in cache")
+		assertClusterStatus(t, cl, "unstable", clusterOfflineCondition("cluster unstable not found in cache"))
+	})
+
+	t.Run("error while updating a toolchain cluster status on cache not found", func(t *testing.T) {
 		// given
 		stable, _ := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
 
 		cl := test.NewFakeClient(t, stable)
-
+		cl.MockStatusUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
+			return fmt.Errorf("mock error")
+		}
 		controller, req := prepareReconcile(stable, cl, requeAfter)
 
 		// when
-		_, err := controller.Reconcile(context.TODO(), req)
+		recResult, err := controller.Reconcile(context.TODO(), req)
 
 		// then
 		require.EqualError(t, err, "cluster stable not found in cache")
-		actualtoolchaincluster := &toolchainv1alpha1.ToolchainCluster{}
-		err = cl.Client.Get(context.TODO(), types.NamespacedName{Name: "stable", Namespace: tcNs}, actualtoolchaincluster)
-		require.NoError(t, err)
-		assertClusterStatus(t, cl, "stable", offline())
+		require.Equal(t, reconcile.Result{}, recResult)
+
+		assertClusterStatus(t, cl, "stable")
+	})
+
+	t.Run("error while updating a toolchain cluster status when health-check failed", func(t *testing.T) {
+		// given
+		stable, sec := newToolchainCluster("stable", tcNs, "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
+		expectedErr := fmt.Errorf("my test error")
+		cl := test.NewFakeClient(t, stable, sec)
+		cl.MockStatusUpdate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
+			return expectedErr
+		}
+		reset := setupCachedClusters(t, cl, stable)
+
+		defer reset()
+		controller, req := prepareReconcile(stable, cl, requeAfter)
+		controller.checkHealth = func(context.Context, *kubeclientset.Clientset) (bool, error) {
+			return false, expectedErr
+		}
+
+		// when
+		recResult, err := controller.Reconcile(context.TODO(), req)
+
+		// then
+		require.EqualError(t, err, fmt.Sprintf("failed to update the status of cluster - %s: %v", stable.Name, expectedErr))
+		require.Equal(t, reconcile.Result{}, recResult)
+		assertClusterStatus(t, cl, "stable")
 	})
 
 	t.Run("migrates connection settings to kubeconfig in secret", func(t *testing.T) {
@@ -159,6 +199,50 @@ func TestClusterControllerChecks(t *testing.T) {
 	})
 }
 
+func TestGetClusterHealth(t *testing.T) {
+	t.Run("Check health default", func(t *testing.T) {
+		// given
+		stable, sec := newToolchainCluster("stable", "test-namespace", "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
+
+		cl := test.NewFakeClient(t, stable, sec)
+		reset := setupCachedClusters(t, cl, stable)
+
+		defer reset()
+		controller, req := prepareReconcile(stable, cl, requeAfter)
+		controller.checkHealth = func(context.Context, *kubeclientset.Clientset) (bool, error) {
+			return true, nil
+		}
+
+		// when
+		recResult, err := controller.Reconcile(context.TODO(), req)
+
+		// then
+		require.NoError(t, err)
+		require.Equal(t, reconcile.Result{RequeueAfter: requeAfter}, recResult)
+		assertClusterStatus(t, cl, "stable", clusterReadyCondition())
+	})
+	t.Run("get health condition when health obtained is false", func(t *testing.T) {
+		// given
+		stable, sec := newToolchainCluster("stable", "test-namespace", "http://cluster.com", toolchainv1alpha1.ToolchainClusterStatus{})
+
+		cl := test.NewFakeClient(t, stable, sec)
+		reset := setupCachedClusters(t, cl, stable)
+
+		defer reset()
+		controller, req := prepareReconcile(stable, cl, requeAfter)
+		controller.checkHealth = func(context.Context, *kubeclientset.Clientset) (bool, error) {
+			return false, nil
+		}
+
+		// when
+		recResult, err := controller.Reconcile(context.TODO(), req)
+
+		// then
+		require.NoError(t, err)
+		require.Equal(t, reconcile.Result{RequeueAfter: requeAfter}, recResult)
+		assertClusterStatus(t, cl, "stable", clusterNotReadyCondition())
+	})
+}
 func TestComposeKubeConfig(t *testing.T) {
 	// when
 	kubeConfig := composeKubeConfigFromData([]byte("token"), "http://over.the.rainbow", "the-namespace", false)
@@ -208,3 +292,10 @@ func prepareReconcile(toolchainCluster *toolchainv1alpha1.ToolchainCluster, cl *test.FakeClient, requeAfter time.Duration) (*Reconciler, reconcile.Request) {
 	}
 	return controller, req
 }
+
+func assertClusterStatus(t *testing.T, cl runtimeclient.Client, clusterName string, clusterConds ...toolchainv1alpha1.Condition) {
+	tc := &toolchainv1alpha1.ToolchainCluster{}
+	err := cl.Get(context.TODO(), test.NamespacedName("test-namespace", clusterName), tc)
+	require.NoError(t, err)
+	test.AssertConditionsMatch(t, tc.Status.Conditions, clusterConds...)
+}
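Reviewer note: checkHealth is unexported, so only tests in this package can inject a stub; production wiring leaves it nil and getClusterHealth falls back to the real /healthz probe. A sketch of that wiring, assuming the usual controller-runtime manager setup, a SetupWithManager(mgr) error signature, this repository's import path, and an arbitrary requeue interval (all assumptions, not part of the diff):

package main

import (
	"os"
	"time"

	"github.com/codeready-toolchain/toolchain-common/controllers/toolchaincluster"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}
	r := &toolchaincluster.Reconciler{
		Client:     mgr.GetClient(),
		Scheme:     mgr.GetScheme(),
		RequeAfter: 10 * time.Second, // assumed requeue interval
		// checkHealth stays nil: getClusterHealth uses the real
		// getClusterHealthStatus /healthz probe.
	}
	if err := r.SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}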