From 2b06cc87fa59498ca6a14c63b298526e7fa014ee Mon Sep 17 00:00:00 2001 From: Lukas Krejci Date: Tue, 7 Jan 2025 12:46:41 +0100 Subject: [PATCH] Consumed capacity info in SpaceProvisionerConfig (#1109) Make the SPC ready status reflect the ability to place spaces to the corresponding cluster. --------- Co-authored-by: Francisc Munteanu Co-authored-by: Matous Jobanek Co-authored-by: Feny Mehta --- controllers/spaceprovisionerconfig/mapper.go | 44 +- .../spaceprovisionerconfig/mapper_test.go | 51 +- .../spaceprovisionerconfig_controller.go | 194 ++++++-- .../spaceprovisionerconfig_controller_test.go | 440 +++++++++++++----- go.mod | 6 +- go.sum | 12 +- main.go | 2 +- 7 files changed, 563 insertions(+), 186 deletions(-) diff --git a/controllers/spaceprovisionerconfig/mapper.go b/controllers/spaceprovisionerconfig/mapper.go index 2f55e39fe..83062ef49 100644 --- a/controllers/spaceprovisionerconfig/mapper.go +++ b/controllers/spaceprovisionerconfig/mapper.go @@ -2,6 +2,7 @@ package spaceprovisionerconfig import ( "context" + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "k8s.io/apimachinery/pkg/types" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -9,12 +10,32 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func MapToolchainClusterToSpaceProvisionerConfigs(ctx context.Context, cl runtimeclient.Client) func(context.Context, runtimeclient.Object) []reconcile.Request { - return func(context context.Context, obj runtimeclient.Object) []reconcile.Request { +func MapToolchainClusterToSpaceProvisionerConfigs(cl runtimeclient.Client) func(context.Context, runtimeclient.Object) []reconcile.Request { + return func(ctx context.Context, obj runtimeclient.Object) []reconcile.Request { + if _, ok := obj.(*toolchainv1alpha1.ToolchainCluster); !ok { + return nil + } + ret, err := findReferencingProvisionerConfigs(ctx, cl, runtimeclient.ObjectKeyFromObject(obj)) if err != nil { log.FromContext(ctx).Error(err, "failed to list SpaceProvisionerConfig objects while determining what objects to reconcile", - "toolchainClusterCause", runtimeclient.ObjectKeyFromObject(obj)) + "causeObj", runtimeclient.ObjectKeyFromObject(obj), "causeKind", "ToolchainCluster") + return []reconcile.Request{} + } + return ret + } +} + +func MapToolchainStatusToSpaceProvisionerConfigs(cl runtimeclient.Client) func(context.Context, runtimeclient.Object) []reconcile.Request { + return func(ctx context.Context, obj runtimeclient.Object) []reconcile.Request { + if _, ok := obj.(*toolchainv1alpha1.ToolchainStatus); !ok { + return nil + } + + ret, err := findAllSpaceProvisionerConfigsInNamespace(ctx, cl, obj.GetNamespace()) + if err != nil { + log.FromContext(ctx).Error(err, "failed to list SpaceProvisionerConfig objects while determining what objects to reconcile", + "causeObj", runtimeclient.ObjectKeyFromObject(obj), "causeKind", "ToolchainStatus") return []reconcile.Request{} } return ret @@ -39,3 +60,20 @@ func findReferencingProvisionerConfigs(ctx context.Context, cl runtimeclient.Cli } return ret, nil } + +func findAllSpaceProvisionerConfigsInNamespace(ctx context.Context, cl runtimeclient.Client, ns string) ([]reconcile.Request, error) { + configs := &toolchainv1alpha1.SpaceProvisionerConfigList{} + if err := cl.List(ctx, configs, runtimeclient.InNamespace(ns)); err != nil { + return nil, err + } + ret := make([]reconcile.Request, 0, len(configs.Items)) + for _, cfg := range configs.Items { + ret = append(ret, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: 
cfg.Namespace, + Name: cfg.Name, + }, + }) + } + return ret, nil +} diff --git a/controllers/spaceprovisionerconfig/mapper_test.go b/controllers/spaceprovisionerconfig/mapper_test.go index 842452295..f9fbe0b53 100644 --- a/controllers/spaceprovisionerconfig/mapper_test.go +++ b/controllers/spaceprovisionerconfig/mapper_test.go @@ -97,7 +97,7 @@ func TestMapToolchainClusterToSpaceProvisionerConfigs(t *testing.T) { cl := test.NewFakeClient(t, spc0, spc1, spc2) // when - reqs := MapToolchainClusterToSpaceProvisionerConfigs(context.TODO(), cl)(context.TODO(), &toolchainv1alpha1.ToolchainCluster{ + reqs := MapToolchainClusterToSpaceProvisionerConfigs(cl)(context.TODO(), &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: test.HostOperatorNs, @@ -123,7 +123,7 @@ func TestMapToolchainClusterToSpaceProvisionerConfigs(t *testing.T) { } // when - reqs := MapToolchainClusterToSpaceProvisionerConfigs(context.TODO(), cl)(context.TODO(), &toolchainv1alpha1.ToolchainCluster{ + reqs := MapToolchainClusterToSpaceProvisionerConfigs(cl)(context.TODO(), &toolchainv1alpha1.ToolchainCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", Namespace: test.HostOperatorNs, @@ -134,3 +134,50 @@ func TestMapToolchainClusterToSpaceProvisionerConfigs(t *testing.T) { require.Empty(t, reqs) }) } + +func TestMapToolchainStatusToSpaceProvisionerConfig(t *testing.T) { + t.Run("finds all SPCs in namespace", func(t *testing.T) { + // given + spc0 := NewSpaceProvisionerConfig("spc0", test.HostOperatorNs, ReferencingToolchainCluster("cluster1")) + spc1 := NewSpaceProvisionerConfig("spc1", test.HostOperatorNs, ReferencingToolchainCluster("cluster2")) + spc2 := NewSpaceProvisionerConfig("spc2", test.HostOperatorNs, ReferencingToolchainCluster("cluster1")) + cl := test.NewFakeClient(t, spc0, spc1, spc2) + + // when + reqs := MapToolchainStatusToSpaceProvisionerConfigs(cl)(context.TODO(), &toolchainv1alpha1.ToolchainStatus{ + ObjectMeta: metav1.ObjectMeta{ + Name: "toolchain-status", + Namespace: test.HostOperatorNs, + }, + }) + + // then + require.Equal(t, []reconcile.Request{ + requestFromObject(spc0), + requestFromObject(spc1), + requestFromObject(spc2), + }, reqs) + }) + t.Run("interprets erors as empty result", func(t *testing.T) { + // given + cl := test.NewFakeClient(t, NewSpaceProvisionerConfig("spc0", test.HostOperatorNs, ReferencingToolchainCluster("cluster1"))) + expectedErr := errors.New("expected list error") + cl.MockList = func(ctx context.Context, list runtimeclient.ObjectList, opts ...runtimeclient.ListOption) error { + if _, ok := list.(*toolchainv1alpha1.SpaceProvisionerConfigList); ok { + return expectedErr + } + return cl.Client.List(ctx, list, opts...) 
+		}
+
+		// when
+		reqs := MapToolchainStatusToSpaceProvisionerConfigs(cl)(context.TODO(), &toolchainv1alpha1.ToolchainStatus{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "toolchain-status",
+				Namespace: test.HostOperatorNs,
+			},
+		})
+
+		// then
+		require.Empty(t, reqs)
+	})
+}
diff --git a/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller.go b/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller.go
index ccfda1cf5..c12198e65 100644
--- a/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller.go
+++ b/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller.go
@@ -5,10 +5,12 @@ import (
 	"fmt"
 
 	toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1"
+	"github.com/codeready-toolchain/host-operator/controllers/toolchainconfig"
 	"github.com/codeready-toolchain/toolchain-common/pkg/condition"
 	"github.com/redhat-cop/operator-utils/pkg/util"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -25,12 +27,20 @@ type Reconciler struct {
 
 var _ reconcile.Reconciler = (*Reconciler)(nil)
 
-func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&toolchainv1alpha1.SpaceProvisionerConfig{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
 		Watches(
 			&toolchainv1alpha1.ToolchainCluster{},
-			handler.EnqueueRequestsFromMapFunc(MapToolchainClusterToSpaceProvisionerConfigs(ctx, r.Client)),
+			handler.EnqueueRequestsFromMapFunc(MapToolchainClusterToSpaceProvisionerConfigs(r.Client)),
 		).
+		// we use the same per-member usage information for the SPCs as is already collected in the ToolchainStatus.
+		// Because memory consumption is read directly from the member clusters over remote connections, we look for it
+		// only once, in the ToolchainStatus, and just read it out "locally" here without needing to reach out to the
+		// member clusters again.
+		Watches(
+			&toolchainv1alpha1.ToolchainStatus{},
+			handler.EnqueueRequestsFromMapFunc(MapToolchainStatusToSpaceProvisionerConfigs(r.Client)),
+		).
 		Complete(r)
 }
 
@@ -38,6 +48,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) err
 //+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=spaceprovisionerconfigs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=spaceprovisionerconfigs/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=toolchainclusters,verbs=get;list;watch
+//+kubebuilder:rbac:groups=toolchain.dev.openshift.com,resources=toolchainstatuses,verbs=get;list;watch
 
 // Reconcile ensures that SpaceProvisionerConfig is valid and points to an existing ToolchainCluster.
 func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
@@ -58,62 +69,153 @@
 		return ctrl.Result{}, nil
 	}
 
-	readyCondition, reportedError := r.determineReadyState(ctx, spaceProvisionerConfig)
+	reportedErr := r.refreshStatus(ctx, spaceProvisionerConfig)
 
-	var updated bool
-	spaceProvisionerConfig.Status.Conditions, updated = condition.AddOrUpdateStatusConditions(spaceProvisionerConfig.Status.Conditions,
-		readyCondition)
-	if !updated {
-		return ctrl.Result{}, reportedError
+	if err := r.Client.Status().Update(ctx, spaceProvisionerConfig); err != nil {
+		return ctrl.Result{}, err
 	}
 
-	logger.Info("updating SpaceProvisionerConfig", "readyCondition", readyCondition)
-	if err := r.Client.Status().Update(ctx, spaceProvisionerConfig); err != nil {
-		if reportedError != nil {
-			logger.Info("failed to update the status (reported as failed reconciliation) with a previous unreported error during reconciliation", "unreportedError", reportedError)
+	return ctrl.Result{}, reportedErr
+}
+
+func (r *Reconciler) refreshStatus(ctx context.Context, spc *toolchainv1alpha1.SpaceProvisionerConfig) error {
+	// clear out the consumed capacity - this will advertise to the user that we either failed before it made sense
+	// to collect it (and therefore we don't know it) or it was not available (and therefore we again don't know it)
+	spc.Status.ConsumedCapacity = nil
+
+	if !spc.Spec.Enabled {
+		updateReadyCondition(spc, corev1.ConditionFalse, toolchainv1alpha1.SpaceProvisionerConfigDisabledReason, "")
+		return nil
+	}
+
+	clusterCondition, err := r.determineClusterReadyState(ctx, spc)
+	if err != nil {
+		updateReadyCondition(spc, clusterCondition, toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason, err.Error())
+
+		// the reconciler reacts to ToolchainCluster changes, so it will be triggered once a new TC appears;
+		// we therefore don't need to return an error from the reconciler when the TC is not found.
+		if errors.IsNotFound(err) {
+			return nil
 		}
-		reportedError = fmt.Errorf("failed to update the SpaceProvisionerConfig status: %w", err)
+		return err
+	}
+
+	if clusterCondition != corev1.ConditionTrue {
+		updateReadyCondition(spc, clusterCondition, toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason, "")
+		return nil
 	}
-	return ctrl.Result{}, reportedError
+	spc.Status.ConsumedCapacity, err = collectConsumedCapacity(ctx, r.Client, spc.Spec.ToolchainCluster, spc.Namespace)
+	if err != nil {
+		updateReadyCondition(spc, corev1.ConditionUnknown, toolchainv1alpha1.SpaceProvisionerConfigFailedToDetermineCapacityReason, err.Error())
+		return err
+	}
+
+	capacityCondition := r.determineCapacityReadyState(spc)
+
+	reason := toolchainv1alpha1.SpaceProvisionerConfigValidReason
+	if capacityCondition != corev1.ConditionTrue {
+		reason = toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason
+	}
+
+	updateReadyCondition(spc, capacityCondition, reason, "")
+
+	return nil
 }
 
-func (r *Reconciler) determineReadyState(ctx context.Context, spc *toolchainv1alpha1.SpaceProvisionerConfig) (toolchainv1alpha1.Condition, error) {
-	toolchainCluster := &toolchainv1alpha1.ToolchainCluster{}
-	toolchainClusterKey := runtimeclient.ObjectKey{Name: spc.Spec.ToolchainCluster, Namespace: spc.Namespace}
-	var toolchainPresent corev1.ConditionStatus
-	toolchainPresenceReason := toolchainv1alpha1.SpaceProvisionerConfigValidReason
-	var reportedError error
-	toolchainPresenceMessage := ""
-	if err := r.Client.Get(ctx, toolchainClusterKey, toolchainCluster); err != nil {
-		if !errors.IsNotFound(err) {
-			// we need to requeue the reconciliation in this case because we cannot be sure whether the ToolchainCluster
-			// is really present in the cluster or not. If we did not do that and instead just reported the error in
-			// the status, we could eventually leave the SPC in an incorrect state once the error condition in the cluster,
-			// that prevents us from reading the ToolchainCluster, clears. I.e. we need the requeue to keep the promise
-			// of eventual consistency.
-
-			reportedError = fmt.Errorf("failed to get the referenced ToolchainCluster: %w", err)
-			toolchainPresenceMessage = reportedError.Error()
+// Note that this function merely mirrors the usage information found in the ToolchainStatus. This means that it actually may work
+// with slightly stale data because the counter.Counts cache might not have been synced yet. This is ok though because the capacity manager
+// doesn't completely rely on the readiness status of the SPC and will re-evaluate the decision taking into account the contents of
+// the counter cache and therefore completely "fresh" data.
+func collectConsumedCapacity(ctx context.Context, cl runtimeclient.Client, clusterName string, toolchainStatusNs string) (*toolchainv1alpha1.ConsumedCapacity, error) {
+	status := &toolchainv1alpha1.ToolchainStatus{}
+	if err := cl.Get(ctx, types.NamespacedName{Namespace: toolchainStatusNs, Name: toolchainconfig.ToolchainStatusName}, status); err != nil {
+		return nil, fmt.Errorf("unable to read ToolchainStatus resource: %w", err)
+	}
+
+	for _, m := range status.Status.Members {
+		if m.ClusterName == clusterName {
+			cc := toolchainv1alpha1.ConsumedCapacity{}
+			cc.MemoryUsagePercentPerNodeRole = m.MemberStatus.ResourceUsage.MemoryUsagePerNodeRole
+			cc.SpaceCount = m.SpaceCount
+
+			return &cc, nil
 		}
-		toolchainPresenceReason = toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason
-		toolchainPresent = corev1.ConditionFalse
-	} else {
-		readyCond, found := condition.FindConditionByType(toolchainCluster.Status.Conditions, toolchainv1alpha1.ConditionReady)
-		if !found {
-			toolchainPresent = corev1.ConditionFalse
-		} else {
-			toolchainPresent = readyCond.Status
+	}
+
+	return nil, nil
+}
+
+func (r *Reconciler) determineClusterReadyState(ctx context.Context, spc *toolchainv1alpha1.SpaceProvisionerConfig) (corev1.ConditionStatus, error) {
+	toolchainCluster := &toolchainv1alpha1.ToolchainCluster{}
+	if err := r.Client.Get(ctx, runtimeclient.ObjectKey{Name: spc.Spec.ToolchainCluster, Namespace: spc.Namespace}, toolchainCluster); err != nil {
+		if errors.IsNotFound(err) {
+			return corev1.ConditionFalse, err
 		}
-		if toolchainPresent != corev1.ConditionTrue {
-			toolchainPresenceReason = toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason
+		// IsNotFound is self-explanatory but let's add a little bit of context to the error in other cases
+		return corev1.ConditionFalse, fmt.Errorf("failed to get the referenced ToolchainCluster: %w", err)
+	}
+
+	readyCond, found := condition.FindConditionByType(toolchainCluster.Status.Conditions, toolchainv1alpha1.ConditionReady)
+	if !found {
+		return corev1.ConditionFalse, nil
+	}
+
+	return readyCond.Status, nil
+}
+
+func (r *Reconciler) determineCapacityReadyState(spc *toolchainv1alpha1.SpaceProvisionerConfig) corev1.ConditionStatus {
+	if spc.Status.ConsumedCapacity == nil {
+		// we don't know anything about the resource consumption in the member
+		return corev1.ConditionUnknown
+	}
+
+	// the cluster capacity is ok if it has room for additional spaces and enough free memory
+
+	roomForAdditionalSpaces := determineSpaceCountReadyState(spc)
+	if !roomForAdditionalSpaces {
+		return corev1.ConditionFalse
+	}
+
+	return determineMemoryUtilizationReadyState(spc)
+}
+
+// determineSpaceCountReadyState checks that there is room for additional spaces in the cluster.
+// It always knows this fact so returning a bool is ok, in contrast to determineMemoryUtilizationReadyState.
+func determineSpaceCountReadyState(spc *toolchainv1alpha1.SpaceProvisionerConfig) bool {
+	max := spc.Spec.CapacityThresholds.MaxNumberOfSpaces
+	return max == 0 || max > uint(spc.Status.ConsumedCapacity.SpaceCount)
+}
+
+// determineMemoryUtilizationReadyState checks that the cluster has enough free memory. It may not be able to tell this
+// if the SPC doesn't contain memory usage information in the status. It can therefore return true, false, or
+// unknown condition values.
+func determineMemoryUtilizationReadyState(spc *toolchainv1alpha1.SpaceProvisionerConfig) corev1.ConditionStatus { + if spc.Spec.CapacityThresholds.MaxMemoryUtilizationPercent == 0 { + // 0 max memory utilization means no limit + return corev1.ConditionTrue + } + + if len(spc.Status.ConsumedCapacity.MemoryUsagePercentPerNodeRole) == 0 { + // we don't know the memory utilization in the member + return corev1.ConditionUnknown + } + + // the memory utilitzation is ok if it is below the threshold in all node types + for _, val := range spc.Status.ConsumedCapacity.MemoryUsagePercentPerNodeRole { + if uint(val) >= spc.Spec.CapacityThresholds.MaxMemoryUtilizationPercent { + return corev1.ConditionFalse } } + return corev1.ConditionTrue +} - return toolchainv1alpha1.Condition{ +func updateReadyCondition(spc *toolchainv1alpha1.SpaceProvisionerConfig, status corev1.ConditionStatus, reason, message string) { + readyCondition := toolchainv1alpha1.Condition{ Type: toolchainv1alpha1.ConditionReady, - Status: toolchainPresent, - Message: toolchainPresenceMessage, - Reason: toolchainPresenceReason, - }, reportedError + Status: status, + Reason: reason, + Message: message, + } + spc.Status.Conditions, _ = condition.AddOrUpdateStatusConditions(spc.Status.Conditions, readyCondition) } diff --git a/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller_test.go b/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller_test.go index 2f1fc12a9..1ebe85a16 100644 --- a/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller_test.go +++ b/controllers/spaceprovisionerconfig/spaceprovisionerconfig_controller_test.go @@ -3,18 +3,20 @@ package spaceprovisionerconfig import ( "context" "errors" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "testing" "time" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + hosttest "github.com/codeready-toolchain/host-operator/test" "github.com/codeready-toolchain/toolchain-common/pkg/apis" "github.com/codeready-toolchain/toolchain-common/pkg/test" . "github.com/codeready-toolchain/toolchain-common/pkg/test/assertions" . 
"github.com/codeready-toolchain/toolchain-common/pkg/test/spaceprovisionerconfig" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -23,11 +25,24 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func TestSpaceProvisionerConfigValidation(t *testing.T) { - t.Run("is not ready when non-existing ToolchainCluster is referenced", func(t *testing.T) { +func TestSpaceProvisionerConfigReadinessTracking(t *testing.T) { + blueprintSpc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, + ReferencingToolchainCluster("cluster1"), + Enabled(true)) + + t.Run("is ready when enabled, cluster present and enabled and enough capacity available", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, ReferencingToolchainCluster("non-existent")) - r, req, cl := prepareReconcile(t, spc) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 50), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -35,26 +50,24 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason))) + AssertThat(t, spc, Is(Ready()), Has(ConsumedSpaceCount(3)), Has(ConsumedMemoryUsage(map[string]int{"worker": 50}))) }) - t.Run("is ready when existing ready ToolchainCluster is referenced", func(t *testing.T) { + t.Run("is not ready when disabled", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, ReferencingToolchainCluster("cluster1")) - r, req, cl := prepareReconcile(t, spc, &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster1", - Namespace: test.HostOperatorNs, - }, - Status: toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: v1.ConditionTrue, - }, - }, - }, - }) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), + MaxNumberOfSpaces(5), + MaxMemoryUtilizationPercent(80), + Enabled(false)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 50), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -62,34 +75,44 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(Ready())) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigDisabledReason)), + Has(UnknownConsumedCapacity())) + }) - t.Run("and becomes not ready when ToolchainCluster becomes not ready", func(t *testing.T) { - // given - tc := &toolchainv1alpha1.ToolchainCluster{} - require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKey{Name: "cluster1", Namespace: test.HostOperatorNs}, tc)) - tc.Status.Conditions = []toolchainv1alpha1.Condition{ - { - Type: 
toolchainv1alpha1.ConditionReady, - Status: v1.ConditionFalse, - }, - } - require.NoError(t, cl.Status().Update(context.TODO(), tc)) + t.Run("is not ready when cluster not present", func(t *testing.T) { + // given + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) - // when - _, reconcileErr := r.Reconcile(context.TODO(), req) - require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) + r, req, cl := prepareReconcile(t, spc.DeepCopy()) - // then - assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason))) - }) + // when + _, reconcileErr := r.Reconcile(context.TODO(), req) + require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) + + // then + assert.NoError(t, reconcileErr) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason)), + Has(UnknownConsumedCapacity())) }) - t.Run("is not ready when no ToolchainCluster is referenced", func(t *testing.T) { + t.Run("is not ready when no cluster referenced", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs) - r, req, cl := prepareReconcile(t, spc) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), + MaxNumberOfSpaces(5), + MaxMemoryUtilizationPercent(80), + ReferencingToolchainCluster("")) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 50), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -97,26 +120,27 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason))) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason)), + Has(UnknownConsumedCapacity())) }) - t.Run("is not ready when existing not-ready ToolchainCluster is referenced", func(t *testing.T) { + t.Run("is not ready with cluster not ready", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, ReferencingToolchainCluster("cluster1")) - r, req, cl := prepareReconcile(t, spc, &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster1", - Namespace: test.HostOperatorNs, - }, - Status: toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: v1.ConditionFalse, - }, - }, - }, - }) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + tc := readyToolchainCluster("cluster1") + tc.Status.Conditions[0].Status = corev1.ConditionFalse + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + tc, + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 50), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -124,51 +148,79 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason))) 
+ AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason)), + Has(UnknownConsumedCapacity())) + }) - t.Run("and becomes ready when the referenced ToolchainCluster becomes ready", func(t *testing.T) { - // given - tc := &toolchainv1alpha1.ToolchainCluster{} - require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKey{Name: "cluster1", Namespace: test.HostOperatorNs}, tc)) + t.Run("is not ready when space count is depleted", func(t *testing.T) { + // given + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(5), + hosttest.WithNodeRoleUsage("worker", 50), + ), + ), + ) - tc.Status.Conditions = []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: v1.ConditionTrue, - }, - } - require.NoError(t, cl.Status().Update(context.TODO(), tc)) + // when + _, reconcileErr := r.Reconcile(context.TODO(), req) + require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) - // when - _, reconcileErr = r.Reconcile(context.TODO(), req) - require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) + // then + assert.NoError(t, reconcileErr) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason)), + Has(ConsumedSpaceCount(5)), + Has(ConsumedMemoryUsage(map[string]int{"worker": 50}))) + }) + + t.Run("is not ready when memory is depleted in one", func(t *testing.T) { + // given + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 90), + hosttest.WithNodeRoleUsage("master", 40), + ), + ), + ) - // then - require.NoError(t, reconcileErr) - AssertThat(t, spc, Is(Ready())) - }) + // when + _, reconcileErr := r.Reconcile(context.TODO(), req) + require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) + + // then + assert.NoError(t, reconcileErr) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason)), + Has(ConsumedSpaceCount(3)), + Has(ConsumedMemoryUsage(map[string]int{"worker": 90, "master": 40}))) }) - // note that this is checking we "jumping 2 steps" from toolchain cluster not being present at all to be present and ready - t.Run("becomes ready when the referenced ToolchainCluster appears and is ready", func(t *testing.T) { + t.Run("is not ready when memory is depleted in more", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, - ReferencingToolchainCluster("cluster1"), - WithReadyConditionInvalid(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason)) - r, req, cl := prepareReconcile(t, spc, &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster1", - Namespace: test.HostOperatorNs, - }, - Status: toolchainv1alpha1.ToolchainClusterStatus{ - Conditions: []toolchainv1alpha1.Condition{ - { - Type: toolchainv1alpha1.ConditionReady, - Status: v1.ConditionTrue, - }, - }, - }, - }) + spc := 
ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + hosttest.WithNodeRoleUsage("worker", 42), + hosttest.WithNodeRoleUsage("master", 90), + hosttest.WithNodeRoleUsage("magic", 90), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -176,16 +228,17 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(Ready())) + AssertThat(t, spc, + Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason)), + Has(ConsumedSpaceCount(3)), + Has(ConsumedMemoryUsage(map[string]int{"worker": 42, "master": 90, "magic": 90}))) }) - // "jumping 2 steps" from having a ready toolchain cluster to not having 1 at all - t.Run("becomes not ready when the referenced ToolchainCluster disappears", func(t *testing.T) { + t.Run("has ready unknown if consumed capacity not known", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, - ReferencingToolchainCluster("cluster1"), - WithReadyConditionValid()) - r, req, cl := prepareReconcile(t, spc) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), readyToolchainCluster("cluster1"), hosttest.NewToolchainStatus()) // when _, reconcileErr := r.Reconcile(context.TODO(), req) @@ -193,34 +246,64 @@ func TestSpaceProvisionerConfigValidation(t *testing.T) { // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason))) + AssertThat(t, spc, + Has(ReadyStatusAndReason(corev1.ConditionUnknown, toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason)), + Has(UnknownConsumedCapacity())) }) - // this is a variant of becoming not ready when the TC is not ready, but this time the TC loses the ready condition altogether. 
- t.Run("becomes not ready when the referenced ToolchainCluster no longer has ready condition", func(t *testing.T) { + t.Run("has ready unknown if memory capacity not known", func(t *testing.T) { // given - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, - ReferencingToolchainCluster("cluster1"), - WithReadyConditionValid()) - r, req, cl := prepareReconcile(t, spc, &toolchainv1alpha1.ToolchainCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster1", - Namespace: test.HostOperatorNs, - }, - }) + spc := ModifySpaceProvisionerConfig(blueprintSpc.DeepCopy(), MaxNumberOfSpaces(5), MaxMemoryUtilizationPercent(80)) + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3), + ), + ), + ) + // when + _, reconcileErr := r.Reconcile(context.TODO(), req) + require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) + // then + assert.NoError(t, reconcileErr) + AssertThat(t, spc, + Has(ReadyStatusAndReason(corev1.ConditionUnknown, toolchainv1alpha1.SpaceProvisionerConfigInsufficientCapacityReason)), + Has(ConsumedSpaceCount(3)), + Has(ConsumedMemoryUsage(nil))) + }) + + t.Run("zero means unlimited", func(t *testing.T) { + // given + spc := blueprintSpc.DeepCopy() + + r, req, cl := prepareReconcile(t, spc.DeepCopy(), + readyToolchainCluster("cluster1"), + hosttest.NewToolchainStatus( + hosttest.WithMember("cluster1", + hosttest.WithSpaceCount(3_000_000), + hosttest.WithNodeRoleUsage("worker", 3000), + hosttest.WithNodeRoleUsage("master", 800), + ), + ), + ) // when _, reconcileErr := r.Reconcile(context.TODO(), req) require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKeyFromObject(spc), spc)) // then assert.NoError(t, reconcileErr) - AssertThat(t, spc, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotReadyReason))) + AssertThat(t, spc, + Is(Ready()), + Has(ConsumedSpaceCount(3_000_000)), + Has(ConsumedMemoryUsage(map[string]int{"master": 800, "worker": 3000}))) }) } func TestSpaceProvisionerConfigReEnqueing(t *testing.T) { - spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, ReferencingToolchainCluster("cluster1")) + spc := NewSpaceProvisionerConfig("spc", test.HostOperatorNs, ReferencingToolchainCluster("cluster1"), Enabled(true)) t.Run("re-enqueues on failure to GET", func(t *testing.T) { // given @@ -255,7 +338,8 @@ func TestSpaceProvisionerConfigReEnqueing(t *testing.T) { // then require.Error(t, reconcileErr) - AssertThat(t, spcInCluster, Is(NotReadyWithReason(toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason))) + AssertThat(t, spcInCluster, + Is(ReadyStatusAndReason(corev1.ConditionFalse, toolchainv1alpha1.SpaceProvisionerConfigToolchainClusterNotFoundReason))) assert.Len(t, spcInCluster.Status.Conditions, 1) assert.Equal(t, "failed to get the referenced ToolchainCluster: "+getErr.Error(), spcInCluster.Status.Conditions[0].Message) }) @@ -328,21 +412,127 @@ func TestSpaceProvisionerConfigReEnqueing(t *testing.T) { }) } +func TestCollectConsumedCapacity(t *testing.T) { + // given + + _, _, cl := prepareReconcile(t, nil, + hosttest.NewToolchainStatus( + hosttest.WithMember( + "cluster-1", + hosttest.WithSpaceCount(300), + hosttest.WithNodeRoleUsage("master", 10), + hosttest.WithNodeRoleUsage("worker", 40), + ), + hosttest.WithMember( + "cluster-2", + hosttest.WithSpaceCount(1), + ), + ), + ) + + t.Run("returns the capacity when present", func(t *testing.T) { 
+ // when + cc, err := collectConsumedCapacity(context.TODO(), cl, "cluster-1", test.HostOperatorNs) + + // then + require.NoError(t, err) + require.NotNil(t, cc) + assert.Equal(t, 300, cc.SpaceCount) + assert.Equal(t, map[string]int{"master": 10, "worker": 40}, cc.MemoryUsagePercentPerNodeRole) + }) + + t.Run("no memory usage is not an error", func(t *testing.T) { + // when + cc, err := collectConsumedCapacity(context.TODO(), cl, "cluster-2", test.HostOperatorNs) + + // then + require.NoError(t, err) + require.NotNil(t, cc) + assert.Equal(t, 1, cc.SpaceCount) + assert.Nil(t, cc.MemoryUsagePercentPerNodeRole) + }) + + t.Run("returns nil when no member status present", func(t *testing.T) { + // when + cc, err := collectConsumedCapacity(context.TODO(), cl, "unknown-cluster", test.HostOperatorNs) + + // then + require.NoError(t, err) + require.Nil(t, cc) + }) + + t.Run("returns error when no toolchain-status is found", func(t *testing.T) { + // given + toolchainStatus := &toolchainv1alpha1.ToolchainStatus{} + require.NoError(t, cl.Get(context.TODO(), runtimeclient.ObjectKey{Name: "toolchain-status", Namespace: test.HostOperatorNs}, toolchainStatus)) + require.NoError(t, cl.Delete(context.TODO(), toolchainStatus)) + + // when + cc, err := collectConsumedCapacity(context.TODO(), cl, "unknown-cluster", test.HostOperatorNs) + + // then + require.Error(t, err) + require.Nil(t, cc) + }) + + t.Run("returns error on failure to get the toolchain status", func(t *testing.T) { + // given + cl.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { + if key.Name == "toolchain-status" { + return errors.New("intetionally failing") + } + return cl.Client.Get(ctx, key, obj, opts...) + } + + // when + cc, err := collectConsumedCapacity(context.TODO(), cl, "unknown-cluster", test.HostOperatorNs) + + // then + require.Error(t, err) + require.Nil(t, cc) + }) +} + func prepareReconcile(t *testing.T, spc *toolchainv1alpha1.SpaceProvisionerConfig, initObjs ...runtimeclient.Object) (*Reconciler, reconcile.Request, *test.FakeClient) { s := runtime.NewScheme() err := apis.AddToScheme(s) require.NoError(t, err) - fakeClient := test.NewFakeClient(t, append(initObjs, spc)...) + objs := initObjs + var name string + var namespace string + if spc != nil { + objs = append(objs, spc) + name = spc.Name + namespace = spc.Namespace + } + fakeClient := test.NewFakeClient(t, objs...) 
r := &Reconciler{ Client: fakeClient, } req := reconcile.Request{ NamespacedName: types.NamespacedName{ - Namespace: test.HostOperatorNs, - Name: spc.Name, + Namespace: namespace, + Name: name, }, } return r, req, fakeClient } + +func readyToolchainCluster(name string) *toolchainv1alpha1.ToolchainCluster { //nolint: unparam // it makes sense to have this param even if it always receives the same value + return &toolchainv1alpha1.ToolchainCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.HostOperatorNs, + }, + Status: toolchainv1alpha1.ToolchainClusterStatus{ + Conditions: []toolchainv1alpha1.Condition{ + { + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } +} diff --git a/go.mod b/go.mod index 0cccfbc3c..531d89961 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,8 @@ module github.com/codeready-toolchain/host-operator require ( cloud.google.com/go/recaptchaenterprise/v2 v2.13.0 - github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf - github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5 + github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80 + github.com/codeready-toolchain/toolchain-common v0.0.0-20241128143121-e2e3546a35e9 github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-logr/logr v1.4.1 @@ -100,7 +100,7 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/net v0.24.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.19.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect diff --git a/go.sum b/go.sum index c2e1d0b40..79b15e9e0 100644 --- a/go.sum +++ b/go.sum @@ -38,10 +38,10 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf h1:tOHKd4PT6gnV8lLh3kmqqK9YONvL6oFKHpi0kGzfsvw= -github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf/go.mod h1:DUq1ffy9Mbersdgji48i/cm9Y+6NMwAdAQJNlfOrPRo= -github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5 h1:vW0C32c6sI9ZUGcUw3e9ftE9hqJ/bMo+TtRHp84Hung= -github.com/codeready-toolchain/toolchain-common v0.0.0-20241114215157-a6a85252b2f5/go.mod h1:wx/d4HVbDPOadwpbxn28ZGClC5OmzelIK8p4wupDJVI= +github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80 h1:OpZkP3OGAdrDHOb1TtHVnLSVuevEiQhOH//plnpVL/c= +github.com/codeready-toolchain/api v0.0.0-20241119094246-f6581d52dc80/go.mod h1:DUq1ffy9Mbersdgji48i/cm9Y+6NMwAdAQJNlfOrPRo= +github.com/codeready-toolchain/toolchain-common v0.0.0-20241128143121-e2e3546a35e9 h1:urMWj0TK944x2zZhVEvcNklLzPbarWBaxLGhyeXdLi0= +github.com/codeready-toolchain/toolchain-common v0.0.0-20241128143121-e2e3546a35e9/go.mod h1:4MD8PrbljFH7qSF44EjzAs2HVdr1R/vrsii2eO8Im+4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -308,8 +308,8 @@ golang.org/x/net v0.2.0/go.mod 
h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= diff --git a/main.go b/main.go index 35388b621..e02495980 100644 --- a/main.go +++ b/main.go @@ -390,7 +390,7 @@ func main() { // nolint:gocyclo } if err = (&spaceprovisionerconfig.Reconciler{ Client: mgr.GetClient(), - }).SetupWithManager(ctx, mgr); err != nil { + }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "SpaceProvisionerConfig") os.Exit(1) }
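Note for reviewers (illustration only, not part of the patch): the sketch below is a minimal, self-contained rendering of the capacity rules the new readiness logic applies — a member counts as having capacity only while it has room for more spaces and every node role is below the memory-utilization threshold; a zero threshold means the limit is not enforced, and missing usage data yields an Unknown condition. The struct and function names are simplified stand-ins, not the real toolchain API types.

package main

import "fmt"

// consumedCapacity is a simplified stand-in for the ConsumedCapacity status reported by the controller.
type consumedCapacity struct {
	spaceCount                    int
	memoryUsagePercentPerNodeRole map[string]int
}

// capacityThresholds is a simplified stand-in for the CapacityThresholds section of the SPC spec.
type capacityThresholds struct {
	maxNumberOfSpaces           uint
	maxMemoryUtilizationPercent uint
}

// capacityReadyState mirrors the rules of determineCapacityReadyState in the patch:
// "Unknown" when no usage data is available, "False" when the space count or any
// node role's memory usage reaches its threshold, "True" otherwise. A zero threshold
// means the corresponding limit is not enforced.
func capacityReadyState(cc *consumedCapacity, limits capacityThresholds) string {
	if cc == nil {
		return "Unknown"
	}
	if limits.maxNumberOfSpaces != 0 && uint(cc.spaceCount) >= limits.maxNumberOfSpaces {
		return "False"
	}
	if limits.maxMemoryUtilizationPercent == 0 {
		return "True"
	}
	if len(cc.memoryUsagePercentPerNodeRole) == 0 {
		return "Unknown"
	}
	for _, usage := range cc.memoryUsagePercentPerNodeRole {
		if uint(usage) >= limits.maxMemoryUtilizationPercent {
			return "False"
		}
	}
	return "True"
}

func main() {
	limits := capacityThresholds{maxNumberOfSpaces: 5, maxMemoryUtilizationPercent: 80}

	ok := &consumedCapacity{spaceCount: 3, memoryUsagePercentPerNodeRole: map[string]int{"worker": 50, "master": 40}}
	fmt.Println(capacityReadyState(ok, limits)) // True

	full := &consumedCapacity{spaceCount: 3, memoryUsagePercentPerNodeRole: map[string]int{"worker": 90, "master": 40}}
	fmt.Println(capacityReadyState(full, limits)) // False: "worker" is at or above the 80% threshold

	fmt.Println(capacityReadyState(nil, limits)) // Unknown: no usage data collected
}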