diff --git a/controllers/idler/idler_controller.go b/controllers/idler/idler_controller.go index eacd0f05..d0c0d852 100644 --- a/controllers/idler/idler_controller.go +++ b/controllers/idler/idler_controller.go @@ -132,8 +132,9 @@ func (r *Reconciler) ensureIdling(logger logr.Logger, idler *toolchainv1alpha1.I // Already tracking this pod. Check the timeout. if time.Now().After(trackedPod.StartTime.Add(time.Duration(idler.Spec.TimeoutSeconds) * time.Second)) { podLogger.Info("Pod running for too long. Killing the pod.", "start_time", trackedPod.StartTime.Format("2006-01-02T15:04:05Z"), "timeout_seconds", idler.Spec.TimeoutSeconds) + // Check if it belongs to a controller (Deployment, DeploymentConfig, etc) and scale it down to zero. - deletedByController, err := r.scaleControllerToZero(podLogger, pod.ObjectMeta) + appType, appName, deletedByController, err := r.scaleControllerToZero(podLogger, pod.ObjectMeta) if err != nil { return err } @@ -144,8 +145,14 @@ func (r *Reconciler) ensureIdling(logger logr.Logger, idler *toolchainv1alpha1.I } podLogger.Info("Pod deleted") } + + if appName == "" { + appName = pod.Name + appType = "Pod" + } + // By now either a pod has been deleted or scaled to zero by controller, idler Triggered notification should be sent - if err := r.createNotification(logger, idler); err != nil { + if err := r.createNotification(logger, idler, appName, appType); err != nil { logger.Error(err, "failed to create Notification") if err = r.setStatusIdlerNotificationCreationFailed(idler, err.Error()); err != nil { logger.Error(err, "failed to set status IdlerNotificationCreationFailed") @@ -168,7 +175,7 @@ func (r *Reconciler) ensureIdling(logger logr.Logger, idler *toolchainv1alpha1.I return r.updateStatusPods(idler, newStatusPods) } -func (r *Reconciler) createNotification(logger logr.Logger, idler *toolchainv1alpha1.Idler) error { +func (r *Reconciler) createNotification(logger logr.Logger, idler *toolchainv1alpha1.Idler, appName string, appType 
string) error { logger.Info("Create Notification") //Get the HostClient hostCluster, ok := r.GetHostCluster() @@ -200,7 +207,10 @@ func (r *Reconciler) createNotification(logger logr.Logger, idler *toolchainv1al keysAndVals := map[string]string{ "Namespace": idler.Name, + "AppName": appName, + "AppType": appType, } + for _, userEmail := range userEmails { _, err := notify.NewNotificationBuilder(hostCluster.Client, hostCluster.OperatorNamespace). WithName(notificationName). @@ -252,7 +262,8 @@ func (r *Reconciler) getUserEmailsFromMURs(logger logr.Logger, hostCluster *clus // scaleControllerToZero checks if the object has an owner controller (Deployment, ReplicaSet, etc) // and scales the owner down to zero and returns "true". // Otherwise returns "false". -func (r *Reconciler) scaleControllerToZero(logger logr.Logger, meta metav1.ObjectMeta) (bool, error) { +// It also returns the parent controller type and name or empty strings if there is no parent controller. +func (r *Reconciler) scaleControllerToZero(logger logr.Logger, meta metav1.ObjectMeta) (string, string, bool, error) { logger.Info("Scaling controller to zero", "name", meta.Name) owners := meta.GetOwnerReferences() for _, owner := range owners { @@ -275,17 +286,17 @@ func (r *Reconciler) scaleControllerToZero(logger logr.Logger, meta metav1.Objec } } } - return false, nil + return "", "", false, nil } -func (r *Reconciler) scaleDeploymentToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) scaleDeploymentToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { logger.Info("Scaling deployment to zero", "name", owner.Name) d := &appsv1.Deployment{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, d); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. 
The Garbage Collector should delete the pods shortly. - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } zero := int32(0) @@ -298,10 +309,10 @@ func (r *Reconciler) scaleDeploymentToZero(logger logr.Logger, namespace string, if err == nil { logger.Info("Deployment scaled to zero using scale sub resource", "name", d.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } else if errors.IsInternalError(err) { // Internal error indicates that the specReplicasPath is not set on the custom resource - just update the scale resource scale := autoscalingv1.Scale{ ObjectMeta: ctrl.ObjectMeta{ @@ -316,22 +327,22 @@ func (r *Reconciler) scaleDeploymentToZero(logger logr.Logger, namespace string, if err == nil { logger.Info("Deployment scaled to zero using scale sub resource", "name", d.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } else if !errors.IsNotFound(err) { - return false, err + return "", "", false, err } } } d.Spec.Replicas = &zero if err := r.AllNamespacesClient.Update(context.TODO(), d); err != nil { - return false, err + return "", "", false, err } logger.Info("Deployment scaled to zero", "name", d.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } func getSupportedScaleResource(ownerReference metav1.OwnerReference) *schema.GroupVersionResource { @@ -346,116 +357,121 @@ func getSupportedScaleResource(ownerReference metav1.OwnerReference) *schema.Gro return nil } -func (r *Reconciler) scaleReplicaSetToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) scaleReplicaSetToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { logger.Info("Scaling replica set to zero", "name", owner.Name) rs := &appsv1.ReplicaSet{} if err := 
r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, rs); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly. logger.Info("replica set is not found; ignoring: it might be already deleted") - return true, nil + return owner.Kind, owner.Name, true, nil } logger.Error(err, "error deleting rs") - return false, err + return "", "", false, err } - deletedByController, err := r.scaleControllerToZero(logger, rs.ObjectMeta) // Check if the ReplicaSet has another controller which owns it (i.g. Deployment) + + appType, appName, deletedByController, err := r.scaleControllerToZero(logger, rs.ObjectMeta) // Check if the ReplicaSet has another controller which owns it (e.g. Deployment) if err != nil { - return false, err + return "", "", false, err } if !deletedByController { // There is no controller that owns the ReplicaSet. Scale the ReplicaSet to zero. zero := int32(0) rs.Spec.Replicas = &zero if err := r.AllNamespacesClient.Update(context.TODO(), rs); err != nil { - return false, err + return "", "", false, err } logger.Info("ReplicaSet scaled to zero", "name", rs.Name) + appType = owner.Kind + appName = owner.Name } - return true, nil + return appType, appName, true, nil } -func (r *Reconciler) deleteDaemonSet(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) deleteDaemonSet(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { ds := &appsv1.DaemonSet{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, ds); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly.
- return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } if err := r.AllNamespacesClient.Delete(context.TODO(), ds); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly. - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } logger.Info("DaemonSet deleted", "name", ds.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } -func (r *Reconciler) scaleStatefulSetToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) scaleStatefulSetToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { s := &appsv1.StatefulSet{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, s); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly. 
- return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } zero := int32(0) s.Spec.Replicas = &zero if err := r.AllNamespacesClient.Update(context.TODO(), s); err != nil { - return false, err + return "", "", false, err } logger.Info("StatefulSet scaled to zero", "name", s.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } -func (r *Reconciler) scaleDeploymentConfigToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) scaleDeploymentConfigToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { dc := &openshiftappsv1.DeploymentConfig{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, dc); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly. - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } dc.Spec.Replicas = 0 if err := r.AllNamespacesClient.Update(context.TODO(), dc); err != nil { - return false, err + return "", "", false, err } logger.Info("DeploymentConfig scaled to zero", "name", dc.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } -func (r *Reconciler) scaleReplicationControllerToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) scaleReplicationControllerToZero(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { rc := &corev1.ReplicationController{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, rc); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. 
The Garbage Collector should delete the pods shortly. - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } - deletedByController, err := r.scaleControllerToZero(logger, rc.ObjectMeta) // Check if the ReplicationController has another controller which owns it (i.g. DeploymentConfig) + appType, appName, deletedByController, err := r.scaleControllerToZero(logger, rc.ObjectMeta) // Check if the ReplicationController has another controller which owns it (e.g. DeploymentConfig) if err != nil { - return false, err + return "", "", false, err } if !deletedByController { // There is no controller who owns the ReplicationController. Scale the ReplicationController to zero. zero := int32(0) rc.Spec.Replicas = &zero if err := r.AllNamespacesClient.Update(context.TODO(), rc); err != nil { - return false, err + return "", "", false, err } logger.Info("ReplicationController scaled to zero", "name", rc.Name) + appType = owner.Kind + appName = owner.Name } - return true, nil + return appType, appName, true, nil } -func (r *Reconciler) deleteJob(logger logr.Logger, namespace string, owner metav1.OwnerReference) (bool, error) { +func (r *Reconciler) deleteJob(logger logr.Logger, namespace string, owner metav1.OwnerReference) (string, string, bool, error) { j := &batchv1.Job{} if err := r.AllNamespacesClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: owner.Name}, j); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly.
logger.Info("job not found") - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } // see https://github.com/kubernetes/kubernetes/issues/20902#issuecomment-321484735 // also, this may be needed for the e2e tests if the call to `client.Delete` comes too quickly after creating the job, @@ -466,12 +482,12 @@ func (r *Reconciler) deleteJob(logger logr.Logger, namespace string, owner metav PropagationPolicy: &propagationPolicy, }); err != nil { if errors.IsNotFound(err) { // Ignore not found errors. Can happen if the parent controller has been deleted. The Garbage Collector should delete the pods shortly. - return true, nil + return owner.Kind, owner.Name, true, nil } - return false, err + return "", "", false, err } logger.Info("Job deleted", "name", j.Name) - return true, nil + return owner.Kind, owner.Name, true, nil } func findPodByName(idler *toolchainv1alpha1.Idler, name string) *toolchainv1alpha1.Pod { diff --git a/controllers/idler/idler_controller_test.go b/controllers/idler/idler_controller_test.go index 97363fd9..2d2d9871 100644 --- a/controllers/idler/idler_controller_test.go +++ b/controllers/idler/idler_controller_test.go @@ -584,6 +584,151 @@ func TestEnsureIdlingFailed(t *testing.T) { }) } +func TestAppNameTypeForControllers(t *testing.T) { + + idler := &toolchainv1alpha1.Idler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "alex-stage", + Labels: map[string]string{ + toolchainv1alpha1.SpaceLabelKey: "alex", + }, + }, + Spec: toolchainv1alpha1.IdlerSpec{TimeoutSeconds: 60}, + } + namespaces := []string{"dev", "stage"} + usernames := []string{"alex"} + nsTmplSet := newNSTmplSet(test.MemberOperatorNs, "alex", "advanced", "abcde11", namespaces, usernames) + mur := newMUR("alex") + reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) + plds := preparePayloads(t, reconciler, idler.Name, "", time.Now()) + + tests := map[string]struct { + ownerKind string + 
ownerName string + expectedAppType string + expectedAppName string + }{ + "Deployment": { + // We are testing the case with nested controllers (Deployment -> ReplicaSet -> Pod) here, + // so the pod's owner is a ReplicaSet but the expected scaled app is the parent Deployment. + ownerKind: "ReplicaSet", + ownerName: fmt.Sprintf("%s-replicaset", plds.deployment.Name), + expectedAppType: "Deployment", + expectedAppName: plds.deployment.Name, + }, + "ReplicaSet": { + ownerKind: "ReplicaSet", + ownerName: plds.replicaSet.Name, + expectedAppType: "ReplicaSet", + expectedAppName: plds.replicaSet.Name, + }, + "DaemonSet": { + ownerKind: "DaemonSet", + ownerName: plds.daemonSet.Name, + expectedAppType: "DaemonSet", + expectedAppName: plds.daemonSet.Name, + }, + "StatefulSet": { + ownerKind: "StatefulSet", + ownerName: plds.statefulSet.Name, + expectedAppType: "StatefulSet", + expectedAppName: plds.statefulSet.Name, + }, + "DeploymentConfig": { + // We are testing the case with nested controllers (DeploymentConfig -> ReplicationController -> Pod) here, + // so the pod's owner is a ReplicationController but the expected scaled app is the parent DeploymentConfig.
+ ownerKind: "ReplicationController", + ownerName: fmt.Sprintf("%s-replicationcontroller", plds.deploymentConfig.Name), + expectedAppType: "DeploymentConfig", + expectedAppName: plds.deploymentConfig.Name, + }, + "ReplicationController": { + ownerKind: "ReplicationController", + ownerName: plds.replicationController.Name, + expectedAppType: "ReplicationController", + expectedAppName: plds.replicationController.Name, + }, + "Job": { + ownerKind: "Job", + ownerName: plds.job.Name, + expectedAppType: "Job", + expectedAppName: plds.job.Name, + }, + } + + for k, tc := range tests { + t.Run(k, func(t *testing.T) { + //given + p := func() *corev1.Pod { + for _, pod := range plds.controlledPods { + for _, owner := range pod.OwnerReferences { + if owner.Kind == tc.ownerKind && owner.Name == tc.ownerName { + return pod + } + } + } + return nil + }() + + //when + appType, appName, deletedByController, err := reconciler.scaleControllerToZero(logf.FromContext(context.TODO()), p.ObjectMeta) + + //then + require.NoError(t, err) + require.Equal(t, true, deletedByController) + require.Equal(t, tc.expectedAppType, appType) + require.Equal(t, tc.expectedAppName, appName) + }) + } +} + +func TestAppNameTypeForInidividualPods(t *testing.T) { + //given + idler := &toolchainv1alpha1.Idler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "alex-stage", + Labels: map[string]string{ + toolchainv1alpha1.SpaceLabelKey: "alex", + }, + }, + Spec: toolchainv1alpha1.IdlerSpec{TimeoutSeconds: 60}, + } + + t.Run("Test AppName/Type in notification", func(t *testing.T) { + namespaces := []string{"dev", "stage"} + usernames := []string{"alex"} + nsTmplSet := newNSTmplSet(test.MemberOperatorNs, "alex", "advanced", "abcde11", namespaces, usernames) + mur := newMUR("alex") + reconciler, req, cl, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) + idlerTimeoutPlusOneSecondAgo := time.Now().Add(-time.Duration(idler.Spec.TimeoutSeconds+1) * time.Second) + p := preparePayloadsSinglePod(t, 
reconciler, idler.Name, "todelete-", idlerTimeoutPlusOneSecondAgo).standalonePods[0] + // first reconcile to track pods + res, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + + // second reconcile should delete pods and create notification + res, err = reconciler.Reconcile(context.TODO(), req) + //then + assert.NoError(t, err) + memberoperatortest.AssertThatIdler(t, idler.Name, cl). + HasConditions(memberoperatortest.Running(), memberoperatortest.IdlerNotificationCreated()) + //check the notification is actually created + hostCl, _ := reconciler.GetHostCluster() + notification := &toolchainv1alpha1.Notification{} + err = hostCl.Client.Get(context.TODO(), types.NamespacedName{ + Namespace: test.HostOperatorNs, + Name: "alex-stage-idled", + }, notification) + require.NoError(t, err) + require.Equal(t, "alex@test.com", notification.Spec.Recipient) + require.Equal(t, "idled", notification.Labels[toolchainv1alpha1.NotificationTypeLabelKey]) + require.Equal(t, "Pod", notification.Spec.Context["AppType"]) + require.Equal(t, p.Name, notification.Spec.Context["AppName"]) + + }) + +} func TestCreateNotification(t *testing.T) { idler := &toolchainv1alpha1.Idler{ ObjectMeta: metav1.ObjectMeta{ @@ -594,6 +739,7 @@ func TestCreateNotification(t *testing.T) { }, Spec: toolchainv1alpha1.IdlerSpec{TimeoutSeconds: 60}, } + t.Run("Creates a notification the first time", func(t *testing.T) { // given namespaces := []string{"dev", "stage"} @@ -603,7 +749,7 @@ func TestCreateNotification(t *testing.T) { reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") //then require.NoError(t, err) require.True(t, condition.IsTrue(idler.Status.Conditions, toolchainv1alpha1.IdlerTriggeredNotificationCreated)) 
@@ -616,7 +762,7 @@ func TestCreateNotification(t *testing.T) { t.Run("Notification not created if already sent", func(t *testing.T) { //when - err = reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err = reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") //then require.NoError(t, err) err = hostCl.Client.Get(context.TODO(), types.NamespacedName{Name: "alex-stage-idled", Namespace: hostCl.OperatorNamespace}, ¬ification) @@ -641,7 +787,7 @@ func TestCreateNotification(t *testing.T) { reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") //then require.NoError(t, err) require.True(t, condition.IsTrue(idler.Status.Conditions, toolchainv1alpha1.IdlerTriggeredNotificationCreated)) @@ -660,7 +806,7 @@ func TestCreateNotification(t *testing.T) { return errors.New("can't update condition") } //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") //then require.EqualError(t, err, "can't update condition") @@ -671,7 +817,7 @@ func TestCreateNotification(t *testing.T) { // second reconcile will not create the notification again but set the status cl.MockStatusUpdate = nil - err = reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err = reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") require.NoError(t, err) require.True(t, condition.IsTrue(idler.Status.Conditions, toolchainv1alpha1.IdlerTriggeredNotificationCreated)) }) @@ -684,7 +830,7 @@ func TestCreateNotification(t *testing.T) { reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, 
nsTmplSet) //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") //then require.EqualError(t, err, "could not get the MUR: masteruserrecords.toolchain.dev.openshift.com \"alex\" not found") }) @@ -699,7 +845,7 @@ func TestCreateNotification(t *testing.T) { delete(mur.Annotations, toolchainv1alpha1.MasterUserRecordEmailAnnotationKey) reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") require.EqualError(t, err, "no email found for the user in MURs") }) @@ -712,7 +858,7 @@ func TestCreateNotification(t *testing.T) { mur.Annotations[toolchainv1alpha1.MasterUserRecordEmailAnnotationKey] = "invalid-email-address" reconciler, _, _, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur) //when - err := reconciler.createNotification(logf.FromContext(context.TODO()), idler) + err := reconciler.createNotification(logf.FromContext(context.TODO()), idler, "testPodName", "testapptype") require.EqualError(t, err, "unable to create Notification CR from Idler: The specified recipient [invalid-email-address] is not a valid email address: mail: missing '@' or angle-addr") }) } @@ -989,6 +1135,25 @@ func preparePayloads(t *testing.T, r *Reconciler, namespace, namePrefix string, } } +func preparePayloadsSinglePod(t *testing.T, r *Reconciler, namespace, namePrefix string, startTime time.Time) payloads { + sTime := metav1.NewTime(startTime) + + // Pods with no owner. 
+ standalonePods := make([]*corev1.Pod, 0, 1) + for i := 0; i < 1; i++ { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s%s-pod-%d", namePrefix, namespace, i), Namespace: namespace}, + Status: corev1.PodStatus{StartTime: &sTime}, + } + standalonePods = append(standalonePods, pod) + err := r.AllNamespacesClient.Create(context.TODO(), pod) + require.NoError(t, err) + } + return payloads{ + standalonePods: standalonePods, + } +} + func createPods(t *testing.T, r *Reconciler, owner metav1.Object, startTime metav1.Time, podsToTrack []*corev1.Pod) []*corev1.Pod { for i := 0; i < 3; i++ { pod := &corev1.Pod{