add tests for sidecar containers
yangjunmyfm192085 committed Sep 19, 2023
1 parent ccc592c commit 7de8030
Showing 4 changed files with 134 additions and 11 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -183,7 +183,7 @@ test-e2e-all: test-e2e-1.28 test-e2e-1.27 test-e2e-1.26

.PHONY: test-e2e-1.28
test-e2e-1.28:
NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 ./test/test-e2e.sh
NODE_IMAGE=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 NEED_ENABLE_FEATURE=true ./test/test-e2e.sh

.PHONY: test-e2e-1.27
test-e2e-1.27:
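Note: test-e2e-1.28 is the only target that sets NEED_ENABLE_FEATURE=true, so the new sidecar tests only run against a kind cluster created with the SidecarContainers feature gate (see test/kind-config1.28.yaml further down). A rough way to run just that suite locally, assuming Docker is available and you are at the repository root:

# runs ./test/test-e2e.sh with the pinned v1.28.0 node image and NEED_ENABLE_FEATURE=true
make test-e2e-1.28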
123 changes: 113 additions & 10 deletions test/e2e_test.go
@@ -47,16 +47,19 @@ import (
)

const (
localPort = 10250
cpuConsumerPodName = "cpu-consumer"
memoryConsumerPodName = "memory-consumer"
initContainerPodName = "cmwithinitcontainer-consumer"
sideCarContainerPodName = "sidecarpod-consumer"
labelSelector = "metrics-server-skip!=true"
skipLabel = "metrics-server-skip==true"
labelKey = "metrics-server-skip"
localPort = 10250
cpuConsumerPodName = "cpu-consumer"
memoryConsumerPodName = "memory-consumer"
initContainerPodName = "cmwithinitcontainer-consumer"
sideCarContainerPodName = "sidecarpod-consumer"
initSidecarContainersPodName = "initsidecarpod-consumer"
labelSelector = "metrics-server-skip!=true"
skipLabel = "metrics-server-skip==true"
labelKey = "metrics-server-skip"
)

var needTestSideCarsContainers bool

func TestMetricsServer(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "[MetricsServer]")
@@ -76,6 +79,14 @@ var _ = Describe("MetricsServer", func() {
panic(err)
}

version, err := client.ServerVersion()
if err != nil {
panic(err)
}
if strings.HasPrefix(version.GitVersion, "v1.28") {
needTestSideCarsContainers = true
}

BeforeSuite(func() {
deletePod(client, cpuConsumerPodName)
err = consumeCPU(client, cpuConsumerPodName, labelKey)
@@ -97,12 +108,20 @@ var _ = Describe("MetricsServer", func() {
if err != nil {
panic(err)
}
if needTestSideCarsContainers {
err = consumeWithInitSideCarContainer(client, initSidecarContainersPodName, labelKey)
if err != nil {
panic(err)
}
}

})
AfterSuite(func() {
deletePod(client, cpuConsumerPodName)
deletePod(client, memoryConsumerPodName)
deletePod(client, initContainerPodName)
deletePod(client, sideCarContainerPodName)
deletePod(client, initSidecarContainersPodName)
})

It("exposes metrics from at least one pod in cluster", func() {
@@ -191,6 +210,29 @@ var _ = Describe("MetricsServer", func() {
Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU of Container %q should not be equal zero", ms.Containers[1].Name)
Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory of Container %q should not be equal zero", ms.Containers[1].Name)
})

if needTestSideCarsContainers {
It("returns metric for pod with init sidecar container", func() {
Expect(err).NotTo(HaveOccurred(), "Failed to create %q pod", initSidecarContainersPodName)
deadline := time.Now().Add(60 * time.Second)
var ms *v1beta1.PodMetrics
for {
ms, err = mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceDefault).Get(context.TODO(), initSidecarContainersPodName, metav1.GetOptions{})
if err == nil || time.Now().After(deadline) {
break
}
time.Sleep(5 * time.Second)
}
Expect(err).NotTo(HaveOccurred(), "Failed to get %q pod", initSidecarContainersPodName)
Expect(ms.Containers).To(HaveLen(2), "Unexpected number of containers")
usage := ms.Containers[0].Usage
Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
usage = ms.Containers[1].Usage
Expect(usage.Cpu().MilliValue()).NotTo(Equal(0), "CPU should not be equal zero")
Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory should not be equal zero")
})
}
It("passes readyz probe", func() {
msPods := mustGetMetricsServerPods(client)
for _, pod := range msPods {
@@ -539,17 +581,33 @@ func watchPodReadyStatus(client clientset.Interface, podNamespace string, podNam
if !ok {
return fmt.Errorf("Watch pod failed")
}
var containerReady = false
var (
containerReady = false
initContainerReady = false
)
if pod.Name == podName {
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
break
}
containerReady = true
}
if containerReady {

if containerReady && podName != initSidecarContainersPodName {
return nil
}
if podName == initSidecarContainersPodName {
for _, containerStatus := range pod.Status.InitContainerStatuses {
if !containerStatus.Ready {
break
}
initContainerReady = true
}

if containerReady && initContainerReady {
return nil
}
}
}
}
}
@@ -688,6 +746,51 @@ func consumeWithSideCarContainer(client clientset.Interface, podName, nodeSelect
return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion)
}

func consumeWithInitSideCarContainer(client clientset.Interface, podName, nodeSelector string) error {
startPolicy := corev1.ContainerRestartPolicyAlways
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: podName,
Command: []string{"./consume-cpu/consume-cpu"},
Args: []string{"--duration-sec=60", "--millicores=50"},
Image: "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: mustQuantity("100m"),
corev1.ResourceMemory: mustQuantity("100Mi"),
},
},
},
},
InitContainers: []corev1.Container{
{
Name: "init-container",
Command: []string{"./consume-cpu/consume-cpu"},
Args: []string{"--duration-sec=60", "--millicores=50"},
Image: "registry.k8s.io/e2e-test-images/resource-consumer:1.9",
RestartPolicy: &startPolicy,
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: mustQuantity("100m"),
corev1.ResourceMemory: mustQuantity("100Mi"),
},
},
},
},
Affinity: affinity(nodeSelector),
},
}

currentPod, err := client.CoreV1().Pods(metav1.NamespaceDefault).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
return err
}
return watchPodReadyStatus(client, metav1.NamespaceDefault, podName, currentPod.ResourceVersion)
}

func deletePod(client clientset.Interface, podName string) {
var gracePeriodSeconds int64 = 0
_ = client.CoreV1().Pods(metav1.NamespaceDefault).Delete(context.TODO(), podName, metav1.DeleteOptions{
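For reference, the pod built by consumeWithInitSideCarContainer above corresponds to a manifest along these lines (a hand-written sketch, not part of the commit; the node affinity added by affinity(nodeSelector) in the helper is omitted). The restartPolicy: Always on the init container is what makes it a restartable "sidecar" init container, which is why the feature gate in the next file is needed:

# illustrative only: rough YAML equivalent of the Go pod spec above, minus affinity
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: initsidecarpod-consumer
spec:
  containers:
  - name: initsidecarpod-consumer
    image: registry.k8s.io/e2e-test-images/resource-consumer:1.9
    command: ["./consume-cpu/consume-cpu"]
    args: ["--duration-sec=60", "--millicores=50"]
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
  initContainers:
  - name: init-container
    image: registry.k8s.io/e2e-test-images/resource-consumer:1.9
    command: ["./consume-cpu/consume-cpu"]
    args: ["--duration-sec=60", "--millicores=50"]
    restartPolicy: Always   # marks this init container as a sidecar
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
EOF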
16 changes: 16 additions & 0 deletions test/kind-config1.28.yaml
@@ -0,0 +1,16 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
featureGates:
"SidecarContainers": true
nodes:
- role: control-plane
kubeadmConfigPatches:
- |
kind: ClusterConfiguration
apiServer:
extraArgs:
"enable-aggregator-routing": "true"
- role: worker
- role: worker
labels:
metrics-server-skip: true
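This config enables the SidecarContainers feature gate, which in kind generally has to happen at cluster creation time, so a dedicated config file is used for the 1.28 run rather than toggling the gate on an existing cluster. Roughly what create_cluster in test/test-e2e.sh ends up running when NEED_ENABLE_FEATURE=true (a sketch; paths relative to the repository root):

# mirrors: ${KIND} create cluster --name=e2e --image=${NODE_IMAGE} --config=${KIND_CONFIG}
kind create cluster --name=e2e \
  --image=kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 \
  --config=test/kind-config1.28.yaml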
4 changes: 4 additions & 0 deletions test/test-e2e.sh
@@ -4,6 +4,7 @@ set -e

: ${NODE_IMAGE:?Need to set NODE_IMAGE to test}
: ${SKAFFOLD_PROFILE:="test"}
: ${NEED_ENABLE_FEATURE:="false"}


KIND_VERSION=0.20.0
@@ -77,6 +78,9 @@ setup_kubectl() {

create_cluster() {
KIND_CONFIG="$PWD/test/kind-config.yaml"
if [[ ${NEED_ENABLE_FEATURE} == "true" ]]; then
KIND_CONFIG="$PWD/test/kind-config1.28.yaml"
fi
if ! (${KIND} create cluster --name=e2e --image=${NODE_IMAGE} --config=${KIND_CONFIG}) ; then
echo "Could not create KinD cluster"
exit 1
