From 6e4f34811746d49619f8c5f4821c3a4ff264bc10 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Sat, 30 Mar 2019 22:44:52 -0400 Subject: [PATCH 1/7] adding yaml output option, updating license to refer to "Kube Capacity Authors", adding contributors section --- LICENSE | 2 +- README.md | 7 + main.go | 2 +- pkg/capacity/capacity.go | 166 ++++++++++++++ .../{list_test.go => capacity_test.go} | 2 +- pkg/capacity/json.go | 101 --------- pkg/capacity/list.go | 212 ++++++++---------- pkg/capacity/printer.go | 41 ++-- pkg/capacity/resources.go | 2 +- pkg/capacity/table.go | 17 +- pkg/cmd/root.go | 4 +- pkg/cmd/version.go | 2 +- pkg/kube/clientset.go | 2 +- 13 files changed, 299 insertions(+), 261 deletions(-) create mode 100644 pkg/capacity/capacity.go rename pkg/capacity/{list_test.go => capacity_test.go} (99%) delete mode 100644 pkg/capacity/json.go diff --git a/LICENSE b/LICENSE index b3ddd85a..5804391b 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2019 Rob Scott + Copyright 2019 Kube Capacity Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 3335eaef..67f5eef6 100644 --- a/README.md +++ b/README.md @@ -111,5 +111,12 @@ There are already some great projects out there that have similar goals. - [kube-resource-report](https://github.com/hjacobs/kube-resource-report): generates HTML/CSS report for resource requests and limits across multiple clusters. - [kubetop](https://github.com/LeastAuthority/kubetop): a CLI similar to top for Kubernetes, focused on resource utilization (not requests and limits). 
+## Contributors + +Although this project was originally developed by [robscott](https://github.com/robscott), there have been some great contributions from others: + +- [endzyme](https://github.com/endzyme) +- [justinbarrick](https://github.com/justinbarrick) + ## License Apache License 2.0 diff --git a/main.go b/main.go index f2c3aa5d..032c337a 100644 --- a/main.go +++ b/main.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go new file mode 100644 index 00000000..97f709c9 --- /dev/null +++ b/pkg/capacity/capacity.go @@ -0,0 +1,166 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "fmt" + "os" + + "k8s.io/client-go/kubernetes" + metrics "k8s.io/metrics/pkg/client/clientset/versioned" + + "github.com/robscott/kube-capacity/pkg/kube" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +// FetchAndPrint gathers cluster resource data and outputs it +func FetchAndPrint(showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { + clientset, err := kube.NewClientSet(kubeContext) + if err != nil { + fmt.Printf("Error connecting to Kubernetes: %v\n", err) + os.Exit(1) + } + + podList, nodeList := getPodsAndNodes(clientset, podLabels, nodeLabels, namespaceLabels) + pmList := &v1beta1.PodMetricsList{} + if showUtil { + mClientset, err := kube.NewMetricsClientSet(kubeContext) + if err != nil { + fmt.Printf("Error connecting to Metrics API: %v\n", err) + os.Exit(4) + } + + pmList = getMetrics(mClientset) + } + + cm := buildClusterMetric(podList, pmList, nodeList) + printList(&cm, showPods, showUtil, output) +} + +func getPodsAndNodes(clientset kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { + nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{ + LabelSelector: nodeLabels, + }) + if err != nil { + fmt.Printf("Error listing Nodes: %v\n", err) + os.Exit(2) + } + + podList, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{ + LabelSelector: podLabels, + }) + if err != nil { + fmt.Printf("Error listing Pods: %v\n", err) + os.Exit(3) + } + + newPodItems := []corev1.Pod{} + + nodes := map[string]bool{} + for _, node := range nodeList.Items { + nodes[node.GetName()] = true + } + + for _, pod := range podList.Items { + if !nodes[pod.Spec.NodeName] { + continue + } + + newPodItems = append(newPodItems, pod) + } + + podList.Items = newPodItems + + if namespaceLabels != "" { + namespaceList, err := 
clientset.CoreV1().Namespaces().List(metav1.ListOptions{ + LabelSelector: namespaceLabels, + }) + if err != nil { + fmt.Printf("Error listing Namespaces: %v\n", err) + os.Exit(3) + } + + namespaces := map[string]bool{} + for _, ns := range namespaceList.Items { + namespaces[ns.GetName()] = true + } + + newPodItems := []corev1.Pod{} + + for _, pod := range podList.Items { + if !namespaces[pod.GetNamespace()] { + continue + } + + newPodItems = append(newPodItems, pod) + } + + podList.Items = newPodItems + } + + return podList, nodeList +} + +func getMetrics(mClientset *metrics.Clientset) *v1beta1.PodMetricsList { + pmList, err := mClientset.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{}) + if err != nil { + fmt.Printf("Error getting Pod Metrics: %v\n", err) + fmt.Println("For this to work, metrics-server needs to be running in your cluster") + os.Exit(6) + } + + return pmList +} + +func buildClusterMetric(podList *corev1.PodList, pmList *v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { + cm := clusterMetric{ + cpu: &resourceMetric{resourceType: "cpu"}, + memory: &resourceMetric{resourceType: "memory"}, + nodeMetrics: map[string]*nodeMetric{}, + podMetrics: map[string]*podMetric{}, + } + + for _, node := range nodeList.Items { + cm.nodeMetrics[node.Name] = &nodeMetric{ + cpu: &resourceMetric{ + resourceType: "cpu", + allocatable: node.Status.Allocatable["cpu"], + }, + memory: &resourceMetric{ + resourceType: "memory", + allocatable: node.Status.Allocatable["memory"], + }, + podMetrics: map[string]*podMetric{}, + } + + cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) + cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) + } + + podMetrics := map[string]v1beta1.PodMetrics{} + for _, pm := range pmList.Items { + podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { + 
cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) + } + } + + return cm +} diff --git a/pkg/capacity/list_test.go b/pkg/capacity/capacity_test.go similarity index 99% rename from pkg/capacity/list_test.go rename to pkg/capacity/capacity_test.go index 4dc85334..4c8cfa20 100644 --- a/pkg/capacity/list_test.go +++ b/pkg/capacity/capacity_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pkg/capacity/json.go b/pkg/capacity/json.go deleted file mode 100644 index d4750548..00000000 --- a/pkg/capacity/json.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package capacity - json.go contains all the messy details for the json printer implementation -package capacity - -import ( - "encoding/json" - "fmt" -) - -type jsonNodeMetric struct { - Name string `json:"name"` - CPU *jsonResourceOutput `json:"cpu,omitempty"` - Memory *jsonResourceOutput `json:"memory,omitempty"` - Pods []*jsonPod `json:"pods,omitempty"` -} - -type jsonPod struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - CPU *jsonResourceOutput `json:"cpu"` - Memory *jsonResourceOutput `json:"memory"` -} - -type jsonResourceOutput struct { - Requests string `json:"requests"` - RequestsPct string `json:"requests_pct"` - Limits string `json:"limits"` - LimitsPct string `json:"limits_pct"` - Utilization string `json:"utilization,omitempty"` - UtilizationPct string `json:"utilization_pct,omitempty"` -} - -type jsonClusterMetrics struct { - Nodes []*jsonNodeMetric `json:"nodes"` - ClusterTotals struct { - CPU *jsonResourceOutput `json:"cpu"` - Memory *jsonResourceOutput `json:"memory"` - } `json:"cluster_totals"` -} - -type jsonPrinter struct { - cm *clusterMetric - showPods bool - showUtil bool -} - -func (jp jsonPrinter) Print() { - jsonOutput := 
jp.buildJSONClusterMetrics() - - jsonRaw, err := json.MarshalIndent(jsonOutput, "", " ") - if err != nil { - fmt.Println("Error Marshalling JSON") - fmt.Println(err) - } - - fmt.Printf("%s", jsonRaw) -} - -func (jp *jsonPrinter) buildJSONClusterMetrics() jsonClusterMetrics { - var response jsonClusterMetrics - - response.ClusterTotals.CPU = jp.buildJSONResourceOutput(jp.cm.cpu) - response.ClusterTotals.Memory = jp.buildJSONResourceOutput(jp.cm.memory) - - for key, val := range jp.cm.nodeMetrics { - var node jsonNodeMetric - node.Name = key - node.CPU = jp.buildJSONResourceOutput(val.cpu) - node.Memory = jp.buildJSONResourceOutput(val.memory) - if jp.showPods { - for _, val := range val.podMetrics { - var newNode jsonPod - newNode.Name = val.name - newNode.Namespace = val.namespace - newNode.CPU = jp.buildJSONResourceOutput(val.cpu) - newNode.Memory = jp.buildJSONResourceOutput(val.memory) - node.Pods = append(node.Pods, &newNode) - } - } - response.Nodes = append(response.Nodes, &node) - } - - return response -} - -func (jp *jsonPrinter) buildJSONResourceOutput(item *resourceMetric) *jsonResourceOutput { - valueCalculator := item.valueFunction() - percentCalculator := item.percentFunction() - - out := jsonResourceOutput{ - Requests: valueCalculator(item.request), - RequestsPct: percentCalculator(item.request), - Limits: valueCalculator(item.limit), - LimitsPct: percentCalculator(item.limit), - } - - if jp.showUtil { - out.Utilization = valueCalculator(item.utilization) - out.UtilizationPct = percentCalculator(item.utilization) - } - return &out -} diff --git a/pkg/capacity/list.go b/pkg/capacity/list.go index f1c94725..073ef459 100644 --- a/pkg/capacity/list.go +++ b/pkg/capacity/list.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,151 +15,115 @@ package capacity import ( + "encoding/json" "fmt" - "os" - "k8s.io/client-go/kubernetes" - metrics "k8s.io/metrics/pkg/client/clientset/versioned" - - "github.com/robscott/kube-capacity/pkg/kube" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + "sigs.k8s.io/yaml" ) -// List gathers cluster resource data and outputs it -func List(showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { - clientset, err := kube.NewClientSet(kubeContext) - if err != nil { - fmt.Printf("Error connecting to Kubernetes: %v\n", err) - os.Exit(1) - } - - podList, nodeList := getPodsAndNodes(clientset, podLabels, nodeLabels, namespaceLabels) - pmList := &v1beta1.PodMetricsList{} - if showUtil { - mClientset, err := kube.NewMetricsClientSet(kubeContext) - if err != nil { - fmt.Printf("Error connecting to Metrics API: %v\n", err) - os.Exit(4) - } - - pmList = getMetrics(mClientset) - } - cm := buildClusterMetric(podList, pmList, nodeList) - printList(&cm, showPods, showUtil, output) +type listNodeMetric struct { + Name string `json:"name"` + CPU *listResourceOutput `json:"cpu,omitempty"` + Memory *listResourceOutput `json:"memory,omitempty"` + Pods []*listPod `json:"pods,omitempty"` } -func getPodsAndNodes(clientset kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { - nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{ - LabelSelector: nodeLabels, - }) - if err != nil { - fmt.Printf("Error listing Nodes: %v\n", err) - os.Exit(2) - } - - podList, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{ - LabelSelector: podLabels, - }) - if err != nil { - fmt.Printf("Error listing Pods: %v\n", err) - os.Exit(3) - } - - newPodItems := []corev1.Pod{} - - nodes := map[string]bool{} - for _, node := range nodeList.Items { - nodes[node.GetName()] = true - } - - for _, pod := 
range podList.Items { - if !nodes[pod.Spec.NodeName] { - continue - } - - newPodItems = append(newPodItems, pod) - } +type listPod struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` +} - podList.Items = newPodItems +type listResourceOutput struct { + Requests string `json:"requests"` + RequestsPct string `json:"requests_pct"` + Limits string `json:"limits"` + LimitsPct string `json:"limits_pct"` + Utilization string `json:"utilization,omitempty"` + UtilizationPct string `json:"utilization_pct,omitempty"` +} - if namespaceLabels != "" { - namespaceList, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{ - LabelSelector: namespaceLabels, - }) - if err != nil { - fmt.Printf("Error listing Namespaces: %v\n", err) - os.Exit(3) - } +type listClusterMetrics struct { + Nodes []*listNodeMetric `json:"nodes"` + ClusterTotals struct { + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` + } `json:"cluster_totals"` +} - namespaces := map[string]bool{} - for _, ns := range namespaceList.Items { - namespaces[ns.GetName()] = true - } +type listPrinter struct { + cm *clusterMetric + showPods bool + showUtil bool +} - newPodItems := []corev1.Pod{} +func (lp listPrinter) Print(outputType string) { + listOutput := lp.buildListClusterMetrics() - for _, pod := range podList.Items { - if !namespaces[pod.GetNamespace()] { - continue + jsonRaw, err := json.MarshalIndent(listOutput, "", " ") + if err != nil { + fmt.Println("Error Marshalling JSON") + fmt.Println(err) + } else { + if outputType == JSONOutput { + fmt.Printf("%s", jsonRaw) + } else { + // This is a strange approach, but the k8s YAML package + // already marshalls to JSON before converting to YAML, + // this just allows us to follow the same code path. 
+ yamlRaw, err := yaml.JSONToYAML(jsonRaw) + if err != nil { + fmt.Println("Error Converting JSON to Yaml") + fmt.Println(err) + } else { + fmt.Printf("%s", yamlRaw) } - - newPodItems = append(newPodItems, pod) } - - podList.Items = newPodItems } - - return podList, nodeList } -func getMetrics(mClientset *metrics.Clientset) *v1beta1.PodMetricsList { - pmList, err := mClientset.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{}) - if err != nil { - fmt.Printf("Error getting Pod Metrics: %v\n", err) - fmt.Println("For this to work, metrics-server needs to be running in your cluster") - os.Exit(6) +func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { + var response listClusterMetrics + + response.ClusterTotals.CPU = lp.buildListResourceOutput(lp.cm.cpu) + response.ClusterTotals.Memory = lp.buildListResourceOutput(lp.cm.memory) + + for key, val := range lp.cm.nodeMetrics { + var node listNodeMetric + node.Name = key + node.CPU = lp.buildListResourceOutput(val.cpu) + node.Memory = lp.buildListResourceOutput(val.memory) + if lp.showPods { + for _, val := range val.podMetrics { + var newNode listPod + newNode.Name = val.name + newNode.Namespace = val.namespace + newNode.CPU = lp.buildListResourceOutput(val.cpu) + newNode.Memory = lp.buildListResourceOutput(val.memory) + node.Pods = append(node.Pods, &newNode) + } + } + response.Nodes = append(response.Nodes, &node) } - return pmList + return response } -func buildClusterMetric(podList *corev1.PodList, pmList *v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { - cm := clusterMetric{ - cpu: &resourceMetric{resourceType: "cpu"}, - memory: &resourceMetric{resourceType: "memory"}, - nodeMetrics: map[string]*nodeMetric{}, - podMetrics: map[string]*podMetric{}, - } - - for _, node := range nodeList.Items { - cm.nodeMetrics[node.Name] = &nodeMetric{ - cpu: &resourceMetric{ - resourceType: "cpu", - allocatable: node.Status.Allocatable["cpu"], - }, - memory: &resourceMetric{ - resourceType: 
"memory", - allocatable: node.Status.Allocatable["memory"], - }, - podMetrics: map[string]*podMetric{}, - } +func (lp *listPrinter) buildListResourceOutput(item *resourceMetric) *listResourceOutput { + valueCalculator := item.valueFunction() + percentCalculator := item.percentFunction() - cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) - cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) + out := listResourceOutput{ + Requests: valueCalculator(item.request), + RequestsPct: percentCalculator(item.request), + Limits: valueCalculator(item.limit), + LimitsPct: percentCalculator(item.limit), } - podMetrics := map[string]v1beta1.PodMetrics{} - for _, pm := range pmList.Items { - podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm + if lp.showUtil { + out.Utilization = valueCalculator(item.utilization) + out.UtilizationPct = percentCalculator(item.utilization) } - - for _, pod := range podList.Items { - if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { - cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) - } - } - - return cm + return &out } diff --git a/pkg/capacity/printer.go b/pkg/capacity/printer.go index 75da098e..7fc8c5af 100644 --- a/pkg/capacity/printer.go +++ b/pkg/capacity/printer.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,10 +21,12 @@ import ( ) const ( - //TableOutput is the constant value for output type text + //TableOutput is the constant value for output type table TableOutput string = "table" - //JSONOutput is the constant value for output type text + //JSONOutput is the constant value for output type JSON JSONOutput string = "json" + //YAMLOutput is the constant value for output type YAML + YAMLOutput string = "yaml" ) // SupportedOutputs returns a string list of output formats supposed by this package @@ -32,41 +34,28 @@ func SupportedOutputs() []string { return []string{ TableOutput, JSONOutput, + YAMLOutput, } } -type printer interface { - Print() -} - func printList(cm *clusterMetric, showPods bool, showUtil bool, output string) { - p, err := printerFactory(cm, showPods, showUtil, output) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - p.Print() -} - -func printerFactory(cm *clusterMetric, showPods bool, showUtil bool, outputType string) (printer, error) { - var response printer - switch outputType { - case JSONOutput: - response = jsonPrinter{ + if output == JSONOutput || output == YAMLOutput { + lp := &listPrinter{ cm: cm, showPods: showPods, showUtil: showUtil, } - return response, nil - case TableOutput: - response = tablePrinter{ + lp.Print(output) + } else if output == TableOutput { + tp := &tablePrinter{ cm: cm, showPods: showPods, showUtil: showUtil, w: new(tabwriter.Writer), } - return response, nil - default: - return response, fmt.Errorf("Called with an unsupported output type: %s", outputType) + tp.Print() + } else { + fmt.Printf("Called with an unsupported output type: %s", output) + os.Exit(1) } } diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index eb2b2ca8..b00ce1a5 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file 
except in compliance with the License. diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index bf83f65f..3a13e895 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -1,4 +1,17 @@ -// Package capacity - text.go contains all the messy details for the text printer implementation +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package capacity import ( @@ -15,7 +28,7 @@ type tablePrinter struct { w *tabwriter.Writer } -func (tp tablePrinter) Print() { +func (tp *tablePrinter) Print() { tp.w.Init(os.Stdout, 0, 8, 2, ' ', 0) names := make([]string, len(tp.cm.nodeMetrics)) diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index b351566b..a7d07e0e 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -44,7 +44,7 @@ var rootCmd = &cobra.Command{ os.Exit(1) } - capacity.List(showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) + capacity.FetchAndPrint(showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) }, } diff --git a/pkg/cmd/version.go b/pkg/cmd/version.go index 041739bc..0480b214 100644 --- a/pkg/cmd/version.go +++ b/pkg/cmd/version.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pkg/kube/clientset.go b/pkg/kube/clientset.go index 0537358a..4e698d84 100644 --- a/pkg/kube/clientset.go +++ b/pkg/kube/clientset.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
From 476062a3cc954ec2ed94b8a07fcdd28432f1d0c4 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Sun, 31 Mar 2019 23:29:48 -0400 Subject: [PATCH 2/7] work in progress on container output support --- pkg/capacity/capacity.go | 4 +-- pkg/capacity/list.go | 55 +++++++++++++++++++++----------- pkg/capacity/printer.go | 18 ++++++----- pkg/capacity/resources.go | 33 +++++++++++++++++--- pkg/capacity/table.go | 66 +++++++++++++++++++++++++++++++++++---- pkg/cmd/root.go | 4 ++- 6 files changed, 141 insertions(+), 39 deletions(-) diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go index 97f709c9..81fc9086 100644 --- a/pkg/capacity/capacity.go +++ b/pkg/capacity/capacity.go @@ -28,7 +28,7 @@ import ( ) // FetchAndPrint gathers cluster resource data and outputs it -func FetchAndPrint(showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { +func FetchAndPrint(showContainers, showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { clientset, err := kube.NewClientSet(kubeContext) if err != nil { fmt.Printf("Error connecting to Kubernetes: %v\n", err) @@ -48,7 +48,7 @@ func FetchAndPrint(showPods, showUtil bool, podLabels, nodeLabels, namespaceLabe } cm := buildClusterMetric(podList, pmList, nodeList) - printList(&cm, showPods, showUtil, output) + printList(&cm, showContainers, showPods, showUtil, output) } func getPodsAndNodes(clientset kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { diff --git a/pkg/capacity/list.go b/pkg/capacity/list.go index 073ef459..67291687 100644 --- a/pkg/capacity/list.go +++ b/pkg/capacity/list.go @@ -29,10 +29,17 @@ type listNodeMetric struct { } type listPod struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - CPU *listResourceOutput `json:"cpu"` - Memory *listResourceOutput `json:"memory"` + Name string `json:"name"` + Namespace string `json:"namespace"` + CPU 
*listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` + Containers []listContainer `json:"containers"` +} + +type listContainer struct { + Name string `json:"name"` + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` } type listResourceOutput struct { @@ -53,9 +60,10 @@ type listClusterMetrics struct { } type listPrinter struct { - cm *clusterMetric - showPods bool - showUtil bool + cm *clusterMetric + showPods bool + showContainers bool + showUtil bool } func (lp listPrinter) Print(outputType string) { @@ -89,19 +97,30 @@ func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { response.ClusterTotals.CPU = lp.buildListResourceOutput(lp.cm.cpu) response.ClusterTotals.Memory = lp.buildListResourceOutput(lp.cm.memory) - for key, val := range lp.cm.nodeMetrics { + for key, nodeMetric := range lp.cm.nodeMetrics { var node listNodeMetric node.Name = key - node.CPU = lp.buildListResourceOutput(val.cpu) - node.Memory = lp.buildListResourceOutput(val.memory) - if lp.showPods { - for _, val := range val.podMetrics { - var newNode listPod - newNode.Name = val.name - newNode.Namespace = val.namespace - newNode.CPU = lp.buildListResourceOutput(val.cpu) - newNode.Memory = lp.buildListResourceOutput(val.memory) - node.Pods = append(node.Pods, &newNode) + node.CPU = lp.buildListResourceOutput(nodeMetric.cpu) + node.Memory = lp.buildListResourceOutput(nodeMetric.memory) + + if lp.showPods || lp.showContainers { + for _, podMetric := range nodeMetric.podMetrics { + var pod listPod + pod.Name = podMetric.name + pod.Namespace = podMetric.namespace + pod.CPU = lp.buildListResourceOutput(podMetric.cpu) + pod.Memory = lp.buildListResourceOutput(podMetric.memory) + + if lp.showContainers { + for _, containerMetric := range podMetric.containers { + pod.Containers = append(pod.Containers, listContainer{ + Name: containerMetric.name, + Memory: lp.buildListResourceOutput(containerMetric.memory), + CPU: 
lp.buildListResourceOutput(containerMetric.cpu), + }) + } + } + node.Pods = append(node.Pods, &pod) } } response.Nodes = append(response.Nodes, &node) diff --git a/pkg/capacity/printer.go b/pkg/capacity/printer.go index 7fc8c5af..c9d9a9b4 100644 --- a/pkg/capacity/printer.go +++ b/pkg/capacity/printer.go @@ -38,20 +38,22 @@ func SupportedOutputs() []string { } } -func printList(cm *clusterMetric, showPods bool, showUtil bool, output string) { +func printList(cm *clusterMetric, showContainers bool, showPods bool, showUtil bool, output string) { if output == JSONOutput || output == YAMLOutput { lp := &listPrinter{ - cm: cm, - showPods: showPods, - showUtil: showUtil, + cm: cm, + showPods: showPods, + showUtil: showUtil, + showContainers: showContainers, } lp.Print(output) } else if output == TableOutput { tp := &tablePrinter{ - cm: cm, - showPods: showPods, - showUtil: showUtil, - w: new(tabwriter.Writer), + cm: cm, + showPods: showPods, + showUtil: showUtil, + showContainers: showContainers, + w: new(tabwriter.Writer), } tp.Print() } else { diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index b00ce1a5..34388709 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -45,10 +45,17 @@ type nodeMetric struct { } type podMetric struct { - name string - namespace string - cpu *resourceMetric - memory *resourceMetric + name string + namespace string + cpu *resourceMetric + memory *resourceMetric + containers []containerMetric +} + +type containerMetric struct { + name string + cpu *resourceMetric + memory *resourceMetric } func (rm *resourceMetric) addMetric(m *resourceMetric) { @@ -75,7 +82,25 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet request: req["memory"], limit: limit["memory"], }, + containers: []containerMetric{}, } + + for _, container := range pod.Spec.Containers { + pm.containers = append(pm.containers, containerMetric{ + name: container.Name, + cpu: &resourceMetric{ + resourceType: 
"cpu", + request: container.Resources.Requests["cpu"], + limit: container.Resources.Limits["cpu"], + }, + memory: &resourceMetric{ + resourceType: "memory", + request: container.Resources.Requests["memory"], + limit: container.Resources.Limits["memory"], + }, + }) + } + cm.podMetrics[key] = pm nm := cm.nodeMetrics[pod.Spec.NodeName] diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index 3a13e895..69f1a01c 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -22,10 +22,11 @@ import ( ) type tablePrinter struct { - cm *clusterMetric - showPods bool - showUtil bool - w *tabwriter.Writer + cm *clusterMetric + showPods bool + showUtil bool + showContainers bool + w *tabwriter.Writer } func (tp *tablePrinter) Print() { @@ -49,7 +50,33 @@ func (tp *tablePrinter) Print() { } func (tp *tablePrinter) printHeaders() { - if tp.showPods && tp.showUtil { + if tp.showContainers && tp.showUtil { + fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CONTAINER \t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL") + + if len(tp.cm.nodeMetrics) > 1 { + fmt.Fprintf(tp.w, "* \t *\t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", + tp.cm.cpu.requestString(), + tp.cm.cpu.limitString(), + tp.cm.cpu.utilString(), + tp.cm.memory.requestString(), + tp.cm.memory.limitString(), + tp.cm.memory.utilString()) + + fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t\t") + } + + } else if tp.showContainers && tp.showUtil { + fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CONTAINER\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") + + fmt.Fprintf(tp.w, "* \t *\t *\t *\t %s \t %s \t %s \t %s \n", + tp.cm.cpu.requestString(), + tp.cm.cpu.limitString(), + tp.cm.memory.requestString(), + tp.cm.memory.limitString()) + + fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t") + + } else if tp.showPods && tp.showUtil { fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL") if 
len(tp.cm.nodeMetrics) > 1 { @@ -63,6 +90,7 @@ func (tp *tablePrinter) printHeaders() { fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") } + } else if tp.showPods { fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") @@ -106,7 +134,33 @@ func (tp *tablePrinter) printNode(name string, nm *nodeMetric) { } sort.Strings(podNames) - if tp.showPods && tp.showUtil { + if tp.showContainers && tp.showUtil { + fmt.Fprintf(tp.w, "%s \t *\t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", + name, + nm.cpu.requestString(), + nm.cpu.limitString(), + nm.cpu.utilString(), + nm.memory.requestString(), + nm.memory.limitString(), + nm.memory.utilString()) + + for _, podName := range podNames { + pm := nm.podMetrics[podName] + fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \n", + name, + pm.namespace, + pm.name, + pm.cpu.requestString(), + pm.cpu.limitString(), + pm.cpu.utilString(), + pm.memory.requestString(), + pm.memory.limitString(), + pm.memory.utilString()) + } + + fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") + + } else if tp.showPods && tp.showUtil { fmt.Fprintf(tp.w, "%s \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", name, nm.cpu.requestString(), diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index a7d07e0e..6a163465 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" ) +var showContainers bool var showPods bool var showUtil bool var podLabels string @@ -44,11 +45,12 @@ var rootCmd = &cobra.Command{ os.Exit(1) } - capacity.FetchAndPrint(showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) + capacity.FetchAndPrint(showContainers, showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) }, } func init() { + rootCmd.PersistentFlags().BoolVarP(&showContainers, "containers", "c", false, "includes containers in output") rootCmd.PersistentFlags().BoolVarP(&showPods, "pods", "p", false, "includes pods in 
output") rootCmd.PersistentFlags().BoolVarP(&showUtil, "util", "u", false, "includes resource utilization in output") rootCmd.PersistentFlags().StringVarP(&podLabels, "pod-labels", "l", "", "labels to filter pods with") From 6e9106abf2b35aab17f979007ea5cfecae602a27 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Mon, 1 Apr 2019 00:47:56 -0400 Subject: [PATCH 3/7] refactoring table printer, container output working --- pkg/capacity/resources.go | 11 +- pkg/capacity/table.go | 302 ++++++++++++++++++-------------------- 2 files changed, 147 insertions(+), 166 deletions(-) diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index 34388709..d1107fae 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -49,7 +49,7 @@ type podMetric struct { namespace string cpu *resourceMetric memory *resourceMetric - containers []containerMetric + containers []*containerMetric } type containerMetric struct { @@ -82,11 +82,11 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet request: req["memory"], limit: limit["memory"], }, - containers: []containerMetric{}, + containers: []*containerMetric{}, } for _, container := range pod.Spec.Containers { - pm.containers = append(pm.containers, containerMetric{ + pm.containers = append(pm.containers, &containerMetric{ name: container.Name, cpu: &resourceMetric{ resourceType: "cpu", @@ -159,9 +159,10 @@ func resourceString(actual, allocatable resource.Quantity, resourceType string) } if resourceType == "cpu" { - return fmt.Sprintf("%dm (%d%%)", actual.MilliValue(), int64(utilPercent)) + return fmt.Sprintf("%dm (%d", actual.MilliValue(), int64(utilPercent)) + "%%)" } - return fmt.Sprintf("%dMi (%d%%)", actual.Value()/1048576, int64(utilPercent)) + + return fmt.Sprintf("%dMi (%d", actual.Value()/1048576, int64(utilPercent)) + "%%)" } // NOTE: This might not be a great place for closures due to the cyclical nature of how resourceType works. Perhaps better implemented another way. 
diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index 69f1a01c..60fe2bf5 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -18,6 +18,7 @@ import ( "fmt" "os" "sort" + "strings" "text/tabwriter" ) @@ -29,198 +30,177 @@ type tablePrinter struct { w *tabwriter.Writer } +type tableLine struct { + node string + namespace string + pod string + container string + cpuRequests string + cpuLimits string + cpuUtil string + memoryRequests string + memoryLimits string + memoryUtil string +} + +var headerStrings = tableLine{ + node: "NODE", + namespace: "NAMESPACE", + pod: "POD", + container: "CONTAINER", + cpuRequests: "CPU REQUESTS", + cpuLimits: "CPU LIMITS", + cpuUtil: "CPU UTIL", + memoryRequests: "MEMORY REQUESTS", + memoryLimits: "MEMORY LIMITS", + memoryUtil: "MEMORY UTIL", +} + func (tp *tablePrinter) Print() { tp.w.Init(os.Stdout, 0, 8, 2, ' ', 0) - names := make([]string, len(tp.cm.nodeMetrics)) + nodeNames := getSortedNodeNames(tp.cm.nodeMetrics) - i := 0 - for name := range tp.cm.nodeMetrics { - names[i] = name - i++ - } - sort.Strings(names) + tp.printLine(&headerStrings) - tp.printHeaders() + if len(nodeNames) > 1 { + tp.printClusterLine() + tp.printLine(&tableLine{}) + } - for _, name := range names { - tp.printNode(name, tp.cm.nodeMetrics[name]) + for _, nodeName := range nodeNames { + nm := tp.cm.nodeMetrics[nodeName] + tp.printNodeLine(nodeName, nm) + tp.printLine(&tableLine{}) + + podNames := getSortedPodNames(nm.podMetrics) + if tp.showPods || tp.showContainers { + for _, podName := range podNames { + pm := nm.podMetrics[podName] + tp.printPodLine(nodeName, pm) + if tp.showContainers { + for _, containerMetric := range pm.containers { + tp.printContainerLine(nodeName, pm, containerMetric) + } + } + } + } } tp.w.Flush() } -func (tp *tablePrinter) printHeaders() { - if tp.showContainers && tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CONTAINER \t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY 
LIMITS \t MEMORY UTIL") +func (tp *tablePrinter) printLine(tl *tableLine) { + lineItems := []string{tl.node, tl.namespace} - if len(tp.cm.nodeMetrics) > 1 { - fmt.Fprintf(tp.w, "* \t *\t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.cpu.utilString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString(), - tp.cm.memory.utilString()) + if tp.showContainers || tp.showPods { + lineItems = append(lineItems, tl.pod) + } - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t\t") - } + if tp.showContainers { + lineItems = append(lineItems, tl.container) + } - } else if tp.showContainers && tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CONTAINER\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") + lineItems = append(lineItems, tl.cpuRequests) + lineItems = append(lineItems, tl.cpuLimits) - fmt.Fprintf(tp.w, "* \t *\t *\t *\t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString()) + if tp.showUtil { + lineItems = append(lineItems, tl.cpuUtil) + } - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t") + lineItems = append(lineItems, tl.memoryRequests) + lineItems = append(lineItems, tl.memoryLimits) - } else if tp.showPods && tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL") + if tp.showUtil { + lineItems = append(lineItems, tl.memoryUtil) + } - if len(tp.cm.nodeMetrics) > 1 { - fmt.Fprintf(tp.w, "* \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.cpu.utilString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString(), - tp.cm.memory.utilString()) + fmt.Fprintf(tp.w, strings.Join(lineItems[:], "\t ")+"\n") +} - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") - } +func (tp *tablePrinter) printClusterLine() { + tp.printLine(&tableLine{ + node: "*", + 
namespace: "*", + pod: "*", + container: "*", + cpuRequests: tp.cm.cpu.requestString(), + cpuLimits: tp.cm.cpu.limitString(), + cpuUtil: tp.cm.cpu.utilString(), + memoryRequests: tp.cm.memory.requestString(), + memoryLimits: tp.cm.memory.limitString(), + memoryUtil: tp.cm.memory.utilString(), + }) +} - } else if tp.showPods { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") +func (tp *tablePrinter) printNodeLine(nodeName string, nm *nodeMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: "*", + pod: "*", + container: "*", + cpuRequests: nm.cpu.requestString(), + cpuLimits: nm.cpu.limitString(), + cpuUtil: nm.cpu.utilString(), + memoryRequests: nm.memory.requestString(), + memoryLimits: nm.memory.limitString(), + memoryUtil: nm.memory.utilString(), + }) +} - fmt.Fprintf(tp.w, "* \t *\t *\t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString()) +func (tp *tablePrinter) printPodLine(nodeName string, pm *podMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: pm.namespace, + pod: pm.name, + container: "*", + cpuRequests: pm.cpu.requestString(), + cpuLimits: pm.cpu.limitString(), + cpuUtil: pm.cpu.utilString(), + memoryRequests: pm.memory.requestString(), + memoryLimits: pm.memory.limitString(), + memoryUtil: pm.memory.utilString(), + }) +} - fmt.Fprintln(tp.w, "\t\t\t\t\t\t") +func (tp *tablePrinter) printContainerLine(nodeName string, pm *podMetric, cm *containerMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: pm.namespace, + pod: pm.name, + container: cm.name, + cpuRequests: cm.cpu.requestString(), + cpuLimits: cm.cpu.limitString(), + cpuUtil: "", + memoryRequests: cm.memory.requestString(), + memoryLimits: cm.memory.limitString(), + memoryUtil: "", + }) +} - } else if tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS 
\t MEMORY LIMITS \t MEMORY UTIL") +func getSortedNodeNames(nodeMetrics map[string]*nodeMetric) []string { + sortedNames := make([]string, len(nodeMetrics)) - fmt.Fprintf(tp.w, "* \t %s \t %s \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.cpu.utilString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString(), - tp.cm.memory.utilString()) + i := 0 + for name := range nodeMetrics { + sortedNames[i] = name + i++ + } - } else { - fmt.Fprintln(tp.w, "NODE\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") + sort.Strings(sortedNames) - if len(tp.cm.nodeMetrics) > 1 { - fmt.Fprintf(tp.w, "* \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), tp.cm.cpu.limitString(), - tp.cm.memory.requestString(), tp.cm.memory.limitString()) - } - } + return sortedNames } -func (tp *tablePrinter) printNode(name string, nm *nodeMetric) { - podNames := make([]string, len(nm.podMetrics)) +func getSortedPodNames(podMetrics map[string]*podMetric) []string { + sortedNames := make([]string, len(podMetrics)) i := 0 - for name := range nm.podMetrics { - podNames[i] = name + for name := range podMetrics { + sortedNames[i] = name i++ } - sort.Strings(podNames) - - if tp.showContainers && tp.showUtil { - fmt.Fprintf(tp.w, "%s \t *\t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.cpu.utilString(), - nm.memory.requestString(), - nm.memory.limitString(), - nm.memory.utilString()) - - for _, podName := range podNames { - pm := nm.podMetrics[podName] - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - pm.namespace, - pm.name, - pm.cpu.requestString(), - pm.cpu.limitString(), - pm.cpu.utilString(), - pm.memory.requestString(), - pm.memory.limitString(), - pm.memory.utilString()) - } - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") - - } else if tp.showPods && tp.showUtil { - fmt.Fprintf(tp.w, "%s \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", 
- name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.cpu.utilString(), - nm.memory.requestString(), - nm.memory.limitString(), - nm.memory.utilString()) - - for _, podName := range podNames { - pm := nm.podMetrics[podName] - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - pm.namespace, - pm.name, - pm.cpu.requestString(), - pm.cpu.limitString(), - pm.cpu.utilString(), - pm.memory.requestString(), - pm.memory.limitString(), - pm.memory.utilString()) - } + sort.Strings(sortedNames) - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") - - } else if tp.showPods { - fmt.Fprintf(tp.w, "%s \t *\t *\t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.memory.requestString(), - nm.memory.limitString()) - - for _, podName := range podNames { - pm := nm.podMetrics[podName] - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - pm.namespace, - pm.name, - pm.cpu.requestString(), - pm.cpu.limitString(), - pm.memory.requestString(), - pm.memory.limitString()) - } - - fmt.Fprintln(tp.w, "\t\t\t\t\t\t") - - } else if tp.showUtil { - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.cpu.utilString(), - nm.memory.requestString(), - nm.memory.limitString(), - nm.memory.utilString()) - - } else { - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \n", name, - nm.cpu.requestString(), nm.cpu.limitString(), - nm.memory.requestString(), nm.memory.limitString()) - } + return sortedNames } From 26e974aed9bbc8badbe64c180fd06e8e4028e700 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Tue, 2 Apr 2019 00:15:34 -0400 Subject: [PATCH 4/7] improved tests --- pkg/capacity/list.go | 18 ++- pkg/capacity/list_test.go | 266 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 277 insertions(+), 7 deletions(-) create mode 100644 pkg/capacity/list_test.go diff --git a/pkg/capacity/list.go b/pkg/capacity/list.go index 67291687..341ba37e 
100644 --- a/pkg/capacity/list.go +++ b/pkg/capacity/list.go @@ -52,11 +52,13 @@ type listResourceOutput struct { } type listClusterMetrics struct { - Nodes []*listNodeMetric `json:"nodes"` - ClusterTotals struct { - CPU *listResourceOutput `json:"cpu"` - Memory *listResourceOutput `json:"memory"` - } `json:"cluster_totals"` + Nodes []*listNodeMetric `json:"nodes"` + ClusterTotals *listClusterTotals `json:"cluster_totals"` +} + +type listClusterTotals struct { + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` } type listPrinter struct { @@ -94,8 +96,10 @@ func (lp listPrinter) Print(outputType string) { func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { var response listClusterMetrics - response.ClusterTotals.CPU = lp.buildListResourceOutput(lp.cm.cpu) - response.ClusterTotals.Memory = lp.buildListResourceOutput(lp.cm.memory) + response.ClusterTotals = &listClusterTotals{ + CPU: lp.buildListResourceOutput(lp.cm.cpu), + Memory: lp.buildListResourceOutput(lp.cm.memory), + } for key, nodeMetric := range lp.cm.nodeMetrics { var node listNodeMetric diff --git a/pkg/capacity/list_test.go b/pkg/capacity/list_test.go new file mode 100644 index 00000000..a6f5e9b7 --- /dev/null +++ b/pkg/capacity/list_test.go @@ -0,0 +1,266 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +func TestBuildListClusterMetricsNoOptions(t *testing.T) { + cm := getTestClusterMetric() + + lp := listPrinter{ + cm: &cm, + } + + lcm := lp.buildListClusterMetrics() + + assert.EqualValues(t, &listClusterTotals{ + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + }, + }, lcm.ClusterTotals) + + assert.EqualValues(t, &listNodeMetric{ + Name: "example-node-1", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + }, + }, lcm.Nodes[0]) + +} + +func TestBuildListClusterMetricsAllOptions(t *testing.T) { + cm := getTestClusterMetric() + + lp := listPrinter{ + cm: &cm, + showUtil: true, + showPods: true, + showContainers: true, + } + + lcm := lp.buildListClusterMetrics() + + assert.EqualValues(t, &listClusterTotals{ + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + }, lcm.ClusterTotals) + + assert.EqualValues(t, &listNodeMetric{ + Name: "example-node-1", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: 
"580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + Pods: []*listPod{ + { + Name: "example-pod", + Namespace: "default", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + Containers: []listContainer{ + { + Name: "example-container-1", + CPU: &listResourceOutput{ + Requests: "450m", + RequestsPct: "-9223372036854775808%", + Limits: "560m", + LimitsPct: "-9223372036854775808%", + Utilization: "0m", + UtilizationPct: "-9223372036854775808%", + }, + Memory: &listResourceOutput{ + Requests: "160Mi", + RequestsPct: "-9223372036854775808%", + Limits: "280Mi", + LimitsPct: "-9223372036854775808%", + Utilization: "0Mi", + UtilizationPct: "-9223372036854775808%", + }, + }, { + Name: "example-container-2", + CPU: &listResourceOutput{ + Requests: "200m", + RequestsPct: "-9223372036854775808%", + Limits: "250m", + LimitsPct: "-9223372036854775808%", + Utilization: "0m", + UtilizationPct: "-9223372036854775808%", + }, + Memory: &listResourceOutput{ + Requests: "250Mi", + RequestsPct: "-9223372036854775808%", + Limits: "300Mi", + LimitsPct: "-9223372036854775808%", + Utilization: "0Mi", + UtilizationPct: "-9223372036854775808%", + }, + }, + }, + }, + }}, lcm.Nodes[0]) + +} + +func getTestClusterMetric() clusterMetric { + return buildClusterMetric( + &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: "example-node-1", + Containers: []corev1.Container{ + { + Name: "example-container-1", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("450m"), + "memory": resource.MustParse("160Mi"), + }, + Limits: 
corev1.ResourceList{ + "cpu": resource.MustParse("560m"), + "memory": resource.MustParse("280Mi"), + }, + }, + }, + { + Name: "example-container-2", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("250Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("300Mi"), + }, + }, + }, + }, + }, + }, + }, + }, &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Containers: []v1beta1.ContainerMetrics{ + { + Name: "example-container-1", + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("40m"), + "memory": resource.MustParse("288Mi"), + }, + }, + { + Name: "example-container-2", + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("23m"), + "memory": resource.MustParse("151Mi"), + }, + }, + }, + }, + }, + }, &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-node-1", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + "cpu": resource.MustParse("1000m"), + "memory": resource.MustParse("4000Mi"), + }, + }, + }, + }, + }, + ) +} From a9ddac7ed6cd7633494cd6c57a331e1d615b5549 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Tue, 2 Apr 2019 00:46:03 -0400 Subject: [PATCH 5/7] utilization numbers working for containers --- pkg/capacity/list_test.go | 32 ++++++++++++++++---------------- pkg/capacity/resources.go | 15 ++++++++++----- pkg/capacity/table.go | 4 ++-- 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/pkg/capacity/list_test.go b/pkg/capacity/list_test.go index a6f5e9b7..c052540f 100644 --- a/pkg/capacity/list_test.go +++ b/pkg/capacity/list_test.go @@ -141,37 +141,37 @@ func TestBuildListClusterMetricsAllOptions(t *testing.T) { Name: "example-container-1", CPU: &listResourceOutput{ Requests: "450m", - RequestsPct: "-9223372036854775808%", + 
RequestsPct: "45%", Limits: "560m", - LimitsPct: "-9223372036854775808%", - Utilization: "0m", - UtilizationPct: "-9223372036854775808%", + LimitsPct: "56%", + Utilization: "40m", + UtilizationPct: "4%", }, Memory: &listResourceOutput{ Requests: "160Mi", - RequestsPct: "-9223372036854775808%", + RequestsPct: "4%", Limits: "280Mi", - LimitsPct: "-9223372036854775808%", - Utilization: "0Mi", - UtilizationPct: "-9223372036854775808%", + LimitsPct: "7%", + Utilization: "288Mi", + UtilizationPct: "7%", }, }, { Name: "example-container-2", CPU: &listResourceOutput{ Requests: "200m", - RequestsPct: "-9223372036854775808%", + RequestsPct: "20%", Limits: "250m", - LimitsPct: "-9223372036854775808%", - Utilization: "0m", - UtilizationPct: "-9223372036854775808%", + LimitsPct: "25%", + Utilization: "23m", + UtilizationPct: "2%", }, Memory: &listResourceOutput{ Requests: "250Mi", - RequestsPct: "-9223372036854775808%", + RequestsPct: "6%", Limits: "300Mi", - LimitsPct: "-9223372036854775808%", - Utilization: "0Mi", - UtilizationPct: "-9223372036854775808%", + LimitsPct: "7%", + Utilization: "151Mi", + UtilizationPct: "3%", }, }, }, diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index d1107fae..ae148ce3 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -49,7 +49,7 @@ type podMetric struct { namespace string cpu *resourceMetric memory *resourceMetric - containers []*containerMetric + containers map[string]*containerMetric } type containerMetric struct { @@ -68,6 +68,7 @@ func (rm *resourceMetric) addMetric(m *resourceMetric) { func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMetrics) { req, limit := resourcehelper.PodRequestsAndLimits(pod) key := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name) + nm := cm.nodeMetrics[pod.Spec.NodeName] pm := &podMetric{ name: pod.Name, @@ -82,28 +83,29 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet request: req["memory"], limit: 
limit["memory"], }, - containers: []*containerMetric{}, + containers: map[string]*containerMetric{}, } for _, container := range pod.Spec.Containers { - pm.containers = append(pm.containers, &containerMetric{ + pm.containers[container.Name] = &containerMetric{ name: container.Name, cpu: &resourceMetric{ resourceType: "cpu", request: container.Resources.Requests["cpu"], limit: container.Resources.Limits["cpu"], + allocatable: nm.cpu.allocatable, }, memory: &resourceMetric{ resourceType: "memory", request: container.Resources.Requests["memory"], limit: container.Resources.Limits["memory"], + allocatable: nm.memory.allocatable, }, - }) + } } cm.podMetrics[key] = pm - nm := cm.nodeMetrics[pod.Spec.NodeName] if nm != nil { cm.cpu.request.Add(req["cpu"]) cm.cpu.limit.Add(limit["cpu"]) @@ -113,6 +115,7 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet cm.podMetrics[key].cpu.allocatable = nm.cpu.allocatable cm.podMetrics[key].memory.allocatable = nm.memory.allocatable nm.podMetrics[key] = cm.podMetrics[key] + nm.cpu.request.Add(req["cpu"]) nm.cpu.limit.Add(limit["cpu"]) nm.memory.request.Add(req["memory"]) @@ -120,7 +123,9 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet } for _, container := range podMetrics.Containers { + pm.containers[container.Name].cpu.utilization = container.Usage["cpu"] pm.cpu.utilization.Add(container.Usage["cpu"]) + pm.containers[container.Name].memory.utilization = container.Usage["memory"] pm.memory.utilization.Add(container.Usage["memory"]) if nm == nil { diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index 60fe2bf5..758c90ff 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -170,10 +170,10 @@ func (tp *tablePrinter) printContainerLine(nodeName string, pm *podMetric, cm *c container: cm.name, cpuRequests: cm.cpu.requestString(), cpuLimits: cm.cpu.limitString(), - cpuUtil: "", + cpuUtil: cm.cpu.utilString(), memoryRequests: cm.memory.requestString(), 
memoryLimits: cm.memory.limitString(), - memoryUtil: "", + memoryUtil: cm.memory.utilString(), }) } From dabbedf673f58596f784c0cdbeb9746722211012 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Wed, 3 Apr 2019 00:01:45 -0400 Subject: [PATCH 6/7] adding sorting, updating docs --- README.md | 26 +++++ pkg/capacity/capacity.go | 43 +------- pkg/capacity/capacity_test.go | 31 +++--- pkg/capacity/list.go | 9 +- pkg/capacity/printer.go | 4 +- pkg/capacity/resources.go | 181 +++++++++++++++++++++++++++++++--- pkg/capacity/table.go | 51 +++------- pkg/cmd/root.go | 32 ++++-- 8 files changed, 251 insertions(+), 126 deletions(-) diff --git a/README.md b/README.md index 67f5eef6..6159ef9b 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,18 @@ example-node-2 tiller tiller-deploy 140m (14%) 180m (18%) It's worth noting that utilization numbers from pods will likely not add up to the total node utilization numbers. Unlike request and limit numbers where node and cluster level numbers represent a sum of pod values, node metrics come directly from metrics-server and will likely include other forms of resource utilization. +### Sorting +To highlight the nodes, pods, and containers with the highest metrics, you can sort by a variety of columns: + +``` +kube-capacity --util --sort cpu.util + +NODE CPU REQUESTS CPU LIMITS CPU UTIL MEMORY REQUESTS MEMORY LIMITS MEMORY UTIL +* 560m (28%) 130m (7%) 40m (2%) 572Mi (9%) 770Mi (13%) 470Mi (8%) +example-node-2 340m (34%) 120m (12%) 30m (3%) 380Mi (13%) 410Mi (14%) 260Mi (9%) +example-node-1 220m (22%) 10m (1%) 10m (1%) 192Mi (6%) 360Mi (12%) 210Mi (7%) +``` + ### Filtering By Labels For more advanced usage, kube-capacity also supports filtering by pod, namespace, and/or node labels. 
The following examples show how to use these filters: @@ -91,17 +103,31 @@ kube-capacity --namespace-labels team=api kube-capacity --node-labels kubernetes.io/role=node ``` +### JSON and YAML Output +By default, kube-capacity will provide output in a table format. To view this data in JSON or YAML format, the output flag can be used. Here are some sample commands: +``` +kube-capacity --pods --output json +kube-capacity --pods --containers --util --output yaml +``` + ## Prerequisites Any commands requesting cluster utilization are dependent on [metrics-server](https://github.com/kubernetes-incubator/metrics-server) running on your cluster. If it's not already installed, you can install it with the official [helm chart](https://github.com/helm/charts/tree/master/stable/metrics-server). ## Flags Supported ``` + -c, --containers includes containers in output --context string context to use for Kubernetes config -h, --help help for kube-capacity -n, --namespace-labels string labels to filter namespaces with --node-labels string labels to filter nodes with + -o, --output string output format for information + (supports: [table json yaml]) + (default "table") -l, --pod-labels string labels to filter pods with -p, --pods includes pods in output + --sort string attribute to sort results by (supports: + [cpu.util cpu.request cpu.limit mem.util mem.request mem.limit name]) + (default "name") -u, --util includes resource utilization in output ``` diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go index 81fc9086..b7b8d861 100644 --- a/pkg/capacity/capacity.go +++ b/pkg/capacity/capacity.go @@ -28,7 +28,7 @@ import ( ) // FetchAndPrint gathers cluster resource data and outputs it -func FetchAndPrint(showContainers, showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { +func FetchAndPrint(showContainers, showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext, output, sortBy string) { clientset,
err := kube.NewClientSet(kubeContext) if err != nil { fmt.Printf("Error connecting to Kubernetes: %v\n", err) @@ -48,7 +48,7 @@ func FetchAndPrint(showContainers, showPods, showUtil bool, podLabels, nodeLabel } cm := buildClusterMetric(podList, pmList, nodeList) - printList(&cm, showContainers, showPods, showUtil, output) + printList(&cm, showContainers, showPods, showUtil, output, sortBy) } func getPodsAndNodes(clientset kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { @@ -125,42 +125,3 @@ func getMetrics(mClientset *metrics.Clientset) *v1beta1.PodMetricsList { return pmList } - -func buildClusterMetric(podList *corev1.PodList, pmList *v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { - cm := clusterMetric{ - cpu: &resourceMetric{resourceType: "cpu"}, - memory: &resourceMetric{resourceType: "memory"}, - nodeMetrics: map[string]*nodeMetric{}, - podMetrics: map[string]*podMetric{}, - } - - for _, node := range nodeList.Items { - cm.nodeMetrics[node.Name] = &nodeMetric{ - cpu: &resourceMetric{ - resourceType: "cpu", - allocatable: node.Status.Allocatable["cpu"], - }, - memory: &resourceMetric{ - resourceType: "memory", - allocatable: node.Status.Allocatable["memory"], - }, - podMetrics: map[string]*podMetric{}, - } - - cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) - cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) - } - - podMetrics := map[string]v1beta1.PodMetrics{} - for _, pm := range pmList.Items { - podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm - } - - for _, pod := range podList.Items { - if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { - cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) - } - } - - return cm -} diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go index 4c8cfa20..10d627cc 100644 --- a/pkg/capacity/capacity_test.go +++ 
b/pkg/capacity/capacity_test.go @@ -49,7 +49,6 @@ func TestBuildClusterMetricEmpty(t *testing.T) { utilization: resource.Quantity{}, }, nodeMetrics: map[string]*nodeMetric{}, - podMetrics: map[string]*podMetric{}, } assert.EqualValues(t, cm, expected) @@ -149,8 +148,6 @@ func TestBuildClusterMetricFull(t *testing.T) { utilization: resource.MustParse("299Mi"), } - assert.Len(t, cm.podMetrics, 1) - assert.NotNil(t, cm.cpu) ensureEqualResourceMetric(t, cm.cpu, cpuExpected) assert.NotNil(t, cm.memory) @@ -162,15 +159,18 @@ func TestBuildClusterMetricFull(t *testing.T) { assert.NotNil(t, cm.nodeMetrics["example-node-1"].memory) ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].memory, memoryExpected) + assert.Len(t, cm.nodeMetrics["example-node-1"].podMetrics, 1) + + pm := cm.nodeMetrics["example-node-1"].podMetrics // Change to pod specific util numbers cpuExpected.utilization = resource.MustParse("23m") memoryExpected.utilization = resource.MustParse("299Mi") - assert.NotNil(t, cm.podMetrics["default-example-pod"]) - assert.NotNil(t, cm.podMetrics["default-example-pod"].cpu) - ensureEqualResourceMetric(t, cm.podMetrics["default-example-pod"].cpu, cpuExpected) - assert.NotNil(t, cm.podMetrics["default-example-pod"].memory) - ensureEqualResourceMetric(t, cm.podMetrics["default-example-pod"].memory, memoryExpected) + assert.NotNil(t, pm["default-example-pod"]) + assert.NotNil(t, pm["default-example-pod"].cpu) + ensureEqualResourceMetric(t, pm["default-example-pod"].cpu, cpuExpected) + assert.NotNil(t, pm["default-example-pod"].memory) + ensureEqualResourceMetric(t, pm["default-example-pod"].memory, memoryExpected) } func ensureEqualResourceMetric(t *testing.T, actual *resourceMetric, expected *resourceMetric) { @@ -203,11 +203,11 @@ func listPods(p *corev1.PodList) []string { func node(name string, labels map[string]string) *corev1.Node { return &corev1.Node{ TypeMeta: metav1.TypeMeta{ - Kind: "Node", + Kind: "Node", APIVersion: "v1", }, ObjectMeta: 
metav1.ObjectMeta{ - Name: name, + Name: name, Labels: labels, }, } @@ -216,11 +216,11 @@ func node(name string, labels map[string]string) *corev1.Node { func namespace(name string, labels map[string]string) *corev1.Namespace { return &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ - Kind: "Namespace", + Kind: "Namespace", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: name, Labels: labels, }, } @@ -229,13 +229,13 @@ func namespace(name string, labels map[string]string) *corev1.Namespace { func pod(node, namespace, name string, labels map[string]string) *corev1.Pod { return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ - Kind: "Pod", + Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: name, Namespace: namespace, - Labels: labels, + Labels: labels, }, Spec: corev1.PodSpec{ NodeName: node, @@ -286,7 +286,6 @@ func TestGetPodsAndNodes(t *testing.T) { "default/mypod", }, listPods(podList)) - podList, nodeList = getPodsAndNodes(clientset, "a=test,b!=test", "", "app=true") assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) assert.Equal(t, []string{ diff --git a/pkg/capacity/list.go b/pkg/capacity/list.go index 341ba37e..4bca0562 100644 --- a/pkg/capacity/list.go +++ b/pkg/capacity/list.go @@ -66,6 +66,7 @@ type listPrinter struct { showPods bool showContainers bool showUtil bool + sortBy string } func (lp listPrinter) Print(outputType string) { @@ -101,14 +102,14 @@ func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { Memory: lp.buildListResourceOutput(lp.cm.memory), } - for key, nodeMetric := range lp.cm.nodeMetrics { + for _, nodeMetric := range lp.cm.getSortedNodeMetrics(lp.sortBy) { var node listNodeMetric - node.Name = key + node.Name = nodeMetric.name node.CPU = lp.buildListResourceOutput(nodeMetric.cpu) node.Memory = lp.buildListResourceOutput(nodeMetric.memory) if lp.showPods || lp.showContainers { - for _, podMetric := range nodeMetric.podMetrics { + for _, podMetric := 
range nodeMetric.getSortedPodMetrics(lp.sortBy) { var pod listPod pod.Name = podMetric.name pod.Namespace = podMetric.namespace @@ -116,7 +117,7 @@ func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { pod.Memory = lp.buildListResourceOutput(podMetric.memory) if lp.showContainers { - for _, containerMetric := range podMetric.containers { + for _, containerMetric := range podMetric.getSortedContainerMetrics(lp.sortBy) { pod.Containers = append(pod.Containers, listContainer{ Name: containerMetric.name, Memory: lp.buildListResourceOutput(containerMetric.memory), diff --git a/pkg/capacity/printer.go b/pkg/capacity/printer.go index c9d9a9b4..0e6fcec1 100644 --- a/pkg/capacity/printer.go +++ b/pkg/capacity/printer.go @@ -38,13 +38,14 @@ func SupportedOutputs() []string { } } -func printList(cm *clusterMetric, showContainers bool, showPods bool, showUtil bool, output string) { +func printList(cm *clusterMetric, showContainers, showPods, showUtil bool, output, sortBy string) { if output == JSONOutput || output == YAMLOutput { lp := &listPrinter{ cm: cm, showPods: showPods, showUtil: showUtil, showContainers: showContainers, + sortBy: sortBy, } lp.Print(output) } else if output == TableOutput { @@ -53,6 +54,7 @@ func printList(cm *clusterMetric, showContainers bool, showPods bool, showUtil b showPods: showPods, showUtil: showUtil, showContainers: showContainers, + sortBy: sortBy, w: new(tabwriter.Writer), } tp.Print() diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index ae148ce3..658d6e95 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -16,6 +16,7 @@ package capacity import ( "fmt" + "sort" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -23,6 +24,17 @@ import ( v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) +// SupportedSortAttributes lists the valid sorting options +var SupportedSortAttributes = [...]string{ + "cpu.util", + "cpu.request", + "cpu.limit", + "mem.util", + "mem.request", 
+ "mem.limit", + "name", +} + type resourceMetric struct { resourceType string allocatable resource.Quantity @@ -35,21 +47,21 @@ type clusterMetric struct { cpu *resourceMetric memory *resourceMetric nodeMetrics map[string]*nodeMetric - podMetrics map[string]*podMetric } type nodeMetric struct { + name string cpu *resourceMetric memory *resourceMetric podMetrics map[string]*podMetric } type podMetric struct { - name string - namespace string - cpu *resourceMetric - memory *resourceMetric - containers map[string]*containerMetric + name string + namespace string + cpu *resourceMetric + memory *resourceMetric + containerMetrics map[string]*containerMetric } type containerMetric struct { @@ -58,6 +70,45 @@ type containerMetric struct { memory *resourceMetric } +func buildClusterMetric(podList *corev1.PodList, pmList *v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { + cm := clusterMetric{ + cpu: &resourceMetric{resourceType: "cpu"}, + memory: &resourceMetric{resourceType: "memory"}, + nodeMetrics: map[string]*nodeMetric{}, + } + + for _, node := range nodeList.Items { + cm.nodeMetrics[node.Name] = &nodeMetric{ + name: node.Name, + cpu: &resourceMetric{ + resourceType: "cpu", + allocatable: node.Status.Allocatable["cpu"], + }, + memory: &resourceMetric{ + resourceType: "memory", + allocatable: node.Status.Allocatable["memory"], + }, + podMetrics: map[string]*podMetric{}, + } + + cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) + cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) + } + + podMetrics := map[string]v1beta1.PodMetrics{} + for _, pm := range pmList.Items { + podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { + cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) + } + } + + return cm +} + func (rm *resourceMetric) addMetric(m 
*resourceMetric) { rm.allocatable.Add(m.allocatable) rm.utilization.Add(m.utilization) @@ -83,11 +134,11 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet request: req["memory"], limit: limit["memory"], }, - containers: map[string]*containerMetric{}, + containerMetrics: map[string]*containerMetric{}, } for _, container := range pod.Spec.Containers { - pm.containers[container.Name] = &containerMetric{ + pm.containerMetrics[container.Name] = &containerMetric{ name: container.Name, cpu: &resourceMetric{ resourceType: "cpu", @@ -104,17 +155,15 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet } } - cm.podMetrics[key] = pm - if nm != nil { cm.cpu.request.Add(req["cpu"]) cm.cpu.limit.Add(limit["cpu"]) cm.memory.request.Add(req["memory"]) cm.memory.limit.Add(limit["memory"]) - cm.podMetrics[key].cpu.allocatable = nm.cpu.allocatable - cm.podMetrics[key].memory.allocatable = nm.memory.allocatable - nm.podMetrics[key] = cm.podMetrics[key] + nm.podMetrics[key] = pm + nm.podMetrics[key].cpu.allocatable = nm.cpu.allocatable + nm.podMetrics[key].memory.allocatable = nm.memory.allocatable nm.cpu.request.Add(req["cpu"]) nm.cpu.limit.Add(limit["cpu"]) @@ -123,9 +172,9 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet } for _, container := range podMetrics.Containers { - pm.containers[container.Name].cpu.utilization = container.Usage["cpu"] + pm.containerMetrics[container.Name].cpu.utilization = container.Usage["cpu"] pm.cpu.utilization.Add(container.Usage["cpu"]) - pm.containers[container.Name].memory.utilization = container.Usage["memory"] + pm.containerMetrics[container.Name].memory.utilization = container.Usage["memory"] pm.memory.utilization.Add(container.Usage["memory"]) if nm == nil { @@ -145,6 +194,108 @@ func (cm *clusterMetric) addNodeMetric(nm *nodeMetric) { cm.memory.addMetric(nm.memory) } +func (cm *clusterMetric) getSortedNodeMetrics(sortBy string) []*nodeMetric { + 
sortedNodeMetrics := make([]*nodeMetric, len(cm.nodeMetrics)) + + i := 0 + for name := range cm.nodeMetrics { + sortedNodeMetrics[i] = cm.nodeMetrics[name] + i++ + } + + sort.Slice(sortedNodeMetrics, func(i, j int) bool { + m1 := sortedNodeMetrics[i] + m2 := sortedNodeMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedNodeMetrics +} + +func (nm *nodeMetric) getSortedPodMetrics(sortBy string) []*podMetric { + sortedPodMetrics := make([]*podMetric, len(nm.podMetrics)) + + i := 0 + for name := range nm.podMetrics { + sortedPodMetrics[i] = nm.podMetrics[name] + i++ + } + + sort.Slice(sortedPodMetrics, func(i, j int) bool { + m1 := sortedPodMetrics[i] + m2 := sortedPodMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedPodMetrics +} + +func (pm *podMetric) getSortedContainerMetrics(sortBy string) []*containerMetric { + sortedContainerMetrics := 
make([]*containerMetric, len(pm.containerMetrics)) + + i := 0 + for name := range pm.containerMetrics { + sortedContainerMetrics[i] = pm.containerMetrics[name] + i++ + } + + sort.Slice(sortedContainerMetrics, func(i, j int) bool { + m1 := sortedContainerMetrics[i] + m2 := sortedContainerMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedContainerMetrics +} + func (rm *resourceMetric) requestString() string { return resourceString(rm.request, rm.allocatable, rm.resourceType) } diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index 758c90ff..c26f4aa0 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -17,7 +17,6 @@ package capacity import ( "fmt" "os" - "sort" "strings" "text/tabwriter" ) @@ -27,6 +26,7 @@ type tablePrinter struct { showPods bool showUtil bool showContainers bool + sortBy string w *tabwriter.Writer } @@ -58,28 +58,27 @@ var headerStrings = tableLine{ func (tp *tablePrinter) Print() { tp.w.Init(os.Stdout, 0, 8, 2, ' ', 0) - nodeNames := getSortedNodeNames(tp.cm.nodeMetrics) + sortedNodeMetrics := tp.cm.getSortedNodeMetrics(tp.sortBy) tp.printLine(&headerStrings) - if len(nodeNames) > 1 { + if len(sortedNodeMetrics) > 1 { tp.printClusterLine() tp.printLine(&tableLine{}) } - for _, nodeName := range nodeNames { - nm := tp.cm.nodeMetrics[nodeName] - tp.printNodeLine(nodeName, nm) + for _, nm := range sortedNodeMetrics { + tp.printNodeLine(nm.name, nm) 
tp.printLine(&tableLine{}) - podNames := getSortedPodNames(nm.podMetrics) if tp.showPods || tp.showContainers { - for _, podName := range podNames { - pm := nm.podMetrics[podName] - tp.printPodLine(nodeName, pm) + podMetrics := nm.getSortedPodMetrics(tp.sortBy) + for _, pm := range podMetrics { + tp.printPodLine(nm.name, pm) if tp.showContainers { - for _, containerMetric := range pm.containers { - tp.printContainerLine(nodeName, pm, containerMetric) + containerMetrics := pm.getSortedContainerMetrics(tp.sortBy) + for _, containerMetric := range containerMetrics { + tp.printContainerLine(nm.name, pm, containerMetric) } } } @@ -176,31 +175,3 @@ func (tp *tablePrinter) printContainerLine(nodeName string, pm *podMetric, cm *c memoryUtil: cm.memory.utilString(), }) } - -func getSortedNodeNames(nodeMetrics map[string]*nodeMetric) []string { - sortedNames := make([]string, len(nodeMetrics)) - - i := 0 - for name := range nodeMetrics { - sortedNames[i] = name - i++ - } - - sort.Strings(sortedNames) - - return sortedNames -} - -func getSortedPodNames(podMetrics map[string]*podMetric) []string { - sortedNames := make([]string, len(podMetrics)) - - i := 0 - for name := range podMetrics { - sortedNames[i] = name - i++ - } - - sort.Strings(sortedNames) - - return sortedNames -} diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 6a163465..af69bcee 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -30,6 +30,7 @@ var nodeLabels string var namespaceLabels string var kubeContext string var outputFormat string +var sortBy string var rootCmd = &cobra.Command{ Use: "kube-capacity", @@ -45,19 +46,32 @@ var rootCmd = &cobra.Command{ os.Exit(1) } - capacity.FetchAndPrint(showContainers, showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) + capacity.FetchAndPrint(showContainers, showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat, sortBy) }, } func init() { - rootCmd.PersistentFlags().BoolVarP(&showContainers, 
"containers", "c", false, "includes containers in output") - rootCmd.PersistentFlags().BoolVarP(&showPods, "pods", "p", false, "includes pods in output") - rootCmd.PersistentFlags().BoolVarP(&showUtil, "util", "u", false, "includes resource utilization in output") - rootCmd.PersistentFlags().StringVarP(&podLabels, "pod-labels", "l", "", "labels to filter pods with") - rootCmd.PersistentFlags().StringVarP(&nodeLabels, "node-labels", "", "", "labels to filter nodes with") - rootCmd.PersistentFlags().StringVarP(&namespaceLabels, "namespace-labels", "n", "", "labels to filter namespaces with") - rootCmd.PersistentFlags().StringVarP(&kubeContext, "context", "", "", "context to use for Kubernetes config") - rootCmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", capacity.TableOutput, fmt.Sprintf("output format for information (supports: %v)", capacity.SupportedOutputs())) + rootCmd.PersistentFlags().BoolVarP(&showContainers, + "containers", "c", false, "includes containers in output") + rootCmd.PersistentFlags().BoolVarP(&showPods, + "pods", "p", false, "includes pods in output") + rootCmd.PersistentFlags().BoolVarP(&showUtil, + "util", "u", false, "includes resource utilization in output") + rootCmd.PersistentFlags().StringVarP(&podLabels, + "pod-labels", "l", "", "labels to filter pods with") + rootCmd.PersistentFlags().StringVarP(&nodeLabels, + "node-labels", "", "", "labels to filter nodes with") + rootCmd.PersistentFlags().StringVarP(&namespaceLabels, + "namespace-labels", "n", "", "labels to filter namespaces with") + rootCmd.PersistentFlags().StringVarP(&kubeContext, + "context", "", "", "context to use for Kubernetes config") + rootCmd.PersistentFlags().StringVarP(&sortBy, + "sort", "", "name", + fmt.Sprintf("attribute to sort results be (supports: %v)", capacity.SupportedSortAttributes)) + + rootCmd.PersistentFlags().StringVarP(&outputFormat, + "output", "o", capacity.TableOutput, + fmt.Sprintf("output format for information (supports: %v)", 
capacity.SupportedOutputs())) } // Execute is the primary entrypoint for this CLI From e04d6be28ab300895efc0b5a51afd73120c95951 Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Thu, 4 Apr 2019 00:58:35 -0400 Subject: [PATCH 7/7] additional tests, prepping for 0.3.0 release --- .circleci/config.yml | 7 +- README.md | 6 +- pkg/capacity/capacity_test.go | 259 ++++++--------------------------- pkg/capacity/resources_test.go | 199 +++++++++++++++++++++++++ pkg/capacity/table.go | 10 +- pkg/capacity/table_test.go | 112 ++++++++++++++ pkg/cmd/version.go | 2 +- 7 files changed, 370 insertions(+), 225 deletions(-) create mode 100644 pkg/capacity/resources_test.go create mode 100644 pkg/capacity/table_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d8b3121..9142fc1e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,11 +5,14 @@ jobs: working_directory: /go/src/github.com/robscott/kube-capacity docker: - - image: circleci/golang:1.11 + - image: circleci/golang:1.12 steps: - checkout - - run: go test -v ./pkg/... + - run: go get -u golang.org/x/lint/golint + - run: go list ./... | grep -v vendor | xargs golint -set_exit_status + - run: go list ./... | grep -v vendor | xargs go vet + - run: go test ./pkg/... -v -coverprofile cover.out workflows: version: 2 diff --git a/README.md b/README.md index 6159ef9b..3ab6c48c 100644 --- a/README.md +++ b/README.md @@ -110,9 +110,6 @@ kube-capacity --pods --output json kube-capacity --pods --containers --util --output yaml ``` -## Prerequisites -Any commands requesting cluster utilization are dependent on [metrics-server](https://github.com/kubernetes-incubator/metrics-server) running on your cluster. If it's not already installed, you can install it with the official [helm chart](https://github.com/helm/charts/tree/master/stable/metrics-server). 
- ## Flags Supported ``` -c, --containers includes containers in output @@ -131,6 +128,9 @@ Any commands requesting cluster utilization are dependent on [metrics-server](ht -u, --util includes resource utilization in output ``` +## Prerequisites +Any commands requesting cluster utilization are dependent on [metrics-server](https://github.com/kubernetes-incubator/metrics-server) running on your cluster. If it's not already installed, you can install it with the official [helm chart](https://github.com/helm/charts/tree/master/stable/metrics-server). + ## Similar Projects There are already some great projects out there that have similar goals. diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go index 10d627cc..2fd90a1e 100644 --- a/pkg/capacity/capacity_test.go +++ b/pkg/capacity/capacity_test.go @@ -15,189 +15,64 @@ package capacity import ( - "fmt" "testing" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" "k8s.io/client-go/kubernetes/fake" ) -func TestBuildClusterMetricEmpty(t *testing.T) { - cm := buildClusterMetric( - &corev1.PodList{}, &v1beta1.PodMetricsList{}, &corev1.NodeList{}, - ) - - expected := clusterMetric{ - cpu: &resourceMetric{ - resourceType: "cpu", - allocatable: resource.Quantity{}, - request: resource.Quantity{}, - limit: resource.Quantity{}, - utilization: resource.Quantity{}, - }, - memory: &resourceMetric{ - resourceType: "memory", - allocatable: resource.Quantity{}, - request: resource.Quantity{}, - limit: resource.Quantity{}, - utilization: resource.Quantity{}, - }, - nodeMetrics: map[string]*nodeMetric{}, - } - - assert.EqualValues(t, cm, expected) -} - -func TestBuildClusterMetricFull(t *testing.T) { - cm := buildClusterMetric( - &corev1.PodList{ - Items: []corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "example-pod", - Namespace: "default", - }, - Spec: 
corev1.PodSpec{ - NodeName: "example-node-1", - Containers: []corev1.Container{ - { - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "cpu": resource.MustParse("250m"), - "memory": resource.MustParse("250Mi"), - }, - Limits: corev1.ResourceList{ - "cpu": resource.MustParse("250m"), - "memory": resource.MustParse("500Mi"), - }, - }, - }, - { - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "cpu": resource.MustParse("100m"), - "memory": resource.MustParse("150Mi"), - }, - Limits: corev1.ResourceList{ - "cpu": resource.MustParse("150m"), - "memory": resource.MustParse("200Mi"), - }, - }, - }, - }, - }, - }, - }, - }, &v1beta1.PodMetricsList{ - Items: []v1beta1.PodMetrics{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "example-pod", - Namespace: "default", - }, - Containers: []v1beta1.ContainerMetrics{ - { - Usage: corev1.ResourceList{ - "cpu": resource.MustParse("10m"), - "memory": resource.MustParse("188Mi"), - }, - }, - { - Usage: corev1.ResourceList{ - "cpu": resource.MustParse("13m"), - "memory": resource.MustParse("111Mi"), - }, - }, - }, - }, - }, - }, &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "example-node-1", - }, - Status: corev1.NodeStatus{ - Allocatable: corev1.ResourceList{ - "cpu": resource.MustParse("1000m"), - "memory": resource.MustParse("4000Mi"), - }, - }, - }, - }, - }, +func TestGetPodsAndNodes(t *testing.T) { + clientset := fake.NewSimpleClientset( + node("mynode", map[string]string{"hello": "world"}), + node("mynode2", map[string]string{"hello": "world", "moon": "lol"}), + namespace("default", map[string]string{"app": "true"}), + namespace("kube-system", map[string]string{"system": "true"}), + namespace("other", map[string]string{"app": "true", "system": "true"}), + namespace("another", map[string]string{"hello": "world"}), + pod("mynode", "default", "mypod", map[string]string{"a": "test"}), + pod("mynode2", "kube-system", "mypod1", 
map[string]string{"b": "test"}), + pod("mynode", "other", "mypod2", map[string]string{"c": "test"}), + pod("mynode2", "other", "mypod3", map[string]string{"d": "test"}), + pod("mynode2", "default", "mypod4", map[string]string{"e": "test"}), + pod("mynode", "another", "mypod5", map[string]string{"f": "test"}), + pod("mynode", "default", "mypod6", map[string]string{"g": "test"}), ) - cpuExpected := &resourceMetric{ - allocatable: resource.MustParse("1000m"), - request: resource.MustParse("350m"), - limit: resource.MustParse("400m"), - utilization: resource.MustParse("23m"), - } - - memoryExpected := &resourceMetric{ - allocatable: resource.MustParse("4000Mi"), - request: resource.MustParse("400Mi"), - limit: resource.MustParse("700Mi"), - utilization: resource.MustParse("299Mi"), - } - - assert.NotNil(t, cm.cpu) - ensureEqualResourceMetric(t, cm.cpu, cpuExpected) - assert.NotNil(t, cm.memory) - ensureEqualResourceMetric(t, cm.memory, memoryExpected) - - assert.NotNil(t, cm.nodeMetrics["example-node-1"]) - assert.NotNil(t, cm.nodeMetrics["example-node-1"].cpu) - ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].cpu, cpuExpected) - assert.NotNil(t, cm.nodeMetrics["example-node-1"].memory) - ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].memory, memoryExpected) - - assert.Len(t, cm.nodeMetrics["example-node-1"].podMetrics, 1) - - pm := cm.nodeMetrics["example-node-1"].podMetrics - // Change to pod specific util numbers - cpuExpected.utilization = resource.MustParse("23m") - memoryExpected.utilization = resource.MustParse("299Mi") - - assert.NotNil(t, pm["default-example-pod"]) - assert.NotNil(t, pm["default-example-pod"].cpu) - ensureEqualResourceMetric(t, pm["default-example-pod"].cpu, cpuExpected) - assert.NotNil(t, pm["default-example-pod"].memory) - ensureEqualResourceMetric(t, pm["default-example-pod"].memory, memoryExpected) -} - -func ensureEqualResourceMetric(t *testing.T, actual *resourceMetric, expected *resourceMetric) { - 
assert.Equal(t, actual.allocatable.MilliValue(), expected.allocatable.MilliValue()) - assert.Equal(t, actual.utilization.MilliValue(), expected.utilization.MilliValue()) - assert.Equal(t, actual.request.MilliValue(), expected.request.MilliValue()) - assert.Equal(t, actual.limit.MilliValue(), expected.limit.MilliValue()) -} - -func listNodes(n *corev1.NodeList) []string { - nodes := []string{} - - for _, node := range n.Items { - nodes = append(nodes, node.GetName()) - } + podList, nodeList := getPodsAndNodes(clientset, "", "", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", + "another/mypod5", "default/mypod6", + }, listPods(podList)) - return nodes -} + podList, nodeList = getPodsAndNodes(clientset, "", "hello=world", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", + "another/mypod5", "default/mypod6", + }, listPods(podList)) -func listPods(p *corev1.PodList) []string { - pods := []string{} + podList, nodeList = getPodsAndNodes(clientset, "", "moon=lol", "") + assert.Equal(t, []string{"mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "kube-system/mypod1", "other/mypod3", "default/mypod4", + }, listPods(podList)) - for _, pod := range p.Items { - pods = append(pods, fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName())) - } + podList, nodeList = getPodsAndNodes(clientset, "a=test", "", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", + }, listPods(podList)) - return pods + podList, nodeList = getPodsAndNodes(clientset, "a=test,b!=test", "", "app=true") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", + }, listPods(podList)) } 
func node(name string, labels map[string]string) *corev1.Node { @@ -242,53 +117,3 @@ func pod(node, namespace, name string, labels map[string]string) *corev1.Pod { }, } } - -func TestGetPodsAndNodes(t *testing.T) { - clientset := fake.NewSimpleClientset( - node("mynode", map[string]string{"hello": "world"}), - node("mynode2", map[string]string{"hello": "world", "moon": "lol"}), - namespace("default", map[string]string{"app": "true"}), - namespace("kube-system", map[string]string{"system": "true"}), - namespace("other", map[string]string{"app": "true", "system": "true"}), - namespace("another", map[string]string{"hello": "world"}), - pod("mynode", "default", "mypod", map[string]string{"a": "test"}), - pod("mynode2", "kube-system", "mypod1", map[string]string{"b": "test"}), - pod("mynode", "other", "mypod2", map[string]string{"c": "test"}), - pod("mynode2", "other", "mypod3", map[string]string{"d": "test"}), - pod("mynode2", "default", "mypod4", map[string]string{"e": "test"}), - pod("mynode", "another", "mypod5", map[string]string{"f": "test"}), - pod("mynode", "default", "mypod6", map[string]string{"g": "test"}), - ) - - podList, nodeList := getPodsAndNodes(clientset, "", "", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", - "another/mypod5", "default/mypod6", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "", "hello=world", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", - "another/mypod5", "default/mypod6", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "", "moon=lol", "") - assert.Equal(t, []string{"mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "kube-system/mypod1", "other/mypod3", "default/mypod4", - }, 
listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "a=test", "", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "a=test,b!=test", "", "app=true") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", - }, listPods(podList)) -} diff --git a/pkg/capacity/resources_test.go b/pkg/capacity/resources_test.go new file mode 100644 index 00000000..0b735f8b --- /dev/null +++ b/pkg/capacity/resources_test.go @@ -0,0 +1,199 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +func TestBuildClusterMetricEmpty(t *testing.T) { + cm := buildClusterMetric( + &corev1.PodList{}, &v1beta1.PodMetricsList{}, &corev1.NodeList{}, + ) + + expected := clusterMetric{ + cpu: &resourceMetric{ + resourceType: "cpu", + allocatable: resource.Quantity{}, + request: resource.Quantity{}, + limit: resource.Quantity{}, + utilization: resource.Quantity{}, + }, + memory: &resourceMetric{ + resourceType: "memory", + allocatable: resource.Quantity{}, + request: resource.Quantity{}, + limit: resource.Quantity{}, + utilization: resource.Quantity{}, + }, + nodeMetrics: map[string]*nodeMetric{}, + } + + assert.EqualValues(t, cm, expected) +} + +func TestBuildClusterMetricFull(t *testing.T) { + cm := buildClusterMetric( + &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: "example-node-1", + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("250Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("500Mi"), + }, + }, + }, + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("150Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("150m"), + "memory": resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Containers: []v1beta1.ContainerMetrics{ 
+ { + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("10m"), + "memory": resource.MustParse("188Mi"), + }, + }, + { + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("13m"), + "memory": resource.MustParse("111Mi"), + }, + }, + }, + }, + }, + }, &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-node-1", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + "cpu": resource.MustParse("1000m"), + "memory": resource.MustParse("4000Mi"), + }, + }, + }, + }, + }, + ) + + cpuExpected := &resourceMetric{ + allocatable: resource.MustParse("1000m"), + request: resource.MustParse("350m"), + limit: resource.MustParse("400m"), + utilization: resource.MustParse("23m"), + } + + memoryExpected := &resourceMetric{ + allocatable: resource.MustParse("4000Mi"), + request: resource.MustParse("400Mi"), + limit: resource.MustParse("700Mi"), + utilization: resource.MustParse("299Mi"), + } + + assert.NotNil(t, cm.cpu) + ensureEqualResourceMetric(t, cm.cpu, cpuExpected) + assert.NotNil(t, cm.memory) + ensureEqualResourceMetric(t, cm.memory, memoryExpected) + + assert.NotNil(t, cm.nodeMetrics["example-node-1"]) + assert.NotNil(t, cm.nodeMetrics["example-node-1"].cpu) + ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].cpu, cpuExpected) + assert.NotNil(t, cm.nodeMetrics["example-node-1"].memory) + ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].memory, memoryExpected) + + assert.Len(t, cm.nodeMetrics["example-node-1"].podMetrics, 1) + + pm := cm.nodeMetrics["example-node-1"].podMetrics + // Change to pod specific util numbers + cpuExpected.utilization = resource.MustParse("23m") + memoryExpected.utilization = resource.MustParse("299Mi") + + assert.NotNil(t, pm["default-example-pod"]) + assert.NotNil(t, pm["default-example-pod"].cpu) + ensureEqualResourceMetric(t, pm["default-example-pod"].cpu, cpuExpected) + assert.NotNil(t, pm["default-example-pod"].memory) + ensureEqualResourceMetric(t, 
pm["default-example-pod"].memory, memoryExpected) +} + +func ensureEqualResourceMetric(t *testing.T, actual *resourceMetric, expected *resourceMetric) { + assert.Equal(t, actual.allocatable.MilliValue(), expected.allocatable.MilliValue()) + assert.Equal(t, actual.utilization.MilliValue(), expected.utilization.MilliValue()) + assert.Equal(t, actual.request.MilliValue(), expected.request.MilliValue()) + assert.Equal(t, actual.limit.MilliValue(), expected.limit.MilliValue()) +} + +func listNodes(n *corev1.NodeList) []string { + nodes := []string{} + + for _, node := range n.Items { + nodes = append(nodes, node.GetName()) + } + + return nodes +} + +func listPods(p *corev1.PodList) []string { + pods := []string{} + + for _, pod := range p.Items { + pods = append(pods, fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName())) + } + + return pods +} diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index c26f4aa0..be5e8a6f 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -89,9 +89,15 @@ func (tp *tablePrinter) Print() { } func (tp *tablePrinter) printLine(tl *tableLine) { - lineItems := []string{tl.node, tl.namespace} + lineItems := tp.getLineItems(tl) + fmt.Fprintf(tp.w, strings.Join(lineItems[:], "\t ")+"\n") +} + +func (tp *tablePrinter) getLineItems(tl *tableLine) []string { + lineItems := []string{tl.node} if tp.showContainers || tp.showPods { + lineItems = append(lineItems, tl.namespace) lineItems = append(lineItems, tl.pod) } @@ -113,7 +119,7 @@ func (tp *tablePrinter) printLine(tl *tableLine) { lineItems = append(lineItems, tl.memoryUtil) } - fmt.Fprintf(tp.w, strings.Join(lineItems[:], "\t ")+"\n") + return lineItems } func (tp *tablePrinter) printClusterLine() { diff --git a/pkg/capacity/table_test.go b/pkg/capacity/table_test.go new file mode 100644 index 00000000..2e6760c2 --- /dev/null +++ b/pkg/capacity/table_test.go @@ -0,0 +1,112 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capacity + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetLineItems(t *testing.T) { + tpNone := &tablePrinter{ + showPods: false, + showUtil: false, + showContainers: false, + } + + tpSome := &tablePrinter{ + showPods: false, + showUtil: false, + showContainers: true, + } + + tpAll := &tablePrinter{ + showPods: true, + showUtil: true, + showContainers: true, + } + + tl := &tableLine{ + node: "example-node-1", + namespace: "example-namespace", + pod: "nginx-fsde", + container: "nginx", + cpuRequests: "100m", + cpuLimits: "200m", + cpuUtil: "14m", + memoryRequests: "1000Mi", + memoryLimits: "2000Mi", + memoryUtil: "326Mi", + } + + var testCases = []struct { + name string + tp *tablePrinter + tl *tableLine + expected []string + }{ + { + name: "all false", + tp: tpNone, + tl: tl, + expected: []string{ + "example-node-1", + "100m", + "200m", + "1000Mi", + "2000Mi", + }, + }, { + name: "some true", + tp: tpSome, + tl: tl, + expected: []string{ + "example-node-1", + "example-namespace", + "nginx-fsde", + "nginx", + "100m", + "200m", + "1000Mi", + "2000Mi", + }, + }, { + name: "all true", + tp: tpAll, + tl: tl, + expected: []string{ + "example-node-1", + "example-namespace", + "nginx-fsde", + "nginx", + "100m", + "200m", + "14m", + "1000Mi", + "2000Mi", + "326Mi", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lineItems := tc.tp.getLineItems(tl) + assert.Len(t, lineItems, len(tc.expected)) + 
assert.Equal(t, tc.expected, lineItems) + }) + } +} diff --git a/pkg/cmd/version.go b/pkg/cmd/version.go index 0480b214..09844b04 100644 --- a/pkg/cmd/version.go +++ b/pkg/cmd/version.go @@ -28,6 +28,6 @@ var versionCmd = &cobra.Command{ Use: "version", Short: "Print the version number of kube-capacity", Run: func(cmd *cobra.Command, args []string) { - fmt.Println("kube-capacity version 0.2.0") + fmt.Println("kube-capacity version 0.3.0") }, }