diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d8b3121..9142fc1e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,11 +5,14 @@ jobs: working_directory: /go/src/github.com/robscott/kube-capacity docker: - - image: circleci/golang:1.11 + - image: circleci/golang:1.12 steps: - checkout - - run: go test -v ./pkg/... + - run: go get -u golang.org/x/lint/golint + - run: go list ./... | grep -v vendor | xargs golint -set_exit_status + - run: go list ./... | grep -v vendor | xargs go vet + - run: go test ./pkg/... -v -coverprofile cover.out workflows: version: 2 diff --git a/LICENSE b/LICENSE index b3ddd85a..5804391b 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2019 Rob Scott + Copyright 2019 Kube Capacity Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 3335eaef..3ab6c48c 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,18 @@ example-node-2 tiller tiller-deploy 140m (14%) 180m (18%) It's worth noting that utilization numbers from pods will likely not add up to the total node utilization numbers. Unlike request and limit numbers where node and cluster level numbers represent a sum of pod values, node metrics come directly from metrics-server and will likely include other forms of resource utilization. 
+### Sorting +To highlight the nodes, pods, and containers with the highest metrics, you can sort by a variety of columns: + +``` +kube-capacity --util --sort cpu.util + +NODE CPU REQUESTS CPU LIMITS CPU UTIL MEMORY REQUESTS MEMORY LIMITS MEMORY UTIL +* 560m (28%) 130m (7%) 40m (2%) 572Mi (9%) 770Mi (13%) 470Mi (8%) +example-node-2 340m (34%) 120m (12%) 30m (3%) 380Mi (13%) 410Mi (14%) 260Mi (9%) +example-node-1 220m (22%) 10m (1%) 10m (1%) 192Mi (6%) 360Mi (12%) 210Mi (7%) +``` + ### Filtering By Labels For more advanced usage, kube-capacity also supports filtering by pod, namespace, and/or node labels. The following examples show how to use these filters: @@ -91,25 +103,46 @@ kube-capacity --namespace-labels team=api kube-capacity --node-labels kubernetes.io/role=node ``` -## Prerequisites -Any commands requesting cluster utilization are dependent on [metrics-server](https://github.com/kubernetes-incubator/metrics-server) running on your cluster. If it's not already installed, you can install it with the official [helm chart](https://github.com/helm/charts/tree/master/stable/metrics-server). +### JSON and YAML Output +By default, kube-capacity will provide output in a table format. To view this data in JSON or YAML format, the output flag can be used. 
Here are some sample commands: +``` +kube-capacity --pods --output json +kube-capacity --pods --containers --util --output yaml +``` ## Flags Supported ``` + -c, --containers includes containers in output --context string context to use for Kubernetes config -h, --help help for kube-capacity -n, --namespace-labels string labels to filter namespaces with --node-labels string labels to filter nodes with + -o, --output string output format for information + (supports: [table json yaml]) + (default "table") -l, --pod-labels string labels to filter pods with -p, --pods includes pods in output + --sort string attribute to sort results be (supports: + [cpu.util cpu.request cpu.limit mem.util mem.request mem.limit name]) + (default "name") -u, --util includes resource utilization in output ``` +## Prerequisites +Any commands requesting cluster utilization are dependent on [metrics-server](https://github.com/kubernetes-incubator/metrics-server) running on your cluster. If it's not already installed, you can install it with the official [helm chart](https://github.com/helm/charts/tree/master/stable/metrics-server). + ## Similar Projects There are already some great projects out there that have similar goals. - [kube-resource-report](https://github.com/hjacobs/kube-resource-report): generates HTML/CSS report for resource requests and limits across multiple clusters. - [kubetop](https://github.com/LeastAuthority/kubetop): a CLI similar to top for Kubernetes, focused on resource utilization (not requests and limits). 
+## Contributors + +Although this project was originally developed by [robscott](https://github.com/robscott), there have been some great contributions from others: + +- [endzyme](https://github.com/endzyme) +- [justinbarrick](https://github.com/justinbarrick) + ## License Apache License 2.0 diff --git a/main.go b/main.go index f2c3aa5d..032c337a 100644 --- a/main.go +++ b/main.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go new file mode 100644 index 00000000..b7b8d861 --- /dev/null +++ b/pkg/capacity/capacity.go @@ -0,0 +1,127 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "fmt" + "os" + + "k8s.io/client-go/kubernetes" + metrics "k8s.io/metrics/pkg/client/clientset/versioned" + + "github.com/robscott/kube-capacity/pkg/kube" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +// FetchAndPrint gathers cluster resource data and outputs it +func FetchAndPrint(showContainers, showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext, output, sortBy string) { + clientset, err := kube.NewClientSet(kubeContext) + if err != nil { + fmt.Printf("Error connecting to Kubernetes: %v\n", err) + os.Exit(1) + } + + podList, nodeList := getPodsAndNodes(clientset, podLabels, nodeLabels, namespaceLabels) + pmList := &v1beta1.PodMetricsList{} + if showUtil { + mClientset, err := kube.NewMetricsClientSet(kubeContext) + if err != nil { + fmt.Printf("Error connecting to Metrics API: %v\n", err) + os.Exit(4) + } + + pmList = getMetrics(mClientset) + } + + cm := buildClusterMetric(podList, pmList, nodeList) + printList(&cm, showContainers, showPods, showUtil, output, sortBy) +} + +func getPodsAndNodes(clientset kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { + nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{ + LabelSelector: nodeLabels, + }) + if err != nil { + fmt.Printf("Error listing Nodes: %v\n", err) + os.Exit(2) + } + + podList, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{ + LabelSelector: podLabels, + }) + if err != nil { + fmt.Printf("Error listing Pods: %v\n", err) + os.Exit(3) + } + + newPodItems := []corev1.Pod{} + + nodes := map[string]bool{} + for _, node := range nodeList.Items { + nodes[node.GetName()] = true + } + + for _, pod := range podList.Items { + if !nodes[pod.Spec.NodeName] { + continue + } + + newPodItems = append(newPodItems, pod) + } + + podList.Items = newPodItems + + if namespaceLabels != "" { + 
namespaceList, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{ + LabelSelector: namespaceLabels, + }) + if err != nil { + fmt.Printf("Error listing Namespaces: %v\n", err) + os.Exit(3) + } + + namespaces := map[string]bool{} + for _, ns := range namespaceList.Items { + namespaces[ns.GetName()] = true + } + + newPodItems := []corev1.Pod{} + + for _, pod := range podList.Items { + if !namespaces[pod.GetNamespace()] { + continue + } + + newPodItems = append(newPodItems, pod) + } + + podList.Items = newPodItems + } + + return podList, nodeList +} + +func getMetrics(mClientset *metrics.Clientset) *v1beta1.PodMetricsList { + pmList, err := mClientset.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{}) + if err != nil { + fmt.Printf("Error getting Pod Metrics: %v\n", err) + fmt.Println("For this to work, metrics-server needs to be running in your cluster") + os.Exit(6) + } + + return pmList +} diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go new file mode 100644 index 00000000..2fd90a1e --- /dev/null +++ b/pkg/capacity/capacity_test.go @@ -0,0 +1,119 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/client-go/kubernetes/fake" +) + +func TestGetPodsAndNodes(t *testing.T) { + clientset := fake.NewSimpleClientset( + node("mynode", map[string]string{"hello": "world"}), + node("mynode2", map[string]string{"hello": "world", "moon": "lol"}), + namespace("default", map[string]string{"app": "true"}), + namespace("kube-system", map[string]string{"system": "true"}), + namespace("other", map[string]string{"app": "true", "system": "true"}), + namespace("another", map[string]string{"hello": "world"}), + pod("mynode", "default", "mypod", map[string]string{"a": "test"}), + pod("mynode2", "kube-system", "mypod1", map[string]string{"b": "test"}), + pod("mynode", "other", "mypod2", map[string]string{"c": "test"}), + pod("mynode2", "other", "mypod3", map[string]string{"d": "test"}), + pod("mynode2", "default", "mypod4", map[string]string{"e": "test"}), + pod("mynode", "another", "mypod5", map[string]string{"f": "test"}), + pod("mynode", "default", "mypod6", map[string]string{"g": "test"}), + ) + + podList, nodeList := getPodsAndNodes(clientset, "", "", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", + "another/mypod5", "default/mypod6", + }, listPods(podList)) + + podList, nodeList = getPodsAndNodes(clientset, "", "hello=world", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", + "another/mypod5", "default/mypod6", + }, listPods(podList)) + + podList, nodeList = getPodsAndNodes(clientset, "", "moon=lol", "") + assert.Equal(t, []string{"mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "kube-system/mypod1", 
"other/mypod3", "default/mypod4", + }, listPods(podList)) + + podList, nodeList = getPodsAndNodes(clientset, "a=test", "", "") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", + }, listPods(podList)) + + podList, nodeList = getPodsAndNodes(clientset, "a=test,b!=test", "", "app=true") + assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) + assert.Equal(t, []string{ + "default/mypod", + }, listPods(podList)) +} + +func node(name string, labels map[string]string) *corev1.Node { + return &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} + +func namespace(name string, labels map[string]string) *corev1.Namespace { + return &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} + +func pod(node, namespace, name string, labels map[string]string) *corev1.Pod { + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + NodeName: node, + }, + } +} diff --git a/pkg/capacity/json.go b/pkg/capacity/json.go deleted file mode 100644 index d4750548..00000000 --- a/pkg/capacity/json.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package capacity - json.go contains all the messy details for the json printer implementation -package capacity - -import ( - "encoding/json" - "fmt" -) - -type jsonNodeMetric struct { - Name string `json:"name"` - CPU *jsonResourceOutput `json:"cpu,omitempty"` - Memory *jsonResourceOutput `json:"memory,omitempty"` - Pods []*jsonPod `json:"pods,omitempty"` -} - -type jsonPod struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - CPU *jsonResourceOutput `json:"cpu"` - Memory 
*jsonResourceOutput `json:"memory"` -} - -type jsonResourceOutput struct { - Requests string `json:"requests"` - RequestsPct string `json:"requests_pct"` - Limits string `json:"limits"` - LimitsPct string `json:"limits_pct"` - Utilization string `json:"utilization,omitempty"` - UtilizationPct string `json:"utilization_pct,omitempty"` -} - -type jsonClusterMetrics struct { - Nodes []*jsonNodeMetric `json:"nodes"` - ClusterTotals struct { - CPU *jsonResourceOutput `json:"cpu"` - Memory *jsonResourceOutput `json:"memory"` - } `json:"cluster_totals"` -} - -type jsonPrinter struct { - cm *clusterMetric - showPods bool - showUtil bool -} - -func (jp jsonPrinter) Print() { - jsonOutput := jp.buildJSONClusterMetrics() - - jsonRaw, err := json.MarshalIndent(jsonOutput, "", " ") - if err != nil { - fmt.Println("Error Marshalling JSON") - fmt.Println(err) - } - - fmt.Printf("%s", jsonRaw) -} - -func (jp *jsonPrinter) buildJSONClusterMetrics() jsonClusterMetrics { - var response jsonClusterMetrics - - response.ClusterTotals.CPU = jp.buildJSONResourceOutput(jp.cm.cpu) - response.ClusterTotals.Memory = jp.buildJSONResourceOutput(jp.cm.memory) - - for key, val := range jp.cm.nodeMetrics { - var node jsonNodeMetric - node.Name = key - node.CPU = jp.buildJSONResourceOutput(val.cpu) - node.Memory = jp.buildJSONResourceOutput(val.memory) - if jp.showPods { - for _, val := range val.podMetrics { - var newNode jsonPod - newNode.Name = val.name - newNode.Namespace = val.namespace - newNode.CPU = jp.buildJSONResourceOutput(val.cpu) - newNode.Memory = jp.buildJSONResourceOutput(val.memory) - node.Pods = append(node.Pods, &newNode) - } - } - response.Nodes = append(response.Nodes, &node) - } - - return response -} - -func (jp *jsonPrinter) buildJSONResourceOutput(item *resourceMetric) *jsonResourceOutput { - valueCalculator := item.valueFunction() - percentCalculator := item.percentFunction() - - out := jsonResourceOutput{ - Requests: valueCalculator(item.request), - RequestsPct: 
percentCalculator(item.request), - Limits: valueCalculator(item.limit), - LimitsPct: percentCalculator(item.limit), - } - - if jp.showUtil { - out.Utilization = valueCalculator(item.utilization) - out.UtilizationPct = percentCalculator(item.utilization) - } - return &out -} diff --git a/pkg/capacity/list.go b/pkg/capacity/list.go index f1c94725..4bca0562 100644 --- a/pkg/capacity/list.go +++ b/pkg/capacity/list.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,151 +15,139 @@ package capacity import ( + "encoding/json" "fmt" - "os" - "k8s.io/client-go/kubernetes" - metrics "k8s.io/metrics/pkg/client/clientset/versioned" - - "github.com/robscott/kube-capacity/pkg/kube" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + "sigs.k8s.io/yaml" ) -// List gathers cluster resource data and outputs it -func List(showPods, showUtil bool, podLabels, nodeLabels, namespaceLabels, kubeContext string, output string) { - clientset, err := kube.NewClientSet(kubeContext) - if err != nil { - fmt.Printf("Error connecting to Kubernetes: %v\n", err) - os.Exit(1) - } - - podList, nodeList := getPodsAndNodes(clientset, podLabels, nodeLabels, namespaceLabels) - pmList := &v1beta1.PodMetricsList{} - if showUtil { - mClientset, err := kube.NewMetricsClientSet(kubeContext) - if err != nil { - fmt.Printf("Error connecting to Metrics API: %v\n", err) - os.Exit(4) - } - - pmList = getMetrics(mClientset) - } - cm := buildClusterMetric(podList, pmList, nodeList) - printList(&cm, showPods, showUtil, output) +type listNodeMetric struct { + Name string `json:"name"` + CPU *listResourceOutput `json:"cpu,omitempty"` + Memory *listResourceOutput `json:"memory,omitempty"` + Pods []*listPod `json:"pods,omitempty"` } -func getPodsAndNodes(clientset 
kubernetes.Interface, podLabels, nodeLabels, namespaceLabels string) (*corev1.PodList, *corev1.NodeList) { - nodeList, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{ - LabelSelector: nodeLabels, - }) - if err != nil { - fmt.Printf("Error listing Nodes: %v\n", err) - os.Exit(2) - } - - podList, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{ - LabelSelector: podLabels, - }) - if err != nil { - fmt.Printf("Error listing Pods: %v\n", err) - os.Exit(3) - } - - newPodItems := []corev1.Pod{} - - nodes := map[string]bool{} - for _, node := range nodeList.Items { - nodes[node.GetName()] = true - } +type listPod struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` + Containers []listContainer `json:"containers"` +} - for _, pod := range podList.Items { - if !nodes[pod.Spec.NodeName] { - continue - } +type listContainer struct { + Name string `json:"name"` + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` +} - newPodItems = append(newPodItems, pod) - } +type listResourceOutput struct { + Requests string `json:"requests"` + RequestsPct string `json:"requests_pct"` + Limits string `json:"limits"` + LimitsPct string `json:"limits_pct"` + Utilization string `json:"utilization,omitempty"` + UtilizationPct string `json:"utilization_pct,omitempty"` +} - podList.Items = newPodItems +type listClusterMetrics struct { + Nodes []*listNodeMetric `json:"nodes"` + ClusterTotals *listClusterTotals `json:"cluster_totals"` +} - if namespaceLabels != "" { - namespaceList, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{ - LabelSelector: namespaceLabels, - }) - if err != nil { - fmt.Printf("Error listing Namespaces: %v\n", err) - os.Exit(3) - } +type listClusterTotals struct { + CPU *listResourceOutput `json:"cpu"` + Memory *listResourceOutput `json:"memory"` +} - namespaces := map[string]bool{} - for _, ns := range 
namespaceList.Items { - namespaces[ns.GetName()] = true - } +type listPrinter struct { + cm *clusterMetric + showPods bool + showContainers bool + showUtil bool + sortBy string +} - newPodItems := []corev1.Pod{} +func (lp listPrinter) Print(outputType string) { + listOutput := lp.buildListClusterMetrics() - for _, pod := range podList.Items { - if !namespaces[pod.GetNamespace()] { - continue + jsonRaw, err := json.MarshalIndent(listOutput, "", " ") + if err != nil { + fmt.Println("Error Marshalling JSON") + fmt.Println(err) + } else { + if outputType == JSONOutput { + fmt.Printf("%s", jsonRaw) + } else { + // This is a strange approach, but the k8s YAML package + // already marshalls to JSON before converting to YAML, + // this just allows us to follow the same code path. + yamlRaw, err := yaml.JSONToYAML(jsonRaw) + if err != nil { + fmt.Println("Error Converting JSON to Yaml") + fmt.Println(err) + } else { + fmt.Printf("%s", yamlRaw) } - - newPodItems = append(newPodItems, pod) } - - podList.Items = newPodItems } - - return podList, nodeList } -func getMetrics(mClientset *metrics.Clientset) *v1beta1.PodMetricsList { - pmList, err := mClientset.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{}) - if err != nil { - fmt.Printf("Error getting Pod Metrics: %v\n", err) - fmt.Println("For this to work, metrics-server needs to be running in your cluster") - os.Exit(6) - } - - return pmList -} +func (lp *listPrinter) buildListClusterMetrics() listClusterMetrics { + var response listClusterMetrics -func buildClusterMetric(podList *corev1.PodList, pmList *v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { - cm := clusterMetric{ - cpu: &resourceMetric{resourceType: "cpu"}, - memory: &resourceMetric{resourceType: "memory"}, - nodeMetrics: map[string]*nodeMetric{}, - podMetrics: map[string]*podMetric{}, + response.ClusterTotals = &listClusterTotals{ + CPU: lp.buildListResourceOutput(lp.cm.cpu), + Memory: lp.buildListResourceOutput(lp.cm.memory), } - 
for _, node := range nodeList.Items { - cm.nodeMetrics[node.Name] = &nodeMetric{ - cpu: &resourceMetric{ - resourceType: "cpu", - allocatable: node.Status.Allocatable["cpu"], - }, - memory: &resourceMetric{ - resourceType: "memory", - allocatable: node.Status.Allocatable["memory"], - }, - podMetrics: map[string]*podMetric{}, + for _, nodeMetric := range lp.cm.getSortedNodeMetrics(lp.sortBy) { + var node listNodeMetric + node.Name = nodeMetric.name + node.CPU = lp.buildListResourceOutput(nodeMetric.cpu) + node.Memory = lp.buildListResourceOutput(nodeMetric.memory) + + if lp.showPods || lp.showContainers { + for _, podMetric := range nodeMetric.getSortedPodMetrics(lp.sortBy) { + var pod listPod + pod.Name = podMetric.name + pod.Namespace = podMetric.namespace + pod.CPU = lp.buildListResourceOutput(podMetric.cpu) + pod.Memory = lp.buildListResourceOutput(podMetric.memory) + + if lp.showContainers { + for _, containerMetric := range podMetric.getSortedContainerMetrics(lp.sortBy) { + pod.Containers = append(pod.Containers, listContainer{ + Name: containerMetric.name, + Memory: lp.buildListResourceOutput(containerMetric.memory), + CPU: lp.buildListResourceOutput(containerMetric.cpu), + }) + } + } + node.Pods = append(node.Pods, &pod) + } } - - cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) - cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) + response.Nodes = append(response.Nodes, &node) } - podMetrics := map[string]v1beta1.PodMetrics{} - for _, pm := range pmList.Items { - podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm - } + return response +} - for _, pod := range podList.Items { - if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { - cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) - } +func (lp *listPrinter) buildListResourceOutput(item *resourceMetric) *listResourceOutput { + valueCalculator := item.valueFunction() + percentCalculator := 
item.percentFunction() + + out := listResourceOutput{ + Requests: valueCalculator(item.request), + RequestsPct: percentCalculator(item.request), + Limits: valueCalculator(item.limit), + LimitsPct: percentCalculator(item.limit), } - return cm + if lp.showUtil { + out.Utilization = valueCalculator(item.utilization) + out.UtilizationPct = percentCalculator(item.utilization) + } + return &out } diff --git a/pkg/capacity/list_test.go b/pkg/capacity/list_test.go index 4dc85334..c052540f 100644 --- a/pkg/capacity/list_test.go +++ b/pkg/capacity/list_test.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ package capacity import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -24,39 +23,165 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" - - "k8s.io/client-go/kubernetes/fake" ) -func TestBuildClusterMetricEmpty(t *testing.T) { - cm := buildClusterMetric( - &corev1.PodList{}, &v1beta1.PodMetricsList{}, &corev1.NodeList{}, - ) +func TestBuildListClusterMetricsNoOptions(t *testing.T) { + cm := getTestClusterMetric() + + lp := listPrinter{ + cm: &cm, + } + + lcm := lp.buildListClusterMetrics() - expected := clusterMetric{ - cpu: &resourceMetric{ - resourceType: "cpu", - allocatable: resource.Quantity{}, - request: resource.Quantity{}, - limit: resource.Quantity{}, - utilization: resource.Quantity{}, + assert.EqualValues(t, &listClusterTotals{ + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", }, - memory: &resourceMetric{ - resourceType: "memory", - allocatable: resource.Quantity{}, - request: resource.Quantity{}, - limit: resource.Quantity{}, - utilization: resource.Quantity{}, + Memory: &listResourceOutput{ + Requests: 
"410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", }, - nodeMetrics: map[string]*nodeMetric{}, - podMetrics: map[string]*podMetric{}, + }, lcm.ClusterTotals) + + assert.EqualValues(t, &listNodeMetric{ + Name: "example-node-1", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + }, + }, lcm.Nodes[0]) + +} + +func TestBuildListClusterMetricsAllOptions(t *testing.T) { + cm := getTestClusterMetric() + + lp := listPrinter{ + cm: &cm, + showUtil: true, + showPods: true, + showContainers: true, } - assert.EqualValues(t, cm, expected) + lcm := lp.buildListClusterMetrics() + + assert.EqualValues(t, &listClusterTotals{ + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + }, lcm.ClusterTotals) + + assert.EqualValues(t, &listNodeMetric{ + Name: "example-node-1", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + Pods: []*listPod{ + { + Name: "example-pod", + Namespace: "default", + CPU: &listResourceOutput{ + Requests: "650m", + RequestsPct: "65%", + Limits: "810m", + LimitsPct: "81%", + Utilization: "63m", + UtilizationPct: "6%", + }, + Memory: &listResourceOutput{ + Requests: "410Mi", + RequestsPct: "10%", + Limits: "580Mi", + LimitsPct: "14%", + Utilization: "439Mi", + UtilizationPct: "10%", + }, + Containers: []listContainer{ + { + Name: 
"example-container-1", + CPU: &listResourceOutput{ + Requests: "450m", + RequestsPct: "45%", + Limits: "560m", + LimitsPct: "56%", + Utilization: "40m", + UtilizationPct: "4%", + }, + Memory: &listResourceOutput{ + Requests: "160Mi", + RequestsPct: "4%", + Limits: "280Mi", + LimitsPct: "7%", + Utilization: "288Mi", + UtilizationPct: "7%", + }, + }, { + Name: "example-container-2", + CPU: &listResourceOutput{ + Requests: "200m", + RequestsPct: "20%", + Limits: "250m", + LimitsPct: "25%", + Utilization: "23m", + UtilizationPct: "2%", + }, + Memory: &listResourceOutput{ + Requests: "250Mi", + RequestsPct: "6%", + Limits: "300Mi", + LimitsPct: "7%", + Utilization: "151Mi", + UtilizationPct: "3%", + }, + }, + }, + }, + }}, lcm.Nodes[0]) + } -func TestBuildClusterMetricFull(t *testing.T) { - cm := buildClusterMetric( +func getTestClusterMetric() clusterMetric { + return buildClusterMetric( &corev1.PodList{ Items: []corev1.Pod{ { @@ -68,26 +193,28 @@ func TestBuildClusterMetricFull(t *testing.T) { NodeName: "example-node-1", Containers: []corev1.Container{ { + Name: "example-container-1", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - "cpu": resource.MustParse("250m"), - "memory": resource.MustParse("250Mi"), + "cpu": resource.MustParse("450m"), + "memory": resource.MustParse("160Mi"), }, Limits: corev1.ResourceList{ - "cpu": resource.MustParse("250m"), - "memory": resource.MustParse("500Mi"), + "cpu": resource.MustParse("560m"), + "memory": resource.MustParse("280Mi"), }, }, }, { + Name: "example-container-2", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - "cpu": resource.MustParse("100m"), - "memory": resource.MustParse("150Mi"), + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("250Mi"), }, Limits: corev1.ResourceList{ - "cpu": resource.MustParse("150m"), - "memory": resource.MustParse("200Mi"), + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("300Mi"), }, }, }, @@ -104,15 +231,17 
@@ func TestBuildClusterMetricFull(t *testing.T) { }, Containers: []v1beta1.ContainerMetrics{ { + Name: "example-container-1", Usage: corev1.ResourceList{ - "cpu": resource.MustParse("10m"), - "memory": resource.MustParse("188Mi"), + "cpu": resource.MustParse("40m"), + "memory": resource.MustParse("288Mi"), }, }, { + Name: "example-container-2", Usage: corev1.ResourceList{ - "cpu": resource.MustParse("13m"), - "memory": resource.MustParse("111Mi"), + "cpu": resource.MustParse("23m"), + "memory": resource.MustParse("151Mi"), }, }, }, @@ -134,162 +263,4 @@ func TestBuildClusterMetricFull(t *testing.T) { }, }, ) - - cpuExpected := &resourceMetric{ - allocatable: resource.MustParse("1000m"), - request: resource.MustParse("350m"), - limit: resource.MustParse("400m"), - utilization: resource.MustParse("23m"), - } - - memoryExpected := &resourceMetric{ - allocatable: resource.MustParse("4000Mi"), - request: resource.MustParse("400Mi"), - limit: resource.MustParse("700Mi"), - utilization: resource.MustParse("299Mi"), - } - - assert.Len(t, cm.podMetrics, 1) - - assert.NotNil(t, cm.cpu) - ensureEqualResourceMetric(t, cm.cpu, cpuExpected) - assert.NotNil(t, cm.memory) - ensureEqualResourceMetric(t, cm.memory, memoryExpected) - - assert.NotNil(t, cm.nodeMetrics["example-node-1"]) - assert.NotNil(t, cm.nodeMetrics["example-node-1"].cpu) - ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].cpu, cpuExpected) - assert.NotNil(t, cm.nodeMetrics["example-node-1"].memory) - ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].memory, memoryExpected) - - // Change to pod specific util numbers - cpuExpected.utilization = resource.MustParse("23m") - memoryExpected.utilization = resource.MustParse("299Mi") - - assert.NotNil(t, cm.podMetrics["default-example-pod"]) - assert.NotNil(t, cm.podMetrics["default-example-pod"].cpu) - ensureEqualResourceMetric(t, cm.podMetrics["default-example-pod"].cpu, cpuExpected) - assert.NotNil(t, 
cm.podMetrics["default-example-pod"].memory) - ensureEqualResourceMetric(t, cm.podMetrics["default-example-pod"].memory, memoryExpected) -} - -func ensureEqualResourceMetric(t *testing.T, actual *resourceMetric, expected *resourceMetric) { - assert.Equal(t, actual.allocatable.MilliValue(), expected.allocatable.MilliValue()) - assert.Equal(t, actual.utilization.MilliValue(), expected.utilization.MilliValue()) - assert.Equal(t, actual.request.MilliValue(), expected.request.MilliValue()) - assert.Equal(t, actual.limit.MilliValue(), expected.limit.MilliValue()) -} - -func listNodes(n *corev1.NodeList) []string { - nodes := []string{} - - for _, node := range n.Items { - nodes = append(nodes, node.GetName()) - } - - return nodes -} - -func listPods(p *corev1.PodList) []string { - pods := []string{} - - for _, pod := range p.Items { - pods = append(pods, fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName())) - } - - return pods -} - -func node(name string, labels map[string]string) *corev1.Node { - return &corev1.Node{ - TypeMeta: metav1.TypeMeta{ - Kind: "Node", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - } -} - -func namespace(name string, labels map[string]string) *corev1.Namespace { - return &corev1.Namespace{ - TypeMeta: metav1.TypeMeta{ - Kind: "Namespace", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - } -} - -func pod(node, namespace, name string, labels map[string]string) *corev1.Pod { - return &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - }, - Spec: corev1.PodSpec{ - NodeName: node, - }, - } -} - -func TestGetPodsAndNodes(t *testing.T) { - clientset := fake.NewSimpleClientset( - node("mynode", map[string]string{"hello": "world"}), - node("mynode2", map[string]string{"hello": "world", "moon": "lol"}), - namespace("default", 
map[string]string{"app": "true"}), - namespace("kube-system", map[string]string{"system": "true"}), - namespace("other", map[string]string{"app": "true", "system": "true"}), - namespace("another", map[string]string{"hello": "world"}), - pod("mynode", "default", "mypod", map[string]string{"a": "test"}), - pod("mynode2", "kube-system", "mypod1", map[string]string{"b": "test"}), - pod("mynode", "other", "mypod2", map[string]string{"c": "test"}), - pod("mynode2", "other", "mypod3", map[string]string{"d": "test"}), - pod("mynode2", "default", "mypod4", map[string]string{"e": "test"}), - pod("mynode", "another", "mypod5", map[string]string{"f": "test"}), - pod("mynode", "default", "mypod6", map[string]string{"g": "test"}), - ) - - podList, nodeList := getPodsAndNodes(clientset, "", "", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", - "another/mypod5", "default/mypod6", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "", "hello=world", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", "kube-system/mypod1", "other/mypod2", "other/mypod3", "default/mypod4", - "another/mypod5", "default/mypod6", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "", "moon=lol", "") - assert.Equal(t, []string{"mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "kube-system/mypod1", "other/mypod3", "default/mypod4", - }, listPods(podList)) - - podList, nodeList = getPodsAndNodes(clientset, "a=test", "", "") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, []string{ - "default/mypod", - }, listPods(podList)) - - - podList, nodeList = getPodsAndNodes(clientset, "a=test,b!=test", "", "app=true") - assert.Equal(t, []string{"mynode", "mynode2"}, listNodes(nodeList)) - assert.Equal(t, 
[]string{ - "default/mypod", - }, listPods(podList)) } diff --git a/pkg/capacity/printer.go b/pkg/capacity/printer.go index 75da098e..0e6fcec1 100644 --- a/pkg/capacity/printer.go +++ b/pkg/capacity/printer.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,10 +21,12 @@ import ( ) const ( - //TableOutput is the constant value for output type text + //TableOutput is the constant value for output type table TableOutput string = "table" - //JSONOutput is the constant value for output type text + //JSONOutput is the constant value for output type JSON JSONOutput string = "json" + //YAMLOutput is the constant value for output type YAML + YAMLOutput string = "yaml" ) // SupportedOutputs returns a string list of output formats supposed by this package @@ -32,41 +34,32 @@ func SupportedOutputs() []string { return []string{ TableOutput, JSONOutput, + YAMLOutput, } } -type printer interface { - Print() -} - -func printList(cm *clusterMetric, showPods bool, showUtil bool, output string) { - p, err := printerFactory(cm, showPods, showUtil, output) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - p.Print() -} - -func printerFactory(cm *clusterMetric, showPods bool, showUtil bool, outputType string) (printer, error) { - var response printer - switch outputType { - case JSONOutput: - response = jsonPrinter{ - cm: cm, - showPods: showPods, - showUtil: showUtil, +func printList(cm *clusterMetric, showContainers, showPods, showUtil bool, output, sortBy string) { + if output == JSONOutput || output == YAMLOutput { + lp := &listPrinter{ + cm: cm, + showPods: showPods, + showUtil: showUtil, + showContainers: showContainers, + sortBy: sortBy, } - return response, nil - case TableOutput: - response = tablePrinter{ - cm: cm, - showPods: showPods, - showUtil: showUtil, - w: new(tabwriter.Writer), + 
lp.Print(output) + } else if output == TableOutput { + tp := &tablePrinter{ + cm: cm, + showPods: showPods, + showUtil: showUtil, + showContainers: showContainers, + sortBy: sortBy, + w: new(tabwriter.Writer), } - return response, nil - default: - return response, fmt.Errorf("Called with an unsupported output type: %s", outputType) + tp.Print() + } else { + fmt.Printf("Called with an unsupported output type: %s", output) + os.Exit(1) } } diff --git a/pkg/capacity/resources.go b/pkg/capacity/resources.go index eb2b2ca8..658d6e95 100644 --- a/pkg/capacity/resources.go +++ b/pkg/capacity/resources.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package capacity import ( "fmt" + "sort" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -23,6 +24,17 @@ import ( v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) +// SupportedSortAttributes lists the valid sorting options +var SupportedSortAttributes = [...]string{ + "cpu.util", + "cpu.request", + "cpu.limit", + "mem.util", + "mem.request", + "mem.limit", + "name", +} + type resourceMetric struct { resourceType string allocatable resource.Quantity @@ -35,20 +47,66 @@ type clusterMetric struct { cpu *resourceMetric memory *resourceMetric nodeMetrics map[string]*nodeMetric - podMetrics map[string]*podMetric } type nodeMetric struct { + name string cpu *resourceMetric memory *resourceMetric podMetrics map[string]*podMetric } type podMetric struct { - name string - namespace string - cpu *resourceMetric - memory *resourceMetric + name string + namespace string + cpu *resourceMetric + memory *resourceMetric + containerMetrics map[string]*containerMetric +} + +type containerMetric struct { + name string + cpu *resourceMetric + memory *resourceMetric +} + +func buildClusterMetric(podList *corev1.PodList, pmList 
*v1beta1.PodMetricsList, nodeList *corev1.NodeList) clusterMetric { + cm := clusterMetric{ + cpu: &resourceMetric{resourceType: "cpu"}, + memory: &resourceMetric{resourceType: "memory"}, + nodeMetrics: map[string]*nodeMetric{}, + } + + for _, node := range nodeList.Items { + cm.nodeMetrics[node.Name] = &nodeMetric{ + name: node.Name, + cpu: &resourceMetric{ + resourceType: "cpu", + allocatable: node.Status.Allocatable["cpu"], + }, + memory: &resourceMetric{ + resourceType: "memory", + allocatable: node.Status.Allocatable["memory"], + }, + podMetrics: map[string]*podMetric{}, + } + + cm.cpu.allocatable.Add(node.Status.Allocatable["cpu"]) + cm.memory.allocatable.Add(node.Status.Allocatable["memory"]) + } + + podMetrics := map[string]v1beta1.PodMetrics{} + for _, pm := range pmList.Items { + podMetrics[fmt.Sprintf("%s-%s", pm.GetNamespace(), pm.GetName())] = pm + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { + cm.addPodMetric(&pod, podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]) + } + } + + return cm } func (rm *resourceMetric) addMetric(m *resourceMetric) { @@ -61,6 +119,7 @@ func (rm *resourceMetric) addMetric(m *resourceMetric) { func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMetrics) { req, limit := resourcehelper.PodRequestsAndLimits(pod) key := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name) + nm := cm.nodeMetrics[pod.Spec.NodeName] pm := &podMetric{ name: pod.Name, @@ -75,19 +134,37 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet request: req["memory"], limit: limit["memory"], }, + containerMetrics: map[string]*containerMetric{}, + } + + for _, container := range pod.Spec.Containers { + pm.containerMetrics[container.Name] = &containerMetric{ + name: container.Name, + cpu: &resourceMetric{ + resourceType: "cpu", + request: container.Resources.Requests["cpu"], + limit: 
container.Resources.Limits["cpu"], + allocatable: nm.cpu.allocatable, + }, + memory: &resourceMetric{ + resourceType: "memory", + request: container.Resources.Requests["memory"], + limit: container.Resources.Limits["memory"], + allocatable: nm.memory.allocatable, + }, + } } - cm.podMetrics[key] = pm - nm := cm.nodeMetrics[pod.Spec.NodeName] if nm != nil { cm.cpu.request.Add(req["cpu"]) cm.cpu.limit.Add(limit["cpu"]) cm.memory.request.Add(req["memory"]) cm.memory.limit.Add(limit["memory"]) - cm.podMetrics[key].cpu.allocatable = nm.cpu.allocatable - cm.podMetrics[key].memory.allocatable = nm.memory.allocatable - nm.podMetrics[key] = cm.podMetrics[key] + nm.podMetrics[key] = pm + nm.podMetrics[key].cpu.allocatable = nm.cpu.allocatable + nm.podMetrics[key].memory.allocatable = nm.memory.allocatable + nm.cpu.request.Add(req["cpu"]) nm.cpu.limit.Add(limit["cpu"]) nm.memory.request.Add(req["memory"]) @@ -95,7 +172,9 @@ func (cm *clusterMetric) addPodMetric(pod *corev1.Pod, podMetrics v1beta1.PodMet } for _, container := range podMetrics.Containers { + pm.containerMetrics[container.Name].cpu.utilization = container.Usage["cpu"] pm.cpu.utilization.Add(container.Usage["cpu"]) + pm.containerMetrics[container.Name].memory.utilization = container.Usage["memory"] pm.memory.utilization.Add(container.Usage["memory"]) if nm == nil { @@ -115,6 +194,108 @@ func (cm *clusterMetric) addNodeMetric(nm *nodeMetric) { cm.memory.addMetric(nm.memory) } +func (cm *clusterMetric) getSortedNodeMetrics(sortBy string) []*nodeMetric { + sortedNodeMetrics := make([]*nodeMetric, len(cm.nodeMetrics)) + + i := 0 + for name := range cm.nodeMetrics { + sortedNodeMetrics[i] = cm.nodeMetrics[name] + i++ + } + + sort.Slice(sortedNodeMetrics, func(i, j int) bool { + m1 := sortedNodeMetrics[i] + m2 := sortedNodeMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < 
m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedNodeMetrics +} + +func (nm *nodeMetric) getSortedPodMetrics(sortBy string) []*podMetric { + sortedPodMetrics := make([]*podMetric, len(nm.podMetrics)) + + i := 0 + for name := range nm.podMetrics { + sortedPodMetrics[i] = nm.podMetrics[name] + i++ + } + + sort.Slice(sortedPodMetrics, func(i, j int) bool { + m1 := sortedPodMetrics[i] + m2 := sortedPodMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedPodMetrics +} + +func (pm *podMetric) getSortedContainerMetrics(sortBy string) []*containerMetric { + sortedContainerMetrics := make([]*containerMetric, len(pm.containerMetrics)) + + i := 0 + for name := range pm.containerMetrics { + sortedContainerMetrics[i] = pm.containerMetrics[name] + i++ + } + + sort.Slice(sortedContainerMetrics, func(i, j int) bool { + m1 := sortedContainerMetrics[i] + m2 := sortedContainerMetrics[j] + + switch sortBy { + case "cpu.util": + return m2.cpu.utilization.MilliValue() < m1.cpu.utilization.MilliValue() + case "cpu.limit": + return m2.cpu.limit.MilliValue() < 
m1.cpu.limit.MilliValue() + case "cpu.request": + return m2.cpu.request.MilliValue() < m1.cpu.request.MilliValue() + case "mem.util": + return m2.memory.utilization.Value() < m1.memory.utilization.Value() + case "mem.limit": + return m2.memory.limit.Value() < m1.memory.limit.Value() + case "mem.request": + return m2.memory.request.Value() < m1.memory.request.Value() + default: + return m1.name < m2.name + } + }) + + return sortedContainerMetrics +} + func (rm *resourceMetric) requestString() string { return resourceString(rm.request, rm.allocatable, rm.resourceType) } @@ -134,9 +315,10 @@ func resourceString(actual, allocatable resource.Quantity, resourceType string) } if resourceType == "cpu" { - return fmt.Sprintf("%dm (%d%%)", actual.MilliValue(), int64(utilPercent)) + return fmt.Sprintf("%dm (%d", actual.MilliValue(), int64(utilPercent)) + "%%)" } - return fmt.Sprintf("%dMi (%d%%)", actual.Value()/1048576, int64(utilPercent)) + + return fmt.Sprintf("%dMi (%d", actual.Value()/1048576, int64(utilPercent)) + "%%)" } // NOTE: This might not be a great place for closures due to the cyclical nature of how resourceType works. Perhaps better implemented another way. diff --git a/pkg/capacity/resources_test.go b/pkg/capacity/resources_test.go new file mode 100644 index 00000000..0b735f8b --- /dev/null +++ b/pkg/capacity/resources_test.go @@ -0,0 +1,199 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capacity + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +func TestBuildClusterMetricEmpty(t *testing.T) { + cm := buildClusterMetric( + &corev1.PodList{}, &v1beta1.PodMetricsList{}, &corev1.NodeList{}, + ) + + expected := clusterMetric{ + cpu: &resourceMetric{ + resourceType: "cpu", + allocatable: resource.Quantity{}, + request: resource.Quantity{}, + limit: resource.Quantity{}, + utilization: resource.Quantity{}, + }, + memory: &resourceMetric{ + resourceType: "memory", + allocatable: resource.Quantity{}, + request: resource.Quantity{}, + limit: resource.Quantity{}, + utilization: resource.Quantity{}, + }, + nodeMetrics: map[string]*nodeMetric{}, + } + + assert.EqualValues(t, cm, expected) +} + +func TestBuildClusterMetricFull(t *testing.T) { + cm := buildClusterMetric( + &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + NodeName: "example-node-1", + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("250Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("500Mi"), + }, + }, + }, + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("100m"), + "memory": resource.MustParse("150Mi"), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("150m"), + "memory": resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, &v1beta1.PodMetricsList{ + Items: []v1beta1.PodMetrics{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-pod", + Namespace: "default", + }, + Containers: []v1beta1.ContainerMetrics{ 
+ { + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("10m"), + "memory": resource.MustParse("188Mi"), + }, + }, + { + Usage: corev1.ResourceList{ + "cpu": resource.MustParse("13m"), + "memory": resource.MustParse("111Mi"), + }, + }, + }, + }, + }, + }, &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "example-node-1", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + "cpu": resource.MustParse("1000m"), + "memory": resource.MustParse("4000Mi"), + }, + }, + }, + }, + }, + ) + + cpuExpected := &resourceMetric{ + allocatable: resource.MustParse("1000m"), + request: resource.MustParse("350m"), + limit: resource.MustParse("400m"), + utilization: resource.MustParse("23m"), + } + + memoryExpected := &resourceMetric{ + allocatable: resource.MustParse("4000Mi"), + request: resource.MustParse("400Mi"), + limit: resource.MustParse("700Mi"), + utilization: resource.MustParse("299Mi"), + } + + assert.NotNil(t, cm.cpu) + ensureEqualResourceMetric(t, cm.cpu, cpuExpected) + assert.NotNil(t, cm.memory) + ensureEqualResourceMetric(t, cm.memory, memoryExpected) + + assert.NotNil(t, cm.nodeMetrics["example-node-1"]) + assert.NotNil(t, cm.nodeMetrics["example-node-1"].cpu) + ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].cpu, cpuExpected) + assert.NotNil(t, cm.nodeMetrics["example-node-1"].memory) + ensureEqualResourceMetric(t, cm.nodeMetrics["example-node-1"].memory, memoryExpected) + + assert.Len(t, cm.nodeMetrics["example-node-1"].podMetrics, 1) + + pm := cm.nodeMetrics["example-node-1"].podMetrics + // Change to pod specific util numbers + cpuExpected.utilization = resource.MustParse("23m") + memoryExpected.utilization = resource.MustParse("299Mi") + + assert.NotNil(t, pm["default-example-pod"]) + assert.NotNil(t, pm["default-example-pod"].cpu) + ensureEqualResourceMetric(t, pm["default-example-pod"].cpu, cpuExpected) + assert.NotNil(t, pm["default-example-pod"].memory) + ensureEqualResourceMetric(t, 
pm["default-example-pod"].memory, memoryExpected) +} + +func ensureEqualResourceMetric(t *testing.T, actual *resourceMetric, expected *resourceMetric) { + assert.Equal(t, actual.allocatable.MilliValue(), expected.allocatable.MilliValue()) + assert.Equal(t, actual.utilization.MilliValue(), expected.utilization.MilliValue()) + assert.Equal(t, actual.request.MilliValue(), expected.request.MilliValue()) + assert.Equal(t, actual.limit.MilliValue(), expected.limit.MilliValue()) +} + +func listNodes(n *corev1.NodeList) []string { + nodes := []string{} + + for _, node := range n.Items { + nodes = append(nodes, node.GetName()) + } + + return nodes +} + +func listPods(p *corev1.PodList) []string { + pods := []string{} + + for _, pod := range p.Items { + pods = append(pods, fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName())) + } + + return pods +} diff --git a/pkg/capacity/table.go b/pkg/capacity/table.go index bf83f65f..be5e8a6f 100644 --- a/pkg/capacity/table.go +++ b/pkg/capacity/table.go @@ -1,159 +1,183 @@ -// Package capacity - text.go contains all the messy details for the text printer implementation +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package capacity import ( "fmt" "os" - "sort" + "strings" "text/tabwriter" ) type tablePrinter struct { - cm *clusterMetric - showPods bool - showUtil bool - w *tabwriter.Writer + cm *clusterMetric + showPods bool + showUtil bool + showContainers bool + sortBy string + w *tabwriter.Writer +} + +type tableLine struct { + node string + namespace string + pod string + container string + cpuRequests string + cpuLimits string + cpuUtil string + memoryRequests string + memoryLimits string + memoryUtil string } -func (tp tablePrinter) Print() { +var headerStrings = tableLine{ + node: "NODE", + namespace: "NAMESPACE", + pod: "POD", + container: "CONTAINER", + cpuRequests: "CPU REQUESTS", + cpuLimits: "CPU LIMITS", + cpuUtil: "CPU UTIL", + memoryRequests: "MEMORY REQUESTS", + memoryLimits: "MEMORY LIMITS", + memoryUtil: "MEMORY UTIL", +} + +func (tp *tablePrinter) Print() { tp.w.Init(os.Stdout, 0, 8, 2, ' ', 0) - names := make([]string, len(tp.cm.nodeMetrics)) + sortedNodeMetrics := tp.cm.getSortedNodeMetrics(tp.sortBy) - i := 0 - for name := range tp.cm.nodeMetrics { - names[i] = name - i++ - } - sort.Strings(names) + tp.printLine(&headerStrings) - tp.printHeaders() + if len(sortedNodeMetrics) > 1 { + tp.printClusterLine() + tp.printLine(&tableLine{}) + } - for _, name := range names { - tp.printNode(name, tp.cm.nodeMetrics[name]) + for _, nm := range sortedNodeMetrics { + tp.printNodeLine(nm.name, nm) + tp.printLine(&tableLine{}) + + if tp.showPods || tp.showContainers { + podMetrics := nm.getSortedPodMetrics(tp.sortBy) + for _, pm := range podMetrics { + tp.printPodLine(nm.name, pm) + if tp.showContainers { + containerMetrics := pm.getSortedContainerMetrics(tp.sortBy) + for _, containerMetric := range containerMetrics { + tp.printContainerLine(nm.name, pm, containerMetric) + } + } + } + } } tp.w.Flush() } -func (tp *tablePrinter) printHeaders() { - if tp.showPods && tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t 
MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL") +func (tp *tablePrinter) printLine(tl *tableLine) { + lineItems := tp.getLineItems(tl) + fmt.Fprintf(tp.w, strings.Join(lineItems[:], "\t ")+"\n") +} - if len(tp.cm.nodeMetrics) > 1 { - fmt.Fprintf(tp.w, "* \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.cpu.utilString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString(), - tp.cm.memory.utilString()) +func (tp *tablePrinter) getLineItems(tl *tableLine) []string { + lineItems := []string{tl.node} - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") - } - } else if tp.showPods { - fmt.Fprintln(tp.w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") - - fmt.Fprintf(tp.w, "* \t *\t *\t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString()) - - fmt.Fprintln(tp.w, "\t\t\t\t\t\t") - - } else if tp.showUtil { - fmt.Fprintln(tp.w, "NODE\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL") - - fmt.Fprintf(tp.w, "* \t %s \t %s \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), - tp.cm.cpu.limitString(), - tp.cm.cpu.utilString(), - tp.cm.memory.requestString(), - tp.cm.memory.limitString(), - tp.cm.memory.utilString()) - - } else { - fmt.Fprintln(tp.w, "NODE\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS") - - if len(tp.cm.nodeMetrics) > 1 { - fmt.Fprintf(tp.w, "* \t %s \t %s \t %s \t %s \n", - tp.cm.cpu.requestString(), tp.cm.cpu.limitString(), - tp.cm.memory.requestString(), tp.cm.memory.limitString()) - } + if tp.showContainers || tp.showPods { + lineItems = append(lineItems, tl.namespace) + lineItems = append(lineItems, tl.pod) } -} -func (tp *tablePrinter) printNode(name string, nm *nodeMetric) { - podNames := make([]string, len(nm.podMetrics)) + if tp.showContainers { + lineItems = append(lineItems, tl.container) + } 
- i := 0 - for name := range nm.podMetrics { - podNames[i] = name - i++ + lineItems = append(lineItems, tl.cpuRequests) + lineItems = append(lineItems, tl.cpuLimits) + + if tp.showUtil { + lineItems = append(lineItems, tl.cpuUtil) } - sort.Strings(podNames) - - if tp.showPods && tp.showUtil { - fmt.Fprintf(tp.w, "%s \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.cpu.utilString(), - nm.memory.requestString(), - nm.memory.limitString(), - nm.memory.utilString()) - - for _, podName := range podNames { - pm := nm.podMetrics[podName] - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - pm.namespace, - pm.name, - pm.cpu.requestString(), - pm.cpu.limitString(), - pm.cpu.utilString(), - pm.memory.requestString(), - pm.memory.limitString(), - pm.memory.utilString()) - } - fmt.Fprintln(tp.w, "\t\t\t\t\t\t\t\t") - - } else if tp.showPods { - fmt.Fprintf(tp.w, "%s \t *\t *\t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.memory.requestString(), - nm.memory.limitString()) - - for _, podName := range podNames { - pm := nm.podMetrics[podName] - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - pm.namespace, - pm.name, - pm.cpu.requestString(), - pm.cpu.limitString(), - pm.memory.requestString(), - pm.memory.limitString()) - } + lineItems = append(lineItems, tl.memoryRequests) + lineItems = append(lineItems, tl.memoryLimits) - fmt.Fprintln(tp.w, "\t\t\t\t\t\t") - - } else if tp.showUtil { - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \n", - name, - nm.cpu.requestString(), - nm.cpu.limitString(), - nm.cpu.utilString(), - nm.memory.requestString(), - nm.memory.limitString(), - nm.memory.utilString()) - - } else { - fmt.Fprintf(tp.w, "%s \t %s \t %s \t %s \t %s \n", name, - nm.cpu.requestString(), nm.cpu.limitString(), - nm.memory.requestString(), nm.memory.limitString()) + if tp.showUtil { + lineItems = 
append(lineItems, tl.memoryUtil) } + + return lineItems +} + +func (tp *tablePrinter) printClusterLine() { + tp.printLine(&tableLine{ + node: "*", + namespace: "*", + pod: "*", + container: "*", + cpuRequests: tp.cm.cpu.requestString(), + cpuLimits: tp.cm.cpu.limitString(), + cpuUtil: tp.cm.cpu.utilString(), + memoryRequests: tp.cm.memory.requestString(), + memoryLimits: tp.cm.memory.limitString(), + memoryUtil: tp.cm.memory.utilString(), + }) +} + +func (tp *tablePrinter) printNodeLine(nodeName string, nm *nodeMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: "*", + pod: "*", + container: "*", + cpuRequests: nm.cpu.requestString(), + cpuLimits: nm.cpu.limitString(), + cpuUtil: nm.cpu.utilString(), + memoryRequests: nm.memory.requestString(), + memoryLimits: nm.memory.limitString(), + memoryUtil: nm.memory.utilString(), + }) +} + +func (tp *tablePrinter) printPodLine(nodeName string, pm *podMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: pm.namespace, + pod: pm.name, + container: "*", + cpuRequests: pm.cpu.requestString(), + cpuLimits: pm.cpu.limitString(), + cpuUtil: pm.cpu.utilString(), + memoryRequests: pm.memory.requestString(), + memoryLimits: pm.memory.limitString(), + memoryUtil: pm.memory.utilString(), + }) +} + +func (tp *tablePrinter) printContainerLine(nodeName string, pm *podMetric, cm *containerMetric) { + tp.printLine(&tableLine{ + node: nodeName, + namespace: pm.namespace, + pod: pm.name, + container: cm.name, + cpuRequests: cm.cpu.requestString(), + cpuLimits: cm.cpu.limitString(), + cpuUtil: cm.cpu.utilString(), + memoryRequests: cm.memory.requestString(), + memoryLimits: cm.memory.limitString(), + memoryUtil: cm.memory.utilString(), + }) } diff --git a/pkg/capacity/table_test.go b/pkg/capacity/table_test.go new file mode 100644 index 00000000..2e6760c2 --- /dev/null +++ b/pkg/capacity/table_test.go @@ -0,0 +1,112 @@ +// Copyright 2019 Kube Capacity Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capacity + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetLineItems(t *testing.T) { + tpNone := &tablePrinter{ + showPods: false, + showUtil: false, + showContainers: false, + } + + tpSome := &tablePrinter{ + showPods: false, + showUtil: false, + showContainers: true, + } + + tpAll := &tablePrinter{ + showPods: true, + showUtil: true, + showContainers: true, + } + + tl := &tableLine{ + node: "example-node-1", + namespace: "example-namespace", + pod: "nginx-fsde", + container: "nginx", + cpuRequests: "100m", + cpuLimits: "200m", + cpuUtil: "14m", + memoryRequests: "1000Mi", + memoryLimits: "2000Mi", + memoryUtil: "326Mi", + } + + var testCases = []struct { + name string + tp *tablePrinter + tl *tableLine + expected []string + }{ + { + name: "all false", + tp: tpNone, + tl: tl, + expected: []string{ + "example-node-1", + "100m", + "200m", + "1000Mi", + "2000Mi", + }, + }, { + name: "some true", + tp: tpSome, + tl: tl, + expected: []string{ + "example-node-1", + "example-namespace", + "nginx-fsde", + "nginx", + "100m", + "200m", + "1000Mi", + "2000Mi", + }, + }, { + name: "all true", + tp: tpAll, + tl: tl, + expected: []string{ + "example-node-1", + "example-namespace", + "nginx-fsde", + "nginx", + "100m", + "200m", + "14m", + "1000Mi", + "2000Mi", + "326Mi", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lineItems := tc.tp.getLineItems(tl) + assert.Len(t, lineItems, 
len(tc.expected)) + assert.ElementsMatch(t, lineItems, tc.expected) + }) + } +} diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index b351566b..af69bcee 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" ) +var showContainers bool var showPods bool var showUtil bool var podLabels string @@ -29,6 +30,7 @@ var nodeLabels string var namespaceLabels string var kubeContext string var outputFormat string +var sortBy string var rootCmd = &cobra.Command{ Use: "kube-capacity", @@ -44,18 +46,32 @@ var rootCmd = &cobra.Command{ os.Exit(1) } - capacity.List(showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat) + capacity.FetchAndPrint(showContainers, showPods, showUtil, podLabels, nodeLabels, namespaceLabels, kubeContext, outputFormat, sortBy) }, } func init() { - rootCmd.PersistentFlags().BoolVarP(&showPods, "pods", "p", false, "includes pods in output") - rootCmd.PersistentFlags().BoolVarP(&showUtil, "util", "u", false, "includes resource utilization in output") - rootCmd.PersistentFlags().StringVarP(&podLabels, "pod-labels", "l", "", "labels to filter pods with") - rootCmd.PersistentFlags().StringVarP(&nodeLabels, "node-labels", "", "", "labels to filter nodes with") - rootCmd.PersistentFlags().StringVarP(&namespaceLabels, "namespace-labels", "n", "", "labels to filter namespaces with") - rootCmd.PersistentFlags().StringVarP(&kubeContext, "context", "", "", "context to use for Kubernetes config") - rootCmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", capacity.TableOutput, fmt.Sprintf("output format for information (supports: %v)", capacity.SupportedOutputs())) + rootCmd.PersistentFlags().BoolVarP(&showContainers, + "containers", "c", false, "includes 
containers in output") + rootCmd.PersistentFlags().BoolVarP(&showPods, + "pods", "p", false, "includes pods in output") + rootCmd.PersistentFlags().BoolVarP(&showUtil, + "util", "u", false, "includes resource utilization in output") + rootCmd.PersistentFlags().StringVarP(&podLabels, + "pod-labels", "l", "", "labels to filter pods with") + rootCmd.PersistentFlags().StringVarP(&nodeLabels, + "node-labels", "", "", "labels to filter nodes with") + rootCmd.PersistentFlags().StringVarP(&namespaceLabels, + "namespace-labels", "n", "", "labels to filter namespaces with") + rootCmd.PersistentFlags().StringVarP(&kubeContext, + "context", "", "", "context to use for Kubernetes config") + rootCmd.PersistentFlags().StringVarP(&sortBy, + "sort", "", "name", + fmt.Sprintf("attribute to sort results by (supports: %v)", capacity.SupportedSortAttributes)) + + rootCmd.PersistentFlags().StringVarP(&outputFormat, + "output", "o", capacity.TableOutput, + fmt.Sprintf("output format for information (supports: %v)", capacity.SupportedOutputs())) } // Execute is the primary entrypoint for this CLI diff --git a/pkg/cmd/version.go b/pkg/cmd/version.go index 041739bc..09844b04 100644 --- a/pkg/cmd/version.go +++ b/pkg/cmd/version.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
@@ -28,6 +28,6 @@ var versionCmd = &cobra.Command{ Use: "version", Short: "Print the version number of kube-capacity", Run: func(cmd *cobra.Command, args []string) { - fmt.Println("kube-capacity version 0.2.0") + fmt.Println("kube-capacity version 0.3.0") }, } diff --git a/pkg/kube/clientset.go b/pkg/kube/clientset.go index 0537358a..4e698d84 100644 --- a/pkg/kube/clientset.go +++ b/pkg/kube/clientset.go @@ -1,4 +1,4 @@ -// Copyright 2019 Rob Scott +// Copyright 2019 Kube Capacity Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.