From aab46f6aad5439c6572c8a9af7c1349457ab0048 Mon Sep 17 00:00:00 2001 From: derailed Date: Wed, 1 Jan 2020 23:00:22 -0700 Subject: [PATCH] fix for #452 --- change_logs/release_0.10.5.md | 2 +- change_logs/release_0.10.6.md | 25 +++ internal/client/gvr.go | 1 + internal/dao/benchmark.go | 4 +- internal/dao/container.go | 4 +- internal/dao/context.go | 4 +- internal/dao/cronjob.go | 4 +- internal/dao/dp.go | 8 +- internal/dao/ds.go | 6 +- internal/dao/job.go | 4 +- internal/dao/pod.go | 4 +- internal/dao/portforward.go | 4 +- internal/dao/screen_dump.go | 4 +- internal/dao/sts.go | 8 +- internal/dao/svc.go | 4 +- internal/keys.go | 1 + internal/model/node.go | 124 +++---------- internal/model/node_test.go | 70 ++++++++ internal/model/pod.go | 60 +++---- internal/model/pod_test.go | 52 ++++++ internal/model/test_assets/n1.json | 275 +++++++++++++++++++++++++++++ internal/model/test_assets/p1.json | 146 +++++++++++++++ internal/render/cm.go | 77 ++++++-- internal/render/cm_test.go | 17 +- internal/render/dp.go | 1 + internal/render/dp_test.go | 16 +- internal/render/node.go | 41 +++-- internal/render/node_test.go | 44 ++--- internal/render/pod.go | 33 ++-- internal/render/pod_test.go | 44 +++-- internal/view/logs_extender.go | 1 - internal/view/node.go | 15 ++ internal/view/pod.go | 16 ++ internal/watch/factory.go | 14 +- 34 files changed, 885 insertions(+), 248 deletions(-) create mode 100644 change_logs/release_0.10.6.md create mode 100644 internal/model/node_test.go create mode 100644 internal/model/pod_test.go create mode 100644 internal/model/test_assets/n1.json create mode 100644 internal/model/test_assets/p1.json diff --git a/change_logs/release_0.10.5.md b/change_logs/release_0.10.5.md index de3568e1..d55773c4 100644 --- a/change_logs/release_0.10.5.md +++ b/change_logs/release_0.10.5.md @@ -1,6 +1,6 @@ -# Release v0.10.4 +# Release v0.10.5 ## Notes diff --git a/change_logs/release_0.10.6.md b/change_logs/release_0.10.6.md new file mode 100644 index 
00000000..aeb89706 --- /dev/null +++ b/change_logs/release_0.10.6.md @@ -0,0 +1,25 @@ + + +# Release v0.10.6 + +## Notes + +Thank you to all that contributed with flushing out issues and enhancements for K9s! I'll try to mark some of these issues as fixed. But if you don't mind grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue please help me verify and close. Your support, kindness and awesome suggestions to make K9s better is as ever very much noticed and appreciated! + +Also if you dig this tool, please make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer) + +--- + +## Change Logs + +Maintenance release! + +--- + +## Resolved Bugs/Features + +* [Issue #452](https://github.com/derailed/k9s/issues/452) + +--- + + © 2019 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0) diff --git a/internal/client/gvr.go b/internal/client/gvr.go index 689db250..c22e5f81 100644 --- a/internal/client/gvr.go +++ b/internal/client/gvr.go @@ -41,6 +41,7 @@ func NewGVR(gvr string) GVR { return GVR{raw: gvr, g: g, v: v, r: r, sr: sr} } +// NewGVRFromMeta builds a gvr from resource metadata. func NewGVRFromMeta(a metav1.APIResource) GVR { return GVR{ raw: path.Join(a.Group, a.Version, a.Name), diff --git a/internal/dao/benchmark.go b/internal/dao/benchmark.go index c547678e..0babc1d5 100644 --- a/internal/dao/benchmark.go +++ b/internal/dao/benchmark.go @@ -9,8 +9,8 @@ type Benchmark struct { Generic } -var _ Accessor = &Benchmark{} -var _ Nuker = &Benchmark{} +var _ Accessor = (*Benchmark)(nil) +var _ Nuker = (*Benchmark)(nil) // Delete a Benchmark. 
func (d *Benchmark) Delete(path string, cascade, force bool) error { diff --git a/internal/dao/container.go b/internal/dao/container.go index 43c2e1d6..379910fb 100644 --- a/internal/dao/container.go +++ b/internal/dao/container.go @@ -18,8 +18,8 @@ type Container struct { Generic } -var _ Accessor = &Container{} -var _ Loggable = &Container{} +var _ Accessor = (*Container)(nil) +var _ Loggable = (*Container)(nil) // TailLogs tails a given container logs func (c *Container) TailLogs(ctx context.Context, logChan chan<- string, opts LogOptions) error { diff --git a/internal/dao/context.go b/internal/dao/context.go index 4015018a..361ee6e6 100644 --- a/internal/dao/context.go +++ b/internal/dao/context.go @@ -16,8 +16,8 @@ type Context struct { Generic } -var _ Accessor = &Context{} -var _ Switchable = &Context{} +var _ Accessor = (*Context)(nil) +var _ Switchable = (*Context)(nil) func (c *Context) config() *client.Config { return c.Factory.Client().Config() diff --git a/internal/dao/cronjob.go b/internal/dao/cronjob.go index dc4bc04b..d6636fa3 100644 --- a/internal/dao/cronjob.go +++ b/internal/dao/cronjob.go @@ -14,8 +14,8 @@ type CronJob struct { Generic } -var _ Accessor = &CronJob{} -var _ Runnable = &CronJob{} +var _ Accessor = (*CronJob)(nil) +var _ Runnable = (*CronJob)(nil) // Run a CronJob. func (c *CronJob) Run(path string) error { diff --git a/internal/dao/dp.go b/internal/dao/dp.go index eee7a911..e05420ff 100644 --- a/internal/dao/dp.go +++ b/internal/dao/dp.go @@ -20,10 +20,10 @@ type Deployment struct { Generic } -var _ Accessor = &Deployment{} -var _ Loggable = &Deployment{} -var _ Restartable = &Deployment{} -var _ Scalable = &Deployment{} +var _ Accessor = (*Deployment)(nil) +var _ Loggable = (*Deployment)(nil) +var _ Restartable = (*Deployment)(nil) +var _ Scalable = (*Deployment)(nil) // Scale a Deployment. 
func (d *Deployment) Scale(path string, replicas int32) error { diff --git a/internal/dao/ds.go b/internal/dao/ds.go index b74b58d4..2bf76d67 100644 --- a/internal/dao/ds.go +++ b/internal/dao/ds.go @@ -25,9 +25,9 @@ type DaemonSet struct { Generic } -var _ Accessor = &DaemonSet{} -var _ Loggable = &DaemonSet{} -var _ Restartable = &DaemonSet{} +var _ Accessor = (*DaemonSet)(nil) +var _ Loggable = (*DaemonSet)(nil) +var _ Restartable = (*DaemonSet)(nil) // Restart a DaemonSet rollout. func (d *DaemonSet) Restart(path string) error { diff --git a/internal/dao/job.go b/internal/dao/job.go index e3f8a834..6be9cc98 100644 --- a/internal/dao/job.go +++ b/internal/dao/job.go @@ -16,8 +16,8 @@ type Job struct { Generic } -var _ Accessor = &Job{} -var _ Loggable = &Job{} +var _ Accessor = (*Job)(nil) +var _ Loggable = (*Job)(nil) // TailLogs tail logs for all pods represented by this Job. func (j *Job) TailLogs(ctx context.Context, c chan<- string, opts LogOptions) error { diff --git a/internal/dao/pod.go b/internal/dao/pod.go index 1b3be251..12b35eed 100644 --- a/internal/dao/pod.go +++ b/internal/dao/pod.go @@ -28,8 +28,8 @@ type Pod struct { Generic } -var _ Accessor = &Pod{} -var _ Loggable = &Pod{} +var _ Accessor = (*Pod)(nil) +var _ Loggable = (*Pod)(nil) // Logs fetch container logs for a given pod and container. func (p *Pod) Logs(path string, opts *v1.PodLogOptions) (*restclient.Request, error) { diff --git a/internal/dao/portforward.go b/internal/dao/portforward.go index da302f4c..b3cf6963 100644 --- a/internal/dao/portforward.go +++ b/internal/dao/portforward.go @@ -9,8 +9,8 @@ type PortForward struct { Generic } -var _ Accessor = &PortForward{} -var _ Nuker = &PortForward{} +var _ Accessor = (*PortForward)(nil) +var _ Nuker = (*PortForward)(nil) // Delete a portforward. 
func (p *PortForward) Delete(path string, cascade, force bool) error { diff --git a/internal/dao/screen_dump.go b/internal/dao/screen_dump.go index 55825cd3..9a7e85a1 100644 --- a/internal/dao/screen_dump.go +++ b/internal/dao/screen_dump.go @@ -9,8 +9,8 @@ type ScreenDump struct { Generic } -var _ Accessor = &ScreenDump{} -var _ Nuker = &ScreenDump{} +var _ Accessor = (*ScreenDump)(nil) +var _ Nuker = (*ScreenDump)(nil) // Delete a ScreenDump. func (d *ScreenDump) Delete(path string, cascade, force bool) error { diff --git a/internal/dao/sts.go b/internal/dao/sts.go index 490bd864..882542d4 100644 --- a/internal/dao/sts.go +++ b/internal/dao/sts.go @@ -20,10 +20,10 @@ type StatefulSet struct { Generic } -var _ Accessor = &StatefulSet{} -var _ Loggable = &StatefulSet{} -var _ Restartable = &StatefulSet{} -var _ Scalable = &StatefulSet{} +var _ Accessor = (*StatefulSet)(nil) +var _ Loggable = (*StatefulSet)(nil) +var _ Restartable = (*StatefulSet)(nil) +var _ Scalable = (*StatefulSet)(nil) // Scale a StatefulSet. func (s *StatefulSet) Scale(path string, replicas int32) error { diff --git a/internal/dao/svc.go b/internal/dao/svc.go index 5eb7d134..d39cd68d 100644 --- a/internal/dao/svc.go +++ b/internal/dao/svc.go @@ -16,8 +16,8 @@ type Service struct { Generic } -var _ Accessor = &Service{} -var _ Loggable = &Service{} +var _ Accessor = (*Service)(nil) +var _ Loggable = (*Service)(nil) // TailLogs tail logs for all pods represented by this Service. 
func (s *Service) TailLogs(ctx context.Context, c chan<- string, opts LogOptions) error { diff --git a/internal/keys.go b/internal/keys.go index f02d55e4..240990b5 100644 --- a/internal/keys.go +++ b/internal/keys.go @@ -24,4 +24,5 @@ const ( KeyCluster ContextKey = "cluster" KeyApp ContextKey = "app" KeyStyles ContextKey = "styles" + KeyMetrics ContextKey = "metrics" ) diff --git a/internal/model/node.go b/internal/model/node.go index 68a2eb23..8386239b 100644 --- a/internal/model/node.go +++ b/internal/model/node.go @@ -3,19 +3,18 @@ package model import ( "context" "fmt" + "time" - "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal" "github.com/derailed/k9s/internal/dao" "github.com/derailed/k9s/internal/render" "github.com/rs/zerolog/log" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) -var _ render.NodeWithMetrics = &NodeWithMetrics{} +type NodeMetricsFunc func() (*mv1beta1.NodeMetricsList, error) // Node represents a node model. type Node struct { @@ -23,65 +22,50 @@ type Node struct { } // List returns a collection of node resources. 
-func (n *Node) List(_ context.Context) ([]runtime.Object, error) { +func (n *Node) List(ctx context.Context) ([]runtime.Object, error) { + defer func(t time.Time) { + log.Debug().Msgf("LIST NODES elapsed %v", time.Since(t)) + }(time.Now()) + + nmx, ok := ctx.Value(internal.KeyMetrics).(*mv1beta1.NodeMetricsList) + if !ok { + log.Warn().Msgf("No node metrics available in context") + } + nn, err := dao.FetchNodes(n.factory) if err != nil { return nil, err } oo := make([]runtime.Object, len(nn.Items)) - for i := range nn.Items { + for i, no := range nn.Items { o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&nn.Items[i]) if err != nil { return nil, err } - oo[i] = &unstructured.Unstructured{Object: o} + oo[i] = &render.NodeWithMetrics{ + Raw: &unstructured.Unstructured{Object: o}, + MX: nodeMetricsFor(MetaFQN(no.ObjectMeta), nmx), + } } return oo, nil } -func nameFromMeta(m map[string]interface{}) string { - meta, ok := m["metadata"].(map[string]interface{}) - if !ok { - return "n/a" - } - - name, ok := meta["name"].(string) - if !ok { - return "n/a" - } - - return name -} - // Hydrate returns nodes as rows. 
func (n *Node) Hydrate(oo []runtime.Object, rr render.Rows, re Renderer) error { - mx := client.NewMetricsServer(n.factory.Client()) - mmx, err := mx.FetchNodesMetrics() - if err != nil { - log.Warn().Err(err).Msg("No node metrics") - } + defer func(t time.Time) { + log.Debug().Msgf("HYDRATE NODES elapsed %v", time.Since(t)) + }(time.Now()) for i, o := range oo { - no, ok := o.(*unstructured.Unstructured) + nmx, ok := o.(*render.NodeWithMetrics) if !ok { - return fmt.Errorf("expecting unstructured but got %T", o) - } - pods, err := n.nodePods(n.factory, nameFromMeta(no.Object)) - if err != nil { - return err + return fmt.Errorf("expecting *NodeWithMetrics but got %T", o) } - var ( - row render.Row - nmx = NodeWithMetrics{ - object: no, - mx: nodeMetricsFor(o, mmx), - pods: pods, - } - ) - if err := re.Render(&nmx, "", &row); err != nil { + var row render.Row + if err := re.Render(nmx, render.ClusterScope, &row); err != nil { return err } rr[i] = row @@ -90,8 +74,10 @@ func (n *Node) Hydrate(oo []runtime.Object, rr render.Rows, re Renderer) error { return nil } -func nodeMetricsFor(o runtime.Object, mmx *mv1beta1.NodeMetricsList) *mv1beta1.NodeMetrics { - fqn := extractFQN(o) +// ---------------------------------------------------------------------------- +// Helpers... 
+ +func nodeMetricsFor(fqn string, mmx *mv1beta1.NodeMetricsList) *mv1beta1.NodeMetrics { for _, mx := range mmx.Items { if MetaFQN(mx.ObjectMeta) == fqn { return &mx @@ -99,55 +85,3 @@ func nodeMetricsFor(o runtime.Object, mmx *mv1beta1.NodeMetricsList) *mv1beta1.N } return nil } - -func (n *Node) nodePods(f dao.Factory, node string) ([]*v1.Pod, error) { - pp, err := f.List("v1/pods", render.AllNamespaces, true, labels.Everything()) - if err != nil { - return nil, err - } - - pods := make([]*v1.Pod, 0, len(pp)) - for _, p := range pp { - o, ok := p.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("expecting unstructured but got %T", p) - } - var pod v1.Pod - err := runtime.DefaultUnstructuredConverter.FromUnstructured(o.Object, &pod) - if err != nil { - log.Error().Err(err).Msg("Converting Pod") - return nil, err - } - if pod.Spec.NodeName != node || pod.Status.Phase != v1.PodSucceeded { - continue - } - pods = append(pods, &pod) - } - - return pods, nil -} - -// ---------------------------------------------------------------------------- -// Helpers... - -// NodeWithMetrics represents a node with its associated metrics. -type NodeWithMetrics struct { - object runtime.Object - mx *mv1beta1.NodeMetrics - pods []*v1.Pod -} - -// Object returns a node. -func (n *NodeWithMetrics) Object() runtime.Object { - return n.object -} - -// Metrics returns the node metrics. -func (n *NodeWithMetrics) Metrics() *mv1beta1.NodeMetrics { - return n.mx -} - -// Pods return pods running on this node. 
-func (n *NodeWithMetrics) Pods() []*v1.Pod { - return n.pods -} diff --git a/internal/model/node_test.go b/internal/model/node_test.go new file mode 100644 index 00000000..d20b1a25 --- /dev/null +++ b/internal/model/node_test.go @@ -0,0 +1,70 @@ +package model_test + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "testing" + + "github.com/derailed/k9s/internal/model" + "github.com/derailed/k9s/internal/render" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestNodeHydrate(t *testing.T) { + f := makeFactory() + var no model.Node + no.Init("", "v1/nodes", f) + + o := render.NodeWithMetrics{Raw: load(t, "n1")} + rr := make(render.Rows, 1) + assert.Nil(t, no.Hydrate([]runtime.Object{&o}, rr, render.Node{})) + assert.Equal(t, 1, len(rr)) + assert.Equal(t, "minikube", rr[0].ID) + assert.Equal(t, render.Fields{ + "minikube", + "Ready", + "master", + "v1.17.0", + "4.19.81", + "192.168.64.6", + "", + "n/a", + "n/a", + "n/a", + "n/a", + "n/a", + "n/a", + }, rr[0].Fields[:len(rr[0].Fields)-1]) +} + +func BenchmarkNodeHydrate(b *testing.B) { + f := makeFactory() + var no model.Node + no.Init("", "v1/nodes", f) + o := load(b, "n1") + rr := make(render.Rows, 1) + + oo := []runtime.Object{&render.NodeWithMetrics{Raw: o}} + re := render.Node{} + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + no.Hydrate(oo, rr, re) + } +} + +// Helpers... 
+ +func load(t assert.TestingT, n string) *unstructured.Unstructured { + raw, err := ioutil.ReadFile(fmt.Sprintf("test_assets/%s.json", n)) + assert.Nil(t, err) + + var o unstructured.Unstructured + err = json.Unmarshal(raw, &o) + assert.Nil(t, err) + + return &o +} diff --git a/internal/model/pod.go b/internal/model/pod.go index 87fe2d5f..2ce20185 100644 --- a/internal/model/pod.go +++ b/internal/model/pod.go @@ -3,9 +3,9 @@ package model import ( "context" "fmt" + "time" "github.com/derailed/k9s/internal" - "github.com/derailed/k9s/internal/client" "github.com/derailed/k9s/internal/render" "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -21,11 +21,20 @@ type Pod struct { // List returns a collection of nodes. func (p *Pod) List(ctx context.Context) ([]runtime.Object, error) { + defer func(t time.Time) { + log.Debug().Msgf("LIST PODS elapsed %v", time.Since(t)) + }(time.Now()) + oo, err := p.Resource.List(ctx) if err != nil { return oo, err } + pmx, ok := ctx.Value(internal.KeyMetrics).(*mv1beta1.PodMetricsList) + if !ok { + log.Warn().Msgf("expecting context PodMetricsList") + } + sel, ok := ctx.Value(internal.KeyFields).(string) if !ok { return oo, nil @@ -40,14 +49,19 @@ func (p *Pod) List(ctx context.Context) ([]runtime.Object, error) { for _, o := range oo { u, ok := o.(*unstructured.Unstructured) if !ok { - return res, fmt.Errorf("expecting unstructured but got `%T", o) + return res, fmt.Errorf("expecting *unstructured.Unstructured but got `%T", o) } + if nodeName == "" { + res = append(res, &render.PodWithMetrics{Raw: u, MX: podMetricsFor(o, pmx)}) + continue + } + spec, ok := u.Object["spec"].(map[string]interface{}) if !ok { return res, fmt.Errorf("expecting interface map but got `%T", o) } - if nodeName == "" || spec["nodeName"] == nodeName { - res = append(res, o) + if spec["nodeName"] == nodeName { + res = append(res, &render.PodWithMetrics{Raw: u, MX: podMetricsFor(o, pmx)}) } } @@ -56,19 +70,18 @@ func (p *Pod) 
List(ctx context.Context) ([]runtime.Object, error) { // Hydrate returns pod resources as rows. func (p *Pod) Hydrate(oo []runtime.Object, rr render.Rows, re Renderer) error { - mx := client.NewMetricsServer(p.factory.Client()) - mmx, err := mx.FetchPodsMetrics(p.namespace) - if err != nil { - log.Warn().Err(err).Msgf("No metrics found for pod") - } + defer func(t time.Time) { + log.Debug().Msgf("HYDRATE PODS elapsed %v", time.Since(t)) + }(time.Now()) var index int for _, o := range oo { - var ( - row render.Row - pmx = PodWithMetrics{object: o, mx: podMetricsFor(o, mmx)} - ) - if err := re.Render(&pmx, p.namespace, &row); err != nil { + po, ok := o.(*render.PodWithMetrics) + if !ok { + return fmt.Errorf("expecting *PodWithMetric but got %T", po) + } + var row render.Row + if err := re.Render(po, p.namespace, &row); err != nil { return err } rr[index] = row @@ -78,6 +91,9 @@ func (p *Pod) Hydrate(oo []runtime.Object, rr render.Rows, re Renderer) error { return nil } +// ---------------------------------------------------------------------------- +// Helpers... + func podMetricsFor(o runtime.Object, mmx *mv1beta1.PodMetricsList) *mv1beta1.PodMetrics { fqn := extractFQN(o) for _, mx := range mmx.Items { @@ -87,19 +103,3 @@ func podMetricsFor(o runtime.Object, mmx *mv1beta1.PodMetricsList) *mv1beta1.Pod } return nil } - -// PodWithMetrics represents a pod and its metrics. -type PodWithMetrics struct { - object runtime.Object - mx *mv1beta1.PodMetrics -} - -// Object returns a pod. -func (p *PodWithMetrics) Object() runtime.Object { - return p.object -} - -// Metrics returns the metrics associated with the pod. 
-func (p *PodWithMetrics) Metrics() *mv1beta1.PodMetrics { - return p.mx -} diff --git a/internal/model/pod_test.go b/internal/model/pod_test.go new file mode 100644 index 00000000..6f080d9d --- /dev/null +++ b/internal/model/pod_test.go @@ -0,0 +1,52 @@ +package model_test + +import ( + "testing" + + "github.com/derailed/k9s/internal/model" + "github.com/derailed/k9s/internal/render" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestPodHydrate(t *testing.T) { + f := makeFactory() + var po model.Pod + po.Init("", "v1/pods", f) + + o := render.PodWithMetrics{Raw: load(t, "p1")} + rr := make(render.Rows, 1) + assert.Nil(t, po.Hydrate([]runtime.Object{&o}, rr, render.Pod{})) + assert.Equal(t, 1, len(rr)) + assert.Equal(t, "default/nginx-7fb78fb6d8-2w75j", rr[0].ID) + assert.Equal(t, render.Fields{ + "default", + "nginx-7fb78fb6d8-2w75j", + "1/1", + "Running", + "0", + "n/a", + "n/a", + "n/a", + "n/a", + "10.44.0.229", + "gke-k9s-default-pool-0fa2fb89-lbtf", + "GA", + }, rr[0].Fields[:len(rr[0].Fields)-1]) +} + +func BenchmarkPodHydrate(b *testing.B) { + f := makeFactory() + var po model.Pod + po.Init("", "v1/pods", f) + o := load(b, "p1") + rr := make(render.Rows, 1) + + oo := []runtime.Object{&render.PodWithMetrics{Raw: o}} + re := render.Pod{} + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + po.Hydrate(oo, rr, re) + } +} diff --git a/internal/model/test_assets/n1.json b/internal/model/test_assets/n1.json new file mode 100644 index 00000000..5ef1f87a --- /dev/null +++ b/internal/model/test_assets/n1.json @@ -0,0 +1,275 @@ +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "annotations": { + "kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock", + "node.alpha.kubernetes.io/ttl": "0", + "volumes.kubernetes.io/controller-managed-attach-detach": "true" + }, + "creationTimestamp": "2019-12-31T20:49:21Z", + "labels": { + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", 
+ "kubernetes.io/arch": "amd64", + "kubernetes.io/hostname": "minikube", + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/master": "" + }, + "name": "minikube", + "resourceVersion": "214450", + "selfLink": "/api/v1/nodes/minikube", + "uid": "a33a26f0-7688-47b6-8dbf-5a04ea7f43d4" + }, + "spec": {}, + "status": { + "addresses": [ + { + "address": "192.168.64.6", + "type": "InternalIP" + }, + { + "address": "minikube", + "type": "Hostname" + } + ], + "allocatable": { + "cpu": "4", + "ephemeral-storage": "16954240Ki", + "hugepages-2Mi": "0", + "memory": "8163684Ki", + "pods": "110" + }, + "capacity": { + "cpu": "4", + "ephemeral-storage": "16954240Ki", + "hugepages-2Mi": "0", + "memory": "8163684Ki", + "pods": "110" + }, + "conditions": [ + { + "lastHeartbeatTime": "2020-01-01T22:05:55Z", + "lastTransitionTime": "2019-12-31T20:49:18Z", + "message": "kubelet has sufficient memory available", + "reason": "KubeletHasSufficientMemory", + "status": "False", + "type": "MemoryPressure" + }, + { + "lastHeartbeatTime": "2020-01-01T22:05:55Z", + "lastTransitionTime": "2019-12-31T20:49:18Z", + "message": "kubelet has no disk pressure", + "reason": "KubeletHasNoDiskPressure", + "status": "False", + "type": "DiskPressure" + }, + { + "lastHeartbeatTime": "2020-01-01T22:05:55Z", + "lastTransitionTime": "2019-12-31T20:49:18Z", + "message": "kubelet has sufficient PID available", + "reason": "KubeletHasSufficientPID", + "status": "False", + "type": "PIDPressure" + }, + { + "lastHeartbeatTime": "2020-01-01T22:05:55Z", + "lastTransitionTime": "2019-12-31T20:49:22Z", + "message": "kubelet is posting ready status", + "reason": "KubeletReady", + "status": "True", + "type": "Ready" + } + ], + "daemonEndpoints": { + "kubeletEndpoint": { + "Port": 10250 + } + }, + "images": [ + { + "names": [ + "quay.io/kubernetes-ingress-controller/nginx-ingress-controller@sha256:d0b22f715fcea5598ef7f869d308b55289a3daaa12922fa52a1abf17703c88e7", + 
"quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1" + ], + "sizeBytes": 483167446 + }, + { + "names": [ + "istio/proxyv2@sha256:236527816ff67f8492d7286775e09c28e207aee2f6f3c3d9258cd2248af4afa5", + "istio/proxyv2:1.2.2" + ], + "sizeBytes": 369614978 + }, + { + "names": [ + "quay.io/kiali/kiali@sha256:60ceb57682e95fa3fb7c6e12d797f21c9e242c5583fa024a859d1085d0985c7b", + "quay.io/kiali/kiali:v0.20" + ], + "sizeBytes": 344083595 + }, + { + "names": [ + "istio/kubectl@sha256:a94f8f992bc1e996319a58ff934f9c5e6658e2338fb59e1d937f919b8146d050", + "istio/kubectl:1.2.2" + ], + "sizeBytes": 341145787 + }, + { + "names": [ + "istio/galley@sha256:786bb02b6d425697826ce740d723664beababf7a513eb8d4c95b42b35a99e91d", + "istio/galley:1.2.2" + ], + "sizeBytes": 306543175 + }, + { + "names": [ + "istio/pilot@sha256:ab08845a7f4d1fd44c8481b35161a8da0cbf880f3d4f690740aec27350758a95", + "istio/pilot:1.2.2" + ], + "sizeBytes": 303914365 + }, + { + "names": [ + "k8s.gcr.io/etcd@sha256:4afb99b4690b418ffc2ceb67e1a17376457e441c1f09ab55447f0aaf992fa646", + "k8s.gcr.io/etcd:3.4.3-0" + ], + "sizeBytes": 288426917 + }, + { + "names": [ + "grafana/grafana@sha256:d66b41cf7e0586274ca3e15e03299e4cfde48019fd756bb97cc9db57da9b0c86", + "grafana/grafana:6.1.6" + ], + "sizeBytes": 245005426 + }, + { + "names": [ + "k8s.gcr.io/kube-apiserver@sha256:e3ec33d533257902ad9ebe3d399c17710e62009201a7202aec941e351545d662", + "k8s.gcr.io/kube-apiserver:v1.17.0" + ], + "sizeBytes": 170957331 + }, + { + "names": [ + "k8s.gcr.io/kube-controller-manager@sha256:0438efb5098a2ca634ea8c6b0d804742b733d0d13fd53cf62c73e32c659a3c39", + "k8s.gcr.io/kube-controller-manager:v1.17.0" + ], + "sizeBytes": 160877075 + }, + { + "names": [ + "k8s.gcr.io/kube-proxy@sha256:b2ba9441af30261465e5c41be63e462d0050b09ad280001ae731f399b2b00b75", + "k8s.gcr.io/kube-proxy:v1.17.0" + ], + "sizeBytes": 115960823 + }, + { + "names": [ + "k8s.gcr.io/nginx-slim@sha256:8b4501fe0fe221df663c22e16539f399e89594552f400408303c42f3dd8d0e52", + 
"k8s.gcr.io/nginx-slim:0.8" + ], + "sizeBytes": 110487599 + }, + { + "names": [ + "prom/prometheus@sha256:1224ee30a3be668e0b22444773c4c1b750778af492094b6cd375c780c7526e22", + "prom/prometheus:v2.8.0" + ], + "sizeBytes": 108629897 + }, + { + "names": [ + "istio/mixer@sha256:886726967363477eeba4cbf48675b058bcf833c932763b0964db80390fc06ceb", + "istio/mixer:1.2.2" + ], + "sizeBytes": 97783922 + }, + { + "names": [ + "k8s.gcr.io/kube-scheduler@sha256:5215c4216a65f7e76c1895ba951a12dc1c947904a91810fc66a544ff1d7e87db", + "k8s.gcr.io/kube-scheduler:v1.17.0" + ], + "sizeBytes": 94431763 + }, + { + "names": [ + "kubernetesui/dashboard:v2.0.0-beta8" + ], + "sizeBytes": 90835427 + }, + { + "names": [ + "k8s.gcr.io/kube-addon-manager:v9.0.2" + ], + "sizeBytes": 83076028 + }, + { + "names": [ + "gcr.io/k8s-minikube/storage-provisioner:v1.8.1" + ], + "sizeBytes": 80815640 + }, + { + "names": [ + "istio/citadel@sha256:1e8065b277cb79a32ef617f7af468f9afe5b21ec2e0b42245d029c59fe3ce435", + "istio/citadel:1.2.2" + ], + "sizeBytes": 68454561 + }, + { + "names": [ + "istio/sidecar_injector@sha256:c8f6f5fb1bb2434f68199e06b124e85dc58a3879bf1275a4d39c400836bd3ca4", + "istio/sidecar_injector:1.2.2" + ], + "sizeBytes": 63917960 + }, + { + "names": [ + "k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892", + "k8s.gcr.io/metrics-server-amd64:v0.2.1" + ], + "sizeBytes": 42541759 + }, + { + "names": [ + "k8s.gcr.io/coredns@sha256:7ec975f167d815311a7136c32e70735f0d00b73781365df1befd46ed35bd4fe7", + "k8s.gcr.io/coredns:1.6.5" + ], + "sizeBytes": 41578211 + }, + { + "names": [ + "kubernetesui/metrics-scraper:v1.0.2" + ], + "sizeBytes": 40101552 + }, + { + "names": [ + "jaegertracing/all-in-one@sha256:29c921747eddfa96c97cf96aac0180e97bfdfcbea25e230daef09711103d1f61", + "jaegertracing/all-in-one:1.9" + ], + "sizeBytes": 37328894 + }, + { + "names": [ + "k8s.gcr.io/pause@sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea", + 
"k8s.gcr.io/pause:3.1" + ], + "sizeBytes": 742472 + } + ], + "nodeInfo": { + "architecture": "amd64", + "bootID": "478c895b-009b-4b6e-9115-63502eaa68cb", + "containerRuntimeVersion": "docker://19.3.5", + "kernelVersion": "4.19.81", + "kubeProxyVersion": "v1.17.0", + "kubeletVersion": "v1.17.0", + "machineID": "6c484e2bfebf46f2ac854c484bcfa392", + "operatingSystem": "linux", + "osImage": "Buildroot 2019.02.7", + "systemUUID": "dbc511ea-0000-0000-a42f-acde48001122" + } + } +} \ No newline at end of file diff --git a/internal/model/test_assets/p1.json b/internal/model/test_assets/p1.json new file mode 100644 index 00000000..ea8d8dad --- /dev/null +++ b/internal/model/test_assets/p1.json @@ -0,0 +1,146 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "kubectl.kubernetes.io/restartedAt": "2019-12-31T12:26:47-07:00" + }, + "creationTimestamp": "2019-12-31T19:27:22Z", + "generateName": "nginx-7fb78fb6d8-", + "labels": { + "app": "nginx", + "pod-template-hash": "7fb78fb6d8" + }, + "name": "nginx-7fb78fb6d8-2w75j", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ReplicaSet", + "name": "nginx-7fb78fb6d8", + "uid": "7ccd0600-2c03-11ea-883f-42010a800044" + } + ], + "resourceVersion": "87290191", + "selfLink": "/api/v1/namespaces/default/pods/nginx-7fb78fb6d8-2w75j", + "uid": "91bb1cf2-2c03-11ea-883f-42010a800044" + }, + "spec": { + "containers": [ + { + "image": "k8s.gcr.io/nginx-slim:0.8", + "imagePullPolicy": "IfNotPresent", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ], + "resources": { + "limits": { + "cpu": "200m", + "memory": "20Mi" + }, + "requests": { + "cpu": "200m", + "memory": "20Mi" + } + }, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-dsl46", 
+ "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "nodeName": "gke-k9s-default-pool-0fa2fb89-lbtf", + "priority": 0, + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-dsl46", + "secret": { + "defaultMode": 420, + "secretName": "default-token-dsl46" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-12-31T19:27:23Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-12-31T19:27:25Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-12-31T19:27:25Z", + "status": "True", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-12-31T19:27:22Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "docker://90e0abf7a779dd76d36038883312baed57a8351428a1d6340df3cff698f51809", + "image": "k8s.gcr.io/nginx-slim:0.8", + "imageID": "docker-pullable://k8s.gcr.io/nginx-slim@sha256:8b4501fe0fe221df663c22e16539f399e89594552f400408303c42f3dd8d0e52", + "lastState": {}, + "name": "nginx", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-12-31T19:27:24Z" + } + } + } + ], + "hostIP": "10.128.0.15", + "phase": "Running", + "podIP": "10.44.0.229", + "qosClass": "Guaranteed", + "startTime": "2019-12-31T19:27:23Z" + } +} \ No newline at end of file diff --git a/internal/render/cm.go 
b/internal/render/cm.go index 602496f3..7f63eeae 100644 --- a/internal/render/cm.go +++ b/internal/render/cm.go @@ -3,11 +3,11 @@ package render import ( "fmt" "strconv" + "time" "github.com/derailed/tview" - v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" ) // ConfigMap renders a K8s ConfigMap to screen. @@ -33,27 +33,78 @@ func (ConfigMap) Header(ns string) HeaderRow { } // Render renders a K8s resource to screen. +// BOZO!! 44allocs down to 5allocs avoiding marshal?? func (c ConfigMap) Render(o interface{}, ns string, r *Row) error { raw, ok := o.(*unstructured.Unstructured) if !ok { return fmt.Errorf("Expected ConfigMap, but got %T", o) } - var cm v1.ConfigMap - err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.Object, &cm) + + meta, ok := raw.Object["metadata"].(map[string]interface{}) + if !ok { + return fmt.Errorf("No meta") + } + + n, nss := extractMetaField(meta, "name"), extractMetaField(meta, "namespace") + r.ID = FQN(nss, n) + r.Fields = make(Fields, 0, len(c.Header(ns))) + if isAllNamespace(ns) { + r.Fields = append(r.Fields, nss) + } + + var size int + data, ok := raw.Object["data"] + if ok { + d, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("expecting map but got %T", raw.Object["data"]) + } + size = len(d) + } + t, err := extractMetaTime(meta) if err != nil { return err } - - r.ID = MetaFQN(cm.ObjectMeta) - r.Fields = make(Fields, 0, len(c.Header(ns))) - if isAllNamespace(ns) { - r.Fields = append(r.Fields, cm.Namespace) - } r.Fields = append(r.Fields, - cm.Name, - strconv.Itoa(len(cm.Data)), - toAge(cm.ObjectMeta.CreationTimestamp), + n, + strconv.Itoa(size), + toAge(t), ) + // var cm v1.ConfigMap + // err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.Object, &cm) + // if err != nil { + // return err + // } + + // r.ID = MetaFQN(cm.ObjectMeta) + // r.Fields = make(Fields, 0, 
len(c.Header(ns))) + // if isAllNamespace(ns) { + // r.Fields = append(r.Fields, cm.Namespace) + // } + // r.Fields = append(r.Fields, + // cm.Name, + // strconv.Itoa(len(cm.Data)), + // toAge(cm.ObjectMeta.CreationTimestamp), + // ) + return nil } + +func extractMetaTime(m map[string]interface{}) (metav1.Time, error) { + f, ok := m["creationTimestamp"] + if !ok { + return metav1.Time{}, fmt.Errorf("failed to extract time from meta") + } + + t, ok := f.(string) + if !ok { + return metav1.Time{}, fmt.Errorf("failed to extract time from field") + } + + ti, err := time.Parse(time.RFC3339, t) + if err != nil { + return metav1.Time{}, err + } + return metav1.Time{Time: ti}, nil +} diff --git a/internal/render/cm_test.go b/internal/render/cm_test.go index 62b615a3..837a5c46 100644 --- a/internal/render/cm_test.go +++ b/internal/render/cm_test.go @@ -11,18 +11,29 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) -func TestCMRender(t *testing.T) { +func TestCmRender(t *testing.T) { c := render.ConfigMap{} r := render.NewRow(4) - c.Render(load(t, "cm"), "", &r) + assert.Nil(t, c.Render(load(t, "cm"), "", &r)) assert.Equal(t, "default/blee", r.ID) assert.Equal(t, render.Fields{"default", "blee", "2"}, r.Fields[:3]) } +func BenchmarkCmRender(b *testing.B) { + c := render.ConfigMap{} + r := render.NewRow(4) + o := load(b, "cm") + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = c.Render(o, "", &r) + } +} + // Helpers... 
-func load(t *testing.T, n string) *unstructured.Unstructured { +func load(t assert.TestingT, n string) *unstructured.Unstructured { raw, err := ioutil.ReadFile(fmt.Sprintf("assets/%s.json", n)) assert.Nil(t, err) diff --git a/internal/render/dp.go b/internal/render/dp.go index 6175c4f9..efb16641 100644 --- a/internal/render/dp.go +++ b/internal/render/dp.go @@ -58,6 +58,7 @@ func (d Deployment) Render(o interface{}, ns string, r *Row) error { if !ok { return fmt.Errorf("Expected Deployment, but got %T", o) } + var dp appsv1.Deployment err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.Object, &dp) if err != nil { diff --git a/internal/render/dp_test.go b/internal/render/dp_test.go index 71c1003d..ea8168c6 100644 --- a/internal/render/dp_test.go +++ b/internal/render/dp_test.go @@ -7,11 +7,23 @@ import ( "github.com/stretchr/testify/assert" ) -func TestDeploymentRender(t *testing.T) { +func TestDpRender(t *testing.T) { c := render.Deployment{} r := render.NewRow(7) - c.Render(load(t, "dp"), "", &r) + assert.Nil(t, c.Render(load(t, "dp"), "", &r)) assert.Equal(t, "icx/icx-db", r.ID) assert.Equal(t, render.Fields{"icx", "icx-db", "1/1", "1", "1"}, r.Fields[:5]) } + +func BenchmarkDpRender(b *testing.B) { + c := render.Deployment{} + r := render.NewRow(7) + o := load(b, "dp") + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = c.Render(o, "", &r) + } +} diff --git a/internal/render/node.go b/internal/render/node.go index a28edba2..94cf6a96 100644 --- a/internal/render/node.go +++ b/internal/render/node.go @@ -5,10 +5,10 @@ import ( "strings" "github.com/derailed/tview" - "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) @@ -17,13 +17,6 @@ const ( nodeLabelRole = "kubernetes.io/role" ) -// NodeWithMetrics represents a resourve object with usage metrics. 
-type NodeWithMetrics interface { - Object() runtime.Object - Metrics() *mv1beta1.NodeMetrics - Pods() []*v1.Pod -} - // Node renders a K8s Node to screen. type Node struct{} @@ -54,29 +47,33 @@ func (Node) Header(_ string) HeaderRow { // Render renders a K8s resource to screen. func (n Node) Render(o interface{}, ns string, r *Row) error { - oo, ok := o.(NodeWithMetrics) + oo, ok := o.(*NodeWithMetrics) if !ok { - return fmt.Errorf("Expected NodeAndMetrics, but got %T", o) + return fmt.Errorf("Expected *NodeWithMetrics, but got %T", o) } + meta, ok := oo.Raw.Object["metadata"].(map[string]interface{}) + if !ok { + return fmt.Errorf("Unable to extract meta") + } + na := extractMetaField(meta, "name") var no v1.Node - err := runtime.DefaultUnstructuredConverter.FromUnstructured(oo.Object().(*unstructured.Unstructured).Object, &no) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(oo.Raw.Object, &no) if err != nil { - log.Error().Err(err).Msg("Converting Node") return err } iIP, eIP := getIPs(no.Status.Addresses) iIP, eIP = missing(iIP), missing(eIP) - c, a, p := gatherNodeMX(&no, oo.Metrics()) + c, a, p := gatherNodeMX(&no, oo.MX) sta := make([]string, 10) status(no.Status, no.Spec.Unschedulable, sta) ro := make([]string, 10) nodeRoles(&no, ro) - r.ID = MetaFQN(no.ObjectMeta) + r.ID = FQN("", na) r.Fields = make(Fields, 0, len(n.Header(ns))) r.Fields = append(r.Fields, no.Name, @@ -101,6 +98,22 @@ func (n Node) Render(o interface{}, ns string, r *Row) error { // ---------------------------------------------------------------------------- // Helpers... +// NodeWithMetrics represents a node with its associated metrics. +type NodeWithMetrics struct { + Raw *unstructured.Unstructured + MX *mv1beta1.NodeMetrics +} + +// GetObjectKind returns a schema object. +func (n *NodeWithMetrics) GetObjectKind() schema.ObjectKind { + return nil +} + +// DeepCopyObject returns the node object itself; no deep copy is made.
+func (n *NodeWithMetrics) DeepCopyObject() runtime.Object { + return n +} + func gatherNodeMX(no *v1.Node, mx *mv1beta1.NodeMetrics) (c metric, a metric, p metric) { c, a, p = noMetric(), noMetric(), noMetric() if mx == nil { diff --git a/internal/render/node_test.go b/internal/render/node_test.go index ff19fe5b..fb3375cb 100644 --- a/internal/render/node_test.go +++ b/internal/render/node_test.go @@ -5,23 +5,19 @@ import ( "github.com/derailed/k9s/internal/render" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) func TestNodeRender(t *testing.T) { - pom := nodeMetrics{ - load(t, "no"), - makeNodeMX("n1", "10m", "10Mi"), - []*v1.Pod{}, + pom := render.NodeWithMetrics{ + Raw: load(t, "no"), + MX: makeNodeMX("n1", "10m", "10Mi"), } var no render.Node r := render.NewRow(14) - err := no.Render(pom, "", &r) + err := no.Render(&pom, "", &r) assert.Nil(t, err) assert.Equal(t, "minikube", r.ID) @@ -29,27 +25,23 @@ func TestNodeRender(t *testing.T) { assert.Equal(t, e, r.Fields[:13]) } +func BenchmarkNodeRender(b *testing.B) { + pom := render.NodeWithMetrics{ + Raw: load(b, "no"), + MX: makeNodeMX("n1", "10m", "10Mi"), + } + var no render.Node + r := render.NewRow(14) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = no.Render(&pom, "", &r) + } +} + // ---------------------------------------------------------------------------- // Helpers... 
-type nodeMetrics struct { - o *unstructured.Unstructured - m *mv1beta1.NodeMetrics - pod []*v1.Pod -} - -func (p nodeMetrics) Object() runtime.Object { - return p.o -} - -func (p nodeMetrics) Metrics() *mv1beta1.NodeMetrics { - return p.m -} - -func (p nodeMetrics) Pods() []*v1.Pod { - return p.pod -} - func makeNodeMX(name, cpu, mem string) *mv1beta1.NodeMetrics { return &mv1beta1.NodeMetrics{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/render/pod.go b/internal/render/pod.go index 7c65db7d..2f3dbfe0 100644 --- a/internal/render/pod.go +++ b/internal/render/pod.go @@ -7,21 +7,15 @@ import ( "github.com/derailed/tview" "github.com/gdamore/tcell" - "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/util/node" mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) -// PodWithMetrics represents a resourve object with usage metrics. -type PodWithMetrics interface { - Object() runtime.Object - Metrics() *mv1beta1.PodMetrics -} - // Pod renders a K8s Pod to screen. type Pod struct{} @@ -94,21 +88,20 @@ func (Pod) Header(ns string) HeaderRow { // Render renders a K8s resource to screen. 
func (p Pod) Render(o interface{}, ns string, r *Row) error { - oo, ok := o.(PodWithMetrics) + oo, ok := o.(*PodWithMetrics) if !ok { - return fmt.Errorf("Expected PodAndMetrics, but got %T", o) + return fmt.Errorf("Expected *PodWithMetrics, but got %T", o) } var po v1.Pod - err := runtime.DefaultUnstructuredConverter.FromUnstructured(oo.Object().(*unstructured.Unstructured).Object, &po) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(oo.Raw.Object, &po) if err != nil { - log.Error().Err(err).Msg("Expecting a pod resource") return err } ss := po.Status.ContainerStatuses cr, _, rc := p.statuses(ss) - c, perc := p.gatherPodMX(&po, oo.Metrics()) + c, perc := p.gatherPodMX(&po, oo.MX) r.ID = MetaFQN(po.ObjectMeta) r.Fields = make(Fields, 0, len(p.Header(ns))) @@ -136,6 +129,22 @@ func (p Pod) Render(o interface{}, ns string, r *Row) error { // ---------------------------------------------------------------------------- // Helpers... +// PodWithMetrics represents a pod and its metrics. +type PodWithMetrics struct { + Raw *unstructured.Unstructured + MX *mv1beta1.PodMetrics +} + +// GetObjectKind returns a schema object. +func (p *PodWithMetrics) GetObjectKind() schema.ObjectKind { + return nil +} + +// DeepCopyObject returns the pod object itself; no deep copy is made.
+func (p *PodWithMetrics) DeepCopyObject() runtime.Object { + return p +} + func (*Pod) gatherPodMX(pod *v1.Pod, mx *mv1beta1.PodMetrics) (c, p metric) { c, p = noMetric(), noMetric() if mx == nil { diff --git a/internal/render/pod_test.go b/internal/render/pod_test.go index a735e6fd..5e70397c 100644 --- a/internal/render/pod_test.go +++ b/internal/render/pod_test.go @@ -9,8 +9,6 @@ import ( v1 "k8s.io/api/core/v1" res "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) @@ -61,11 +59,14 @@ func TestPodColorer(t *testing.T) { } func TestPodRender(t *testing.T) { - pom := podMetrics{load(t, "po"), makePodMX("nginx", "10m", "10Mi")} + pom := render.PodWithMetrics{ + Raw: load(t, "po"), + MX: makePodMX("nginx", "10m", "10Mi"), + } var po render.Pod r := render.NewRow(12) - err := po.Render(pom, "", &r) + err := po.Render(&pom, "", &r) assert.Nil(t, err) assert.Equal(t, "default/nginx", r.ID) @@ -73,12 +74,30 @@ func TestPodRender(t *testing.T) { assert.Equal(t, e, r.Fields[:12]) } +func BenchmarkPodRender(b *testing.B) { + pom := render.PodWithMetrics{ + Raw: load(b, "po"), + MX: makePodMX("nginx", "10m", "10Mi"), + } + var po render.Pod + r := render.NewRow(12) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = po.Render(&pom, "", &r) + } +} + func TestPodInitRender(t *testing.T) { - pom := podMetrics{load(t, "po_init"), makePodMX("nginx", "10m", "10Mi")} + pom := render.PodWithMetrics{ + Raw: load(t, "po_init"), + MX: makePodMX("nginx", "10m", "10Mi"), + } var po render.Pod r := render.NewRow(12) - err := po.Render(pom, "", &r) + err := po.Render(&pom, "", &r) assert.Nil(t, err) assert.Equal(t, "default/nginx", r.ID) @@ -89,19 +108,6 @@ func TestPodInitRender(t *testing.T) { // ---------------------------------------------------------------------------- // Helpers... 
-type podMetrics struct { - o *unstructured.Unstructured - m *mv1beta1.PodMetrics -} - -func (p podMetrics) Object() runtime.Object { - return p.o -} - -func (p podMetrics) Metrics() *mv1beta1.PodMetrics { - return p.m -} - func makePodMX(name, cpu, mem string) *mv1beta1.PodMetrics { return &mv1beta1.PodMetrics{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/view/logs_extender.go b/internal/view/logs_extender.go index c2afc73d..9dc21181 100644 --- a/internal/view/logs_extender.go +++ b/internal/view/logs_extender.go @@ -64,7 +64,6 @@ func (l *LogsExtender) showLogs(path string, prev bool) { l.App().Flash().Err(err) return } - l.App().factory.WaitForCacheSync() co := "" if l.containerFn != nil { diff --git a/internal/view/node.go b/internal/view/node.go index 77137937..7a66d420 100644 --- a/internal/view/node.go +++ b/internal/view/node.go @@ -1,9 +1,13 @@ package view import ( + "context" + + "github.com/derailed/k9s/internal" "github.com/derailed/k9s/internal/client" "github.com/derailed/k9s/internal/ui" "github.com/gdamore/tcell" + "github.com/rs/zerolog/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -19,6 +23,7 @@ func NewNode(gvr client.GVR) ResourceViewer { } n.SetBindKeysFn(n.bindKeys) n.GetTable().SetEnterFn(n.showPods) + n.SetContextFn(n.nodeContext) return &n } @@ -34,6 +39,16 @@ func (n *Node) bindKeys(aa ui.KeyActions) { }) } +func (n *Node) nodeContext(ctx context.Context) context.Context { + mx := client.NewMetricsServer(n.App().factory.Client()) + nmx, err := mx.FetchNodesMetrics() + if err != nil { + log.Warn().Err(err).Msgf("No node metrics") + } + + return context.WithValue(ctx, internal.KeyMetrics, nmx) +} + func (n *Node) showPods(app *App, ns, res, sel string) { showPods(app, n.GetTable().GetSelectedItem(), "", "spec.nodeName="+sel) } diff --git a/internal/view/pod.go b/internal/view/pod.go index dff202ca..c55c1ac9 100644 --- a/internal/view/pod.go +++ b/internal/view/pod.go @@ -32,6 +32,7 @@ func NewPod(gvr client.GVR) 
ResourceViewer { p.SetBindKeysFn(p.bindKeys) p.GetTable().SetEnterFn(p.showContainers) p.GetTable().SetColorerFn(render.Pod{}.ColorerFunc()) + p.SetContextFn(p.podMXContext) return &p } @@ -52,6 +53,21 @@ func (p *Pod) bindKeys(aa ui.KeyActions) { }) } +func (p *Pod) podMXContext(ctx context.Context) context.Context { + ns, ok := ctx.Value(internal.KeyNamespace).(string) + if !ok { + log.Error().Msg("Expecting context namespace") + } + log.Debug().Msgf("POD METRICS in NS %q", ns) + mx := client.NewMetricsServer(p.App().factory.Client()) + nmx, err := mx.FetchPodsMetrics(ns) + if err != nil { + log.Warn().Err(err).Msgf("No pods metrics") + } + + return context.WithValue(ctx, internal.KeyMetrics, nmx) +} + func (p *Pod) showContainers(app *App, ns, gvr, path string) { log.Debug().Msgf("SHOW CONTAINERS %q -- %q -- %q", gvr, ns, path) co := NewContainer(client.NewGVR("containers")) diff --git a/internal/watch/factory.go b/internal/watch/factory.go index e5b9061c..c8b9d8ac 100644 --- a/internal/watch/factory.go +++ b/internal/watch/factory.go @@ -63,7 +63,7 @@ func (f *Factory) Terminate() { // List returns a resource collection. func (f *Factory) List(gvr, ns string, wait bool, sel labels.Selector) ([]runtime.Object, error) { defer func(t time.Time) { - log.Debug().Msgf("LIST time %v", time.Since(t)) + log.Debug().Msgf("LIST elapsed %v", time.Since(t)) }(time.Now()) Dump(f) @@ -85,7 +85,7 @@ func (f *Factory) List(gvr, ns string, wait bool, sel labels.Selector) ([]runtim // Get retrieves a given resource.
func (f *Factory) Get(gvr, path string, wait bool, sel labels.Selector) (runtime.Object, error) { defer func(t time.Time) { - log.Debug().Msgf("GET time %v", time.Since(t)) + log.Debug().Msgf("GET elapsed %v", time.Since(t)) }(time.Now()) ns, n := namespaced(path) @@ -105,7 +105,15 @@ func (f *Factory) Get(gvr, path string, wait bool, sel labels.Selector) (runtime func (f *Factory) waitForCacheSync(ns string) { if fac, ok := f.factories[ns]; ok { - fac.WaitForCacheSync(f.stopChan) + // Hang for a sec for the cache to refresh if still not done bail out! + const dur = 1 * time.Second + c := make(chan struct{}) + go func(c chan struct{}) { + <-time.After(dur) + log.Warn().Msgf("Wait for sync timed out!") + close(c) + }(c) + fac.WaitForCacheSync(c) } }