From 391eed9ea445ff7ca4caf465817086a2d78234c5 Mon Sep 17 00:00:00 2001 From: derailed Date: Fri, 6 Mar 2020 10:03:47 -0700 Subject: [PATCH] rework thresholds. Fix #604 #601 #598 #593 --- change_logs/release_v0.17.5.md | 47 +++++++ internal/client/client.go | 1 + internal/config/config_test.go | 28 ++-- internal/config/threshold.go | 123 ++++++------------ internal/config/threshold_test.go | 38 +++--- internal/dao/pod.go | 2 +- internal/view/browser.go | 2 +- internal/view/cluster_info.go | 46 ++++--- internal/view/{port_forward.go => pf.go} | 2 +- .../view/{port_forward_test.go => pf_test.go} | 0 internal/view/pulse.go | 4 +- 11 files changed, 152 insertions(+), 141 deletions(-) create mode 100644 change_logs/release_v0.17.5.md rename internal/view/{port_forward.go => pf.go} (98%) rename internal/view/{port_forward_test.go => pf_test.go} (100%) diff --git a/change_logs/release_v0.17.5.md b/change_logs/release_v0.17.5.md new file mode 100644 index 00000000..ee5ddca6 --- /dev/null +++ b/change_logs/release_v0.17.5.md @@ -0,0 +1,47 @@ + + +# Release v0.17.5 + +## Notes + +Thank you to all that contributed with flushing out issues and enhancements for K9s! I'll try to mark some of these issues as fixed. But if you don't mind grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue please help me verify and close. Your support, kindness and awesome suggestions to make K9s better is as ever very much noticed and appreciated! + +Also if you dig this tool, please consider sponsoring 👆us or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer) + +On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/enQtOTA5MDEyNzI5MTU0LWQ1ZGI3MzliYzZhZWEyNzYxYzA3NjE0YTk1YmFmNzViZjIyNzhkZGI0MmJjYzhlNjdlMGJhYzE2ZGU1NjkyNTM) + +--- + + + +## Thresholds Reloaded! 
In the previous k9s release, we introduced the notion of thresholds to provide an alert mechanism when either the CPU or memory usage goes high on your clusters. Looking at the current solution, we felt we needed a bit more granularity in the severity levels, thanks to feedback from [Eldad Assis](https://github.com/eldada) on this one! So here is the new configuration for cluster thresholds. Please keep in mind this feature is still in flux!

```yaml
# $HOME/.k9s/config.yml
k9s:
  refreshRate: 2
  headless: false
  ...
  # Specify resource thresholds in percent - defaults: critical=90, warn=70
  thresholds:
    cpu:
      critical: 85
      warn: 75
    memory:
      critical: 80
      warn: 70
  ...
```

## Resolved Bugs/Features/PRs

- [Issue #604](https://github.com/derailed/k9s/issues/604)
- [Issue #601](https://github.com/derailed/k9s/issues/601) Thank you [Christian Vent](https://github.com/christian-vent)
- [Issue #598](https://github.com/derailed/k9s/issues/598) `Ctrl-l` will now trigger the benchmarking toggle!
- [Issue #593](https://github.com/derailed/k9s/issues/593)

---

 © 2020 Imhotep Software LLC.
All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0) diff --git a/internal/client/client.go b/internal/client/client.go index 9dc252c2..9741e48c 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -305,6 +305,7 @@ func (a *APIClient) reset() { a.cache = cache.NewLRUExpireCache(cacheSize) a.client, a.dClient, a.nsClient, a.mxsClient = nil, nil, nil, nil + a.cachedClient = nil } func (a *APIClient) supportsMetricsResources() (supported bool) { diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 47e58a15..a7b2cd09 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -299,17 +299,11 @@ var expectedConfig = `k9s: active: ctx thresholds: cpu: - defcon: - - 90 - - 80 - - 75 - - 70 + critical: 90 + warn: 70 memory: - defcon: - - 90 - - 80 - - 75 - - 70 + critical: 90 + warn: 70 ` var resetConfig = `k9s: @@ -331,15 +325,9 @@ var resetConfig = `k9s: active: po thresholds: cpu: - defcon: - - 90 - - 80 - - 75 - - 70 + critical: 90 + warn: 70 memory: - defcon: - - 90 - - 80 - - 75 - - 70 + critical: 90 + warn: 70 ` diff --git a/internal/config/threshold.go b/internal/config/threshold.go index 652e8f5f..e65a995c 100644 --- a/internal/config/threshold.go +++ b/internal/config/threshold.go @@ -1,81 +1,63 @@ package config import ( - "strings" - "github.com/derailed/k9s/internal/client" - "github.com/derailed/k9s/internal/render" ) const ( - // DefCon1 tracks high severity. - DefCon1 DefConLevel = iota + 1 + // SeverityLow tracks low severity. + SeverityLow SeverityLevel = iota - // DefCon2 tracks warn level. - DefCon2 + // SeverityMedium tracks medium severity level. + SeverityMedium - // DefCon3 tracks medium level. - DefCon3 - - // DefCon4 tracks low level. - DefCon4 - - // DefCon5 tracks all cool. - DefCon5 + // SeverityHigh tracks high severity level. + SeverityHigh ) -// DefConLevel tracks defcon severity. 
-type DefConLevel int +// SeverityLevel tracks severity levels. +type SeverityLevel int -// DefCon tracks a resource alert level. -type DefCon struct { - Levels []int `yaml:"defcon,omitempty"` +// Severity tracks a resource severity levels. +type Severity struct { + Critical int `yaml:"critical"` + Warn int `yaml:"warn"` } -// NewDefCon returns a new instance. -func NewDefCon() *DefCon { - return &DefCon{Levels: []int{90, 80, 75, 70}} +// NewSeverity returns a new instance. +func NewSeverity() *Severity { + return &Severity{ + Critical: 90, + Warn: 70, + } } // Validate checks all thresholds and make sure we're cool. If not use defaults. -func (d *DefCon) Validate() { - norm := NewDefCon() - if len(d.Levels) < 4 { - d.Levels = norm.Levels - return +func (s *Severity) Validate() { + norm := NewSeverity() + if !validateRange(s.Warn) { + s.Warn = norm.Warn } - for i, level := range d.Levels { - if !d.isValidRange(level) { - d.Levels[i] = norm.Levels[i] - } + if !validateRange(s.Critical) { + s.Critical = norm.Critical } } -// String returns defcon settings a string. -func (d *DefCon) String() string { - ss := make([]string, len(d.Levels)) - for i := 0; i < len(d.Levels); i++ { - ss[i] = render.PrintPerc(d.Levels[i]) - } - return strings.Join(ss, "|") -} - -func (d *DefCon) isValidRange(v int) bool { - if v < 0 || v > 100 { +func validateRange(v int) bool { + if v <= 0 || v > 100 { return false } - return true } // Threshold tracks threshold to alert user when excided. -type Threshold map[string]*DefCon +type Threshold map[string]*Severity // NewThreshold returns a new threshold. 
func NewThreshold() Threshold { return Threshold{ - "cpu": NewDefCon(), - "memory": NewDefCon(), + "cpu": NewSeverity(), + "memory": NewSeverity(), } } @@ -84,7 +66,7 @@ func (t Threshold) Validate(c client.Connection, ks KubeSettings) { for _, k := range []string{"cpu", "memory"} { v, ok := t[k] if !ok { - t[k] = NewDefCon() + t[k] = NewSeverity() } else { v.Validate() } @@ -92,48 +74,29 @@ func (t Threshold) Validate(c client.Connection, ks KubeSettings) { } // DefConFor returns a defcon level for the current state. -func (t Threshold) DefConFor(k string, v int) DefConLevel { - dc, ok := t[k] +func (t Threshold) LevelFor(k string, v int) SeverityLevel { + s, ok := t[k] if !ok || v < 0 || v > 100 { - return DefCon5 + return SeverityLow } - for i, l := range dc.Levels { - if v >= l { - return dcLevelFor(i) - } + if v >= s.Critical { + return SeverityHigh + } + if v >= s.Warn { + return SeverityMedium } - return DefCon5 + return SeverityLow } // DefConColorFor returns an defcon level associated level. -func (t *Threshold) DefConColorFor(k string, v int) string { - switch t.DefConFor(k, v) { - case DefCon1: +func (t *Threshold) SeverityColor(k string, v int) string { + switch t.LevelFor(k, v) { + case SeverityHigh: return "red" - case DefCon2: + case SeverityMedium: return "orangered" - case DefCon3: - return "orange" default: return "green" } } - -// ---------------------------------------------------------------------------- -// Helpers... 
- -func dcLevelFor(l int) DefConLevel { - switch l { - case 0: - return DefCon1 - case 1: - return DefCon2 - case 2: - return DefCon3 - case 3: - return DefCon4 - default: - return DefCon5 - } -} diff --git a/internal/config/threshold_test.go b/internal/config/threshold_test.go index 748d6ae3..1c1be937 100644 --- a/internal/config/threshold_test.go +++ b/internal/config/threshold_test.go @@ -7,25 +7,25 @@ import ( "github.com/stretchr/testify/assert" ) -func TestDefConValidate(t *testing.T) { +func TestSeverityValidate(t *testing.T) { uu := map[string]struct { - d, e *config.DefCon + d, e *config.Severity }{ "default": { - d: config.NewDefCon(), - e: config.NewDefCon(), + d: config.NewSeverity(), + e: config.NewSeverity(), }, "toast": { - d: &config.DefCon{Levels: []int{10}}, - e: config.NewDefCon(), + d: &config.Severity{Warn: 10}, + e: &config.Severity{Warn: 10, Critical: 90}, }, "negative": { - d: &config.DefCon{Levels: []int{-1, 10, 10, 10}}, - e: &config.DefCon{Levels: []int{90, 10, 10, 10}}, + d: &config.Severity{Warn: -1}, + e: config.NewSeverity(), }, "out-of-range": { - d: &config.DefCon{Levels: []int{150, 200, 10, 300}}, - e: &config.DefCon{Levels: []int{90, 80, 10, 70}}, + d: &config.Severity{Warn: 150}, + e: config.NewSeverity(), }, } @@ -38,41 +38,41 @@ func TestDefConValidate(t *testing.T) { } } -func TestDefConFor(t *testing.T) { +func TestLevelFor(t *testing.T) { uu := map[string]struct { k string v int - e config.DefConLevel + e config.SeverityLevel }{ "normal": { k: "cpu", v: 0, - e: config.DefCon5, + e: config.SeverityLow, }, "4": { k: "cpu", v: 71, - e: config.DefCon4, + e: config.SeverityMedium, }, "3": { k: "cpu", v: 75, - e: config.DefCon3, + e: config.SeverityMedium, }, "2": { k: "cpu", v: 80, - e: config.DefCon2, + e: config.SeverityMedium, }, "1": { k: "cpu", v: 100, - e: config.DefCon1, + e: config.SeverityHigh, }, "over": { k: "cpu", v: 150, - e: config.DefCon5, + e: config.SeverityLow, }, } @@ -80,7 +80,7 @@ func TestDefConFor(t 
*testing.T) { for k := range uu { u := uu[k] t.Run(k, func(t *testing.T) { - assert.Equal(t, u.e, o.DefConFor(u.k, u.v)) + assert.Equal(t, u.e, o.LevelFor(u.k, u.v)) }) } } diff --git a/internal/dao/pod.go b/internal/dao/pod.go index f1c2271e..de561f43 100644 --- a/internal/dao/pod.go +++ b/internal/dao/pod.go @@ -94,7 +94,7 @@ func (p *Pod) List(ctx context.Context, ns string) ([]runtime.Object, error) { } } - var res []runtime.Object + res := make([]runtime.Object, 0, len(oo)) for _, o := range oo { u, ok := o.(*unstructured.Unstructured) if !ok { diff --git a/internal/view/browser.go b/internal/view/browser.go index 2fc3f10e..cbb72cd7 100644 --- a/internal/view/browser.go +++ b/internal/view/browser.go @@ -284,7 +284,7 @@ func (b *Browser) editCmd(evt *tcell.EventKey) *tcell.EventKey { } ns, n := client.Namespaced(path) - if ok, err := b.app.Conn().CanI(ns, b.GVR().String(), []string{"edit"}); !ok || err != nil { + if ok, err := b.app.Conn().CanI(ns, b.GVR().String(), []string{"patch"}); !ok || err != nil { b.App().Flash().Err(fmt.Errorf("Current user can't edit resource %s", b.GVR())) return nil } diff --git a/internal/view/cluster_info.go b/internal/view/cluster_info.go index d81d9372..552d6338 100644 --- a/internal/view/cluster_info.go +++ b/internal/view/cluster_info.go @@ -72,16 +72,16 @@ func (c *ClusterInfo) infoCell(t string) *tview.TableCell { return cell } -// ClusterInfoUpdated notifies the cluster meta was updated. -func (c *ClusterInfo) ClusterInfoUpdated(data model.ClusterMeta) { - c.ClusterInfoChanged(data, data) -} - func (c *ClusterInfo) setCell(row int, s string) int { c.GetCell(row, 1).SetText(s) return row + 1 } +// ClusterInfoUpdated notifies the cluster meta was updated. +func (c *ClusterInfo) ClusterInfoUpdated(data model.ClusterMeta) { + c.ClusterInfoChanged(data, data) +} + // ClusterInfoChanged notifies the cluster meta was changed. 
func (c *ClusterInfo) ClusterInfoChanged(prev, curr model.ClusterMeta) { c.app.QueueUpdateDraw(func() { @@ -101,20 +101,18 @@ func (c *ClusterInfo) ClusterInfoChanged(prev, curr model.ClusterMeta) { }) } -const defconFmt = "Cluster <%s> at DEFCON %d" +const defconFmt = "%s %s level!" func (c *ClusterInfo) setDefCon(cpu, mem int) { var set bool - dc := c.app.Config.K9s.Thresholds.DefConFor("cpu", cpu) - if dc < config.DefCon5 { - l := flashFromDefCon(dc) - c.app.Status(l, fmt.Sprintf(defconFmt, "cpu", int(dc))) + l := c.app.Config.K9s.Thresholds.LevelFor("cpu", cpu) + if l > config.SeverityLow { + c.app.Status(flashLevel(l), fmt.Sprintf(defconFmt, flashMessage(l), "CPU")) set = true } - dc = c.app.Config.K9s.Thresholds.DefConFor("memory", mem) - if dc < config.DefCon5 { - l := flashFromDefCon(dc) - c.app.Status(l, fmt.Sprintf(defconFmt, "mem", int(dc))) + l = c.app.Config.K9s.Thresholds.LevelFor("memory", mem) + if l > config.SeverityLow { + c.app.Status(flashLevel(l), fmt.Sprintf(defconFmt, flashMessage(l), "Memory")) set = true } if !set { @@ -131,13 +129,27 @@ func (c *ClusterInfo) updateStyle() { } } -func flashFromDefCon(l config.DefConLevel) model.FlashLevel { +// ---------------------------------------------------------------------------- +// Helpers... 
+ +func flashLevel(l config.SeverityLevel) model.FlashLevel { switch l { - case config.DefCon1: + case config.SeverityHigh: return model.FlashErr - case config.DefCon2, config.DefCon3: + case config.SeverityMedium: return model.FlashWarn default: return model.FlashInfo } } + +func flashMessage(l config.SeverityLevel) string { + switch l { + case config.SeverityHigh: + return "Critical" + case config.SeverityMedium: + return "Warning" + default: + return "OK" + } +} diff --git a/internal/view/port_forward.go b/internal/view/pf.go similarity index 98% rename from internal/view/port_forward.go rename to internal/view/pf.go index 5adcaa0c..8c2f6deb 100644 --- a/internal/view/port_forward.go +++ b/internal/view/pf.go @@ -48,7 +48,7 @@ func (p *PortForward) portForwardContext(ctx context.Context) context.Context { func (p *PortForward) bindKeys(aa ui.KeyActions) { aa.Add(ui.KeyActions{ tcell.KeyEnter: ui.NewKeyAction("View Benchmarks", p.showBenchCmd, true), - tcell.KeyCtrlB: ui.NewKeyAction("Bench Run/Stop", p.toggleBenchCmd, true), + tcell.KeyCtrlL: ui.NewKeyAction("Benchmark Run/Stop", p.toggleBenchCmd, true), tcell.KeyCtrlD: ui.NewKeyAction("Delete", p.deleteCmd, true), ui.KeyShiftP: ui.NewKeyAction("Sort Ports", p.GetTable().SortColCmd("PORTS", true), false), ui.KeyShiftU: ui.NewKeyAction("Sort URL", p.GetTable().SortColCmd("URL", true), false), diff --git a/internal/view/port_forward_test.go b/internal/view/pf_test.go similarity index 100% rename from internal/view/port_forward_test.go rename to internal/view/pf_test.go diff --git a/internal/view/pulse.go b/internal/view/pulse.go index 790c3005..147ea186 100644 --- a/internal/view/pulse.go +++ b/internal/view/pulse.go @@ -159,7 +159,7 @@ func (p *Pulse) PulseChanged(c *health.Check) { perc := client.ToPercentage(c.Tally(health.S1), c.Tally(health.S2)) v.SetLegend(fmt.Sprintf(cpuFmt, strings.Title(gvr.R()), - p.app.Config.K9s.Thresholds.DefConColorFor("cpu", perc), + p.app.Config.K9s.Thresholds.SeverityColor("cpu", 
perc), render.PrintPerc(perc), nn[0], render.AsThousands(c.Tally(health.S1)), @@ -170,7 +170,7 @@ func (p *Pulse) PulseChanged(c *health.Check) { perc := client.ToPercentage(c.Tally(health.S1), c.Tally(health.S2)) v.SetLegend(fmt.Sprintf(memFmt, strings.Title(gvr.R()), - p.app.Config.K9s.Thresholds.DefConColorFor("memory", perc), + p.app.Config.K9s.Thresholds.SeverityColor("memory", perc), render.PrintPerc(perc), nn[0], render.AsThousands(c.Tally(health.S1)),