Rel v0.50.8 (#3457)

* revert ns cmd

* fix#3421-helm-view-ns

* fix#3439-add-vendor-config

* fix#3453-add-gpu-cols

* rel notes
mine
Fernand Galiana 2025-07-15 08:38:28 -06:00 committed by GitHub
parent 711a8b8fdf
commit 855e995b3a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
33 changed files with 646 additions and 198 deletions

View File

@ -1,5 +1,5 @@
NAME := k9s
VERSION ?= v0.50.7
VERSION ?= v0.50.8
PACKAGE := github.com/derailed/$(NAME)
OUTPUT_BIN ?= execs/${NAME}
GO_FLAGS ?=

View File

@ -23,7 +23,6 @@ Your donations will go a long way in keeping our servers lights on and beers in
[![Go Report Card](https://goreportcard.com/badge/github.com/derailed/k9s?)](https://goreportcard.com/report/github.com/derailed/k9s)
[![golangci badge](https://github.com/golangci/golangci-web/blob/master/src/assets/images/badge_a_plus_flat.svg)](https://golangci.com/r/github.com/derailed/k9s)
[![codebeat badge](https://codebeat.co/badges/89e5a80e-dfe8-4426-acf6-6be781e0a12e)](https://codebeat.co/projects/github-com-derailed-k9s-master)
[![Build Status](https://api.travis-ci.com/derailed/k9s.svg?branch=master)](https://travis-ci.com/derailed/k9s)
[![Docker Repository on Quay](https://quay.io/repository/derailed/k9s/status "Docker Repository on Quay")](https://quay.io/repository/derailed/k9s)
[![release](https://img.shields.io/github/release-pre/derailed/k9s.svg)](https://github.com/derailed/k9s/releases)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/mum4k/termdash/blob/master/LICENSE)
@ -77,17 +76,6 @@ Wanna discuss K9s features with your fellow `K9sers` or simply show your support
---
## 🥳 A Word From Our Rhodium Sponsors...
Below are organizations that have opted to show their support and sponsor K9s.
<br/>
<a href="https://panfactum.com"><img src="assets/sponsors/panfactum.png" alt="panfactum"></a>
<br/>
<br/>
---
## Installation
K9s is available on Linux, macOS and Windows platforms.
@ -407,6 +395,10 @@ You can now override the context portForward default address configuration by se
k9s:
# Enable periodic refresh of resource browser windows. Default false
liveViewAutoRefresh: false
# !!New!! v0.50.8...
# Extends the list of supported GPU vendors. The key is the vendor name, the value must correspond to k8s resource driver designation.
gpuVendors:
bozo: bozo/gpu
# The path to screen dump. Default: '%temp_dir%/k9s-screens-%username%' (k9s info)
screenDumpDir: /tmp/dumps
# Represents ui poll intervals in seconds. Default 2secs

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.7 KiB

View File

@ -0,0 +1,41 @@
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s.png" align="center" width="800" height="auto"/>
# Release v0.50.8
## Notes
Thank you to all that contributed with flushing out issues and enhancements for K9s!
I'll try to mark some of these issues as fixed. But if you don't mind grab the latest rev
and see if we're happier with some of the fixes!
If you've filed an issue please help me verify and close.
Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated!
Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!
As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey,
please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/zt-3360a389v-ElLHrb0Dp1kAXqYUItSAFA)
## Maintenance Release!
---
## Resolved Issues
* [#3453](https://github.com/derailed/k9s/issues/3453) [Feature Request] Add GPU column to pod/container view
* [#3451](https://github.com/derailed/k9s/issues/3451) Weirdness when filtering namespaces
* [#3439](https://github.com/derailed/k9s/issues/3439) Allow KnownGPUVendors customization
---
## Contributed PRs
Please be sure to give `Big Thanks!` and `ATTA Girls/Boys!` to all the fine contributors for making K9s better for all of us!!
* [#3437](https://github.com/derailed/k9s/pull/3437) feat: Add GPU usage to pod view
* [#3421](https://github.com/derailed/k9s/pull/3421) Fix #3421 - can't switch namespaces in helm view
* [#3356](https://github.com/derailed/k9s/pull/3356) allow skin to be selected via K9S_SKIN env var
---
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2025 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)

4
go.mod
View File

@ -1,6 +1,6 @@
module github.com/derailed/k9s
go 1.24.4
go 1.24.3
require (
github.com/adrg/xdg v0.5.3
@ -29,7 +29,7 @@ require (
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
golang.org/x/text v0.26.0
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.18.3
helm.sh/helm/v3 v3.18.4
k8s.io/api v0.33.2
k8s.io/apiextensions-apiserver v0.33.2
k8s.io/apimachinery v0.33.2

4
go.sum
View File

@ -2604,8 +2604,8 @@ gorm.io/gorm v1.26.1 h1:ghB2gUI9FkS46luZtn6DLZ0f6ooBJ5IbVej2ENFDjRw=
gorm.io/gorm v1.26.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
helm.sh/helm/v3 v3.18.3 h1:+cvyGKgs7Jt7BN3Klmb4SsG4IkVpA7GAZVGvMz6VO4I=
helm.sh/helm/v3 v3.18.3/go.mod h1:wUc4n3txYBocM7S9RjTeZBN9T/b5MjffpcSsWEjSIpw=
helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ=
helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -270,6 +270,7 @@ func (a *APIClient) ValidNamespaceNames() (NamespaceNames, error) {
ok, err := a.CanI(ClusterScope, NsGVR, "", ListAccess)
if !ok || err != nil {
a.cache.Add(cacheNSKey, NamespaceNames{}, cacheExpiry)
return nil, fmt.Errorf("user not authorized to list all namespaces")
}

View File

@ -561,6 +561,9 @@ func TestConfigSaveFile(t *testing.T) {
require.NoError(t, cfg.Load("testdata/configs/k9s.yaml", true))
cfg.K9s.RefreshRate = 100
cfg.K9s.GPUVendors = map[string]string{
"bozo": "bozo/gpu.com",
}
cfg.K9s.APIServerTimeout = "30s"
cfg.K9s.ReadOnly = true
cfg.K9s.Logger.TailCount = 500

View File

@ -8,6 +8,17 @@
"additionalProperties": false,
"properties": {
"liveViewAutoRefresh": { "type": "boolean" },
"gpuVendors": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"vendor": { "type": "string" },
"model": { "type": "string" }
},
"required": ["vendor", "model"]
}
},
"screenDumpDir": {"type": "string"},
"refreshRate": { "type": "integer" },
"apiServerTimeout": { "type": "string" },

View File

@ -19,7 +19,12 @@ import (
"github.com/derailed/k9s/internal/slogs"
)
var KnownGPUVendors = map[string]string{
type gpuVendors map[string]string
// KnownGPUVendors tracks a set of known GPU vendors.
var KnownGPUVendors = defaultGPUVendors
var defaultGPUVendors = gpuVendors{
"nvidia": "nvidia.com/gpu",
"amd": "amd.com/gpu",
"intel": "gpu.intel.com/i915",
@ -28,6 +33,7 @@ var KnownGPUVendors = map[string]string{
// K9s tracks K9s configuration options.
type K9s struct {
LiveViewAutoRefresh bool `json:"liveViewAutoRefresh" yaml:"liveViewAutoRefresh"`
GPUVendors gpuVendors `json:"gpuVendors" yaml:"gpuVendors"`
ScreenDumpDir string `json:"screenDumpDir" yaml:"screenDumpDir,omitempty"`
RefreshRate int `json:"refreshRate" yaml:"refreshRate"`
APIServerTimeout string `json:"apiServerTimeout" yaml:"apiServerTimeout"`
@ -60,6 +66,7 @@ type K9s struct {
func NewK9s(conn client.Connection, ks data.KubeSettings) *K9s {
return &K9s{
RefreshRate: defaultRefreshRate,
GPUVendors: make(gpuVendors),
MaxConnRetry: defaultMaxConnRetry,
APIServerTimeout: client.DefaultCallTimeoutDuration.String(),
ScreenDumpDir: AppDumpsDir,
@ -121,6 +128,10 @@ func (k *K9s) Merge(k1 *K9s) {
return
}
for k, v := range k1.GPUVendors {
KnownGPUVendors[k] = v
}
k.LiveViewAutoRefresh = k1.LiveViewAutoRefresh
k.DefaultView = k1.DefaultView
k.ScreenDumpDir = k1.ScreenDumpDir

View File

@ -1,5 +1,6 @@
k9s:
liveViewAutoRefresh: false
gpuVendors: {}
screenDumpDir: /tmp/k9s-test/screen-dumps
refreshRate: 2
apiServerTimeout: 15s

View File

@ -1,5 +1,7 @@
k9s:
liveViewAutoRefresh: true
gpuVendors:
bozo: bozo/gpu.com
screenDumpDir: /tmp/k9s-test/screen-dumps
refreshRate: 100
apiServerTimeout: 30s

View File

@ -1,5 +1,6 @@
k9s:
liveViewAutoRefresh: true
gpuVendors: {}
screenDumpDir: /tmp/k9s-test/screen-dumps
refreshRate: 2
apiServerTimeout: 10s

View File

@ -112,6 +112,16 @@ func (m *Meta) GVK2GVR(gv schema.GroupVersion, kind string) (*client.GVR, bool,
return client.NoGVR, false, false
}
// IsNamespaced reports whether the resource identified by the given GVR
// is namespace scoped. It returns an error when no metadata can be
// resolved for the resource.
func (m *Meta) IsNamespaced(gvr *client.GVR) (bool, error) {
	meta, err := m.MetaFor(gvr)
	if err != nil {
		return false, err
	}

	return meta.Namespaced, nil
}
// MetaFor returns a resource metadata for a given gvr.
func (m *Meta) MetaFor(gvr *client.GVR) (*metav1.APIResource, error) {
m.mx.RLock()

View File

@ -36,7 +36,7 @@ func TestTableReconcile(t *testing.T) {
err := ta.reconcile(ctx)
require.NoError(t, err)
data := ta.Peek()
assert.Equal(t, 25, data.HeaderCount())
assert.Equal(t, 26, data.HeaderCount())
assert.Equal(t, 1, data.RowCount())
assert.Equal(t, client.NamespaceAll, data.GetNamespace())
}

View File

@ -37,7 +37,7 @@ func TestTableRefresh(t *testing.T) {
ctx = context.WithValue(ctx, internal.KeyWithMetrics, false)
require.NoError(t, ta.Refresh(ctx))
data := ta.Peek()
assert.Equal(t, 25, data.HeaderCount())
assert.Equal(t, 26, data.HeaderCount())
assert.Equal(t, 1, data.RowCount())
assert.Equal(t, client.NamespaceAll, data.GetNamespace())
assert.Equal(t, 1, l.count)

View File

@ -87,13 +87,14 @@ var defaultCOHeader = model1.Header{
model1.HeaderColumn{Name: "RESTARTS", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "PROBES(L:R:S)"},
model1.HeaderColumn{Name: "CPU", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "CPU/RL", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "MEM/RL", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "%CPU/R", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%CPU/L", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM/RL", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "%MEM/R", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%MEM/L", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "GPU/RL", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "PORTS"},
model1.HeaderColumn{Name: "VALID", Attrs: model1.Attrs{Wide: true}},
model1.HeaderColumn{Name: "AGE", Attrs: model1.Attrs{Time: true}},
@ -110,7 +111,7 @@ func (c Container) Render(o any, _ string, row *model1.Row) error {
}
func (c Container) defaultRow(cr ContainerRes, r *model1.Row) error {
cur, res := gatherMetrics(cr.Container, cr.MX)
cur, res := gatherContainerMX(cr.Container, cr.MX)
ready, state, restarts := falseStr, MissingValue, "0"
if cr.Status != nil {
ready, state, restarts = boolToStr(cr.Status.Ready), ToContainerState(cr.Status.State), strconv.Itoa(int(cr.Status.RestartCount))
@ -127,13 +128,14 @@ func (c Container) defaultRow(cr ContainerRes, r *model1.Row) error {
restarts,
probe(cr.Container.LivenessProbe) + ":" + probe(cr.Container.ReadinessProbe) + ":" + probe(cr.Container.StartupProbe),
toMc(cur.cpu),
toMi(cur.mem),
toMc(res.cpu) + ":" + toMc(res.lcpu),
toMi(res.mem) + ":" + toMi(res.lmem),
client.ToPercentageStr(cur.cpu, res.cpu),
client.ToPercentageStr(cur.cpu, res.lcpu),
toMi(cur.mem),
toMi(res.mem) + ":" + toMi(res.lmem),
client.ToPercentageStr(cur.mem, res.mem),
client.ToPercentageStr(cur.mem, res.lmem),
toMc(res.gpu) + ":" + toMc(res.lgpu),
ToContainerPorts(cr.Container.Ports),
AsStatus(c.diagnose(state, ready)),
ToAge(cr.Age),
@ -170,26 +172,36 @@ func containerRequests(co *v1.Container) v1.ResourceList {
return nil
}
func gatherMetrics(co *v1.Container, mx *mv1beta1.ContainerMetrics) (c, r metric) {
func gatherContainerMX(co *v1.Container, mx *mv1beta1.ContainerMetrics) (c, r metric) {
rList, lList := containerRequests(co), co.Resources.Limits
if rList.Cpu() != nil {
r.cpu = rList.Cpu().MilliValue()
if q := rList.Cpu(); q != nil {
r.cpu = q.MilliValue()
}
if rList.Memory() != nil {
r.mem = rList.Memory().Value()
if q := lList.Cpu(); q != nil {
r.lcpu = q.MilliValue()
}
if lList.Cpu() != nil {
r.lcpu = lList.Cpu().MilliValue()
if q := rList.Memory(); q != nil {
r.mem = q.Value()
}
if lList.Memory() != nil {
r.lmem = lList.Memory().Value()
if q := lList.Memory(); q != nil {
r.lmem = q.Value()
}
if q := extractGPU(rList); q != nil {
r.gpu = q.Value()
}
if q := extractGPU(lList); q != nil {
r.lgpu = q.Value()
}
if mx != nil {
if mx.Usage.Cpu() != nil {
c.cpu = mx.Usage.Cpu().MilliValue()
if q := mx.Usage.Cpu(); q != nil {
c.cpu = q.MilliValue()
}
if mx.Usage.Memory() != nil {
c.mem = mx.Usage.Memory().Value()
if q := mx.Usage.Memory(); q != nil {
c.mem = q.Value()
}
}

View File

@ -0,0 +1,159 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of K9s
package render
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
func Test_gatherContainerMX(t *testing.T) {
uu := map[string]struct {
container v1.Container
mx *mv1beta1.ContainerMetrics
c, r metric
}{
"empty": {},
"amd-request": {
container: v1.Container{
Name: "fred",
Image: "img",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
"nvidia.com/gpu": resource.MustParse("1"),
},
},
},
mx: &mv1beta1.ContainerMetrics{
Name: "fred",
Usage: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
},
},
c: metric{
cpu: 10,
mem: 20971520,
},
r: metric{
cpu: 10,
gpu: 1,
mem: 20971520,
},
},
"amd-both": {
container: v1.Container{
Name: "fred",
Image: "img",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
"nvidia.com/gpu": resource.MustParse("1"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("50m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
"nvidia.com/gpu": resource.MustParse("2"),
},
},
},
mx: &mv1beta1.ContainerMetrics{
Name: "fred",
Usage: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
},
},
c: metric{
cpu: 10,
mem: 20971520,
},
r: metric{
cpu: 10,
gpu: 1,
mem: 20971520,
lcpu: 50,
lgpu: 2,
lmem: 104857600,
},
},
"amd-limits": {
container: v1.Container{
Name: "fred",
Image: "img",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("50m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
"nvidia.com/gpu": resource.MustParse("2"),
},
},
},
mx: &mv1beta1.ContainerMetrics{
Name: "fred",
Usage: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
},
},
c: metric{
cpu: 10,
mem: 20971520,
},
r: metric{
cpu: 50,
gpu: 2,
mem: 104857600,
lcpu: 50,
lgpu: 2,
lmem: 104857600,
},
},
"amd-no-mx": {
container: v1.Container{
Name: "fred",
Image: "img",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
v1.ResourceMemory: resource.MustParse("20Mi"),
"nvidia.com/gpu": resource.MustParse("1"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("50m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
"nvidia.com/gpu": resource.MustParse("2"),
},
},
},
r: metric{
cpu: 10,
gpu: 1,
mem: 20971520,
lcpu: 50,
lgpu: 2,
lmem: 104857600,
},
},
}
for k, u := range uu {
t.Run(k, func(t *testing.T) {
c, r := gatherContainerMX(&u.container, u.mx)
assert.Equal(t, u.c, c)
assert.Equal(t, u.r, r)
})
}
}

View File

@ -40,13 +40,14 @@ func TestContainer(t *testing.T) {
"0",
"off:off:off",
"10",
"20",
"20:20",
"50",
"50",
"20",
"100:100",
"50",
"50",
"20",
"20",
"0:0",
"",
"container is not ready",
},

View File

@ -265,6 +265,14 @@ func mapToIfc(m any) (s string) {
return
}
// toMu renders a GPU unit count as a string, yielding NAValue when the
// count is zero (i.e. no GPUs reported).
func toMu(v int64) string {
	if v != 0 {
		return strconv.FormatInt(v, 10)
	}
	return NAValue
}
func toMc(v int64) string {
if v == 0 {
return ZeroValue

View File

@ -56,7 +56,7 @@ func TestTableHydrate(t *testing.T) {
re := NewPod()
require.NoError(t, model1.Hydrate("blee", oo, rr, re))
assert.Len(t, rr, 1)
assert.Len(t, rr[0].Fields, 25)
assert.Len(t, rr[0].Fields, 26)
}
func TestToAge(t *testing.T) {

View File

@ -13,7 +13,6 @@ import (
"strings"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/model1"
"github.com/derailed/k9s/internal/slogs"
"github.com/derailed/tview"
@ -42,12 +41,13 @@ var defaultNOHeader = model1.Header{
model1.HeaderColumn{Name: "EXTERNAL-IP", Attrs: model1.Attrs{Wide: true}},
model1.HeaderColumn{Name: "PODS", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "CPU", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%CPU", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "CPU/A", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%CPU", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM/A", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "GPU"},
model1.HeaderColumn{Name: "%MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "GPU/A", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "GPU/C", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "LABELS", Attrs: model1.Attrs{Wide: true}},
model1.HeaderColumn{Name: "VALID", Attrs: model1.Attrs{Wide: true}},
model1.HeaderColumn{Name: "AGE", Attrs: model1.Attrs{Time: true}},
@ -97,6 +97,7 @@ func (n Node) defaultRow(nwm *NodeWithMetrics, r *model1.Row) error {
iIP, eIP = missing(iIP), missing(eIP)
c, a := gatherNodeMX(&no, nwm.MX)
statuses := make(sort.StringSlice, 10)
status(no.Status.Conditions, no.Spec.Unschedulable, statuses)
sort.Sort(statuses)
@ -122,12 +123,13 @@ func (n Node) defaultRow(nwm *NodeWithMetrics, r *model1.Row) error {
eIP,
podCount,
toMc(c.cpu),
toMi(c.mem),
client.ToPercentageStr(c.cpu, a.cpu),
client.ToPercentageStr(c.mem, a.mem),
toMc(a.cpu),
client.ToPercentageStr(c.cpu, a.cpu),
toMi(c.mem),
toMi(a.mem),
n.gpuSpec(no.Status.Capacity, no.Status.Allocatable),
client.ToPercentageStr(c.mem, a.mem),
toMu(a.gpu),
toMu(c.gpu),
mapToStr(no.Labels),
AsStatus(n.diagnose(statuses)),
ToAge(no.GetCreationTimestamp()),
@ -136,21 +138,6 @@ func (n Node) defaultRow(nwm *NodeWithMetrics, r *model1.Row) error {
return nil
}
// gpuSpec renders a node's GPU capacity/allocatable pair for the first
// known GPU vendor whose resource name appears in both lists, e.g.
// "2/4 (nvidia)". It returns NAValue when no known vendor matches.
// NOTE(review): config.KnownGPUVendors is a map, so with multiple matching
// vendors the one reported is iteration-order dependent — presumably nodes
// expose a single GPU vendor; confirm.
func (Node) gpuSpec(capacity, allocatable v1.ResourceList) string {
	for vendor, res := range config.KnownGPUVendors {
		name := v1.ResourceName(res)
		capQ, hasCap := capacity[name]
		allocQ, hasAlloc := allocatable[name]
		if hasCap && hasAlloc {
			return fmt.Sprintf("%s/%s (%s)", capQ.String(), allocQ.String(), vendor)
		}
	}

	return NAValue
}
// Healthy checks component health.
func (n Node) Healthy(_ context.Context, o any) error {
nwm, ok := o.(*NodeWithMetrics)
@ -216,16 +203,21 @@ func (n *NodeWithMetrics) DeepCopyObject() runtime.Object {
}
type metric struct {
cpu, mem int64
lcpu, lmem int64
cpu, gpu, mem int64
lcpu, lgpu, lmem int64
}
func gatherNodeMX(no *v1.Node, mx *mv1beta1.NodeMetrics) (c, a metric) {
a.cpu, a.mem = no.Status.Allocatable.Cpu().MilliValue(), no.Status.Allocatable.Memory().Value()
a.cpu = no.Status.Allocatable.Cpu().MilliValue()
a.mem = no.Status.Allocatable.Memory().Value()
if mx != nil {
c.cpu, c.mem = mx.Usage.Cpu().MilliValue(), mx.Usage.Memory().Value()
c.cpu = mx.Usage.Cpu().MilliValue()
c.mem = mx.Usage.Memory().Value()
}
a.gpu = extractGPU(no.Status.Allocatable).Value()
c.gpu = extractGPU(no.Status.Capacity).Value()
return
}

View File

@ -6,73 +6,122 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
func Test_gpuSpec(t *testing.T) {
func Test_gatherNodeMX(t *testing.T) {
uu := map[string]struct {
capacity v1.ResourceList
allocatable v1.ResourceList
e string
node v1.Node
nMX *mv1beta1.NodeMetrics
ec, ea metric
}{
"empty": {
e: NAValue,
},
"empty": {},
"nvidia": {
capacity: v1.ResourceList{
v1.ResourceName("nvidia.com/gpu"): resource.MustParse("2"),
node: v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "nvidia",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourceName("nvidia.com/gpu"): resource.MustParse("2"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
v1.ResourceName("nvidia.com/gpu"): resource.MustParse("4"),
},
},
},
allocatable: v1.ResourceList{
v1.ResourceName("nvidia.com/gpu"): resource.MustParse("4"),
nMX: &mv1beta1.NodeMetrics{
ObjectMeta: metav1.ObjectMeta{
Name: "nvidia",
},
Usage: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourceName("nvidia.com/gpu"): resource.MustParse("2"),
},
},
ea: metric{
cpu: 8000,
mem: 8589934592,
gpu: 4,
},
ec: metric{
cpu: 3000,
mem: 4294967296,
gpu: 2,
},
e: "2/4 (nvidia)",
},
"intel": {
capacity: v1.ResourceList{
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("2"),
node: v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "intel",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("2"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("4"),
},
},
},
allocatable: v1.ResourceList{
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("4"),
ea: metric{
cpu: 8000,
mem: 8589934592,
gpu: 4,
},
ec: metric{
cpu: 0,
mem: 0,
gpu: 2,
},
e: "2/4 (intel)",
},
"amd": {
capacity: v1.ResourceList{
v1.ResourceName("amd.com/gpu"): resource.MustParse("2"),
"unknown-vendor": {
node: v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "amd",
},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("4Gi"),
v1.ResourceName("bozo/gpu"): resource.MustParse("2"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
v1.ResourceName("bozo/gpu"): resource.MustParse("4"),
},
},
},
allocatable: v1.ResourceList{
v1.ResourceName("amd.com/gpu"): resource.MustParse("4"),
ea: metric{
cpu: 8000,
mem: 8589934592,
gpu: 0,
},
e: "2/4 (amd)",
},
"toast-cap": {
capacity: v1.ResourceList{
v1.ResourceName("gpu.intel.com/iBOZO"): resource.MustParse("2"),
ec: metric{
gpu: 0,
},
allocatable: v1.ResourceList{
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("4"),
},
e: NAValue,
},
"toast-alloc": {
capacity: v1.ResourceList{
v1.ResourceName("gpu.intel.com/i915"): resource.MustParse("2"),
},
allocatable: v1.ResourceList{
v1.ResourceName("gpu.intel.com/iBOZO"): resource.MustParse("4"),
},
e: NAValue,
},
}
for k, u := range uu {
t.Run(k, func(t *testing.T) {
var n Node
assert.Equal(t, u.e, n.gpuSpec(u.capacity, u.allocatable))
c, a := gatherNodeMX(&u.node, u.nMX)
assert.Equal(t, u.ec, c)
assert.Equal(t, u.ea, a)
})
}
}

View File

@ -26,8 +26,8 @@ func TestNodeRender(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "minikube", r.ID)
e := model1.Fields{"minikube", "Ready", "master", "amd64", "0", "v1.15.2", "Buildroot 2018.05.3", "4.15.0", "192.168.64.107", "<none>", "0", "10", "20", "0", "0", "4000", "7874", "n/a"}
assert.Equal(t, e, r.Fields[:18])
e := model1.Fields{"minikube", "Ready", "master", "amd64", "0", "v1.15.2", "Buildroot 2018.05.3", "4.15.0", "192.168.64.107", "<none>", "0", "10", "4000", "0", "20", "7874", "0", "n/a", "n/a"}
assert.Equal(t, e, r.Fields[:19])
}
func BenchmarkNodeRender(b *testing.B) {

View File

@ -11,6 +11,7 @@ import (
"strings"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/model1"
"github.com/derailed/k9s/internal/slogs"
"github.com/derailed/tcell/v2"
@ -60,13 +61,14 @@ var defaultPodHeader = model1.Header{
model1.HeaderColumn{Name: "RESTARTS", Attrs: model1.Attrs{Align: tview.AlignRight}},
model1.HeaderColumn{Name: "LAST RESTART", Attrs: model1.Attrs{Align: tview.AlignRight, Time: true, Wide: true}},
model1.HeaderColumn{Name: "CPU", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "CPU/RL", Attrs: model1.Attrs{Align: tview.AlignRight, Wide: true}},
model1.HeaderColumn{Name: "MEM/RL", Attrs: model1.Attrs{Align: tview.AlignRight, Wide: true}},
model1.HeaderColumn{Name: "%CPU/R", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%CPU/L", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "MEM/RL", Attrs: model1.Attrs{Align: tview.AlignRight, Wide: true}},
model1.HeaderColumn{Name: "%MEM/R", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "%MEM/L", Attrs: model1.Attrs{Align: tview.AlignRight, MX: true}},
model1.HeaderColumn{Name: "GPU/RL", Attrs: model1.Attrs{Align: tview.AlignRight, Wide: true}},
model1.HeaderColumn{Name: "IP"},
model1.HeaderColumn{Name: "NODE"},
model1.HeaderColumn{Name: "SERVICE-ACCOUNT", Attrs: model1.Attrs{Wide: true}},
@ -168,7 +170,7 @@ func (p *Pod) defaultRow(pwm *PodWithMetrics, row *model1.Row) error {
if pwm.MX != nil {
ccmx = pwm.MX.Containers
}
c, r := gatherCoMX(spec, ccmx)
c, r := gatherPodMX(spec, ccmx)
phase := p.Phase(dt, spec, &st)
ns, n := pwm.Raw.GetNamespace(), pwm.Raw.GetName()
@ -184,13 +186,14 @@ func (p *Pod) defaultRow(pwm *PodWithMetrics, row *model1.Row) error {
strconv.Itoa(cRestarts + iRestarts),
ToAge(lastRestart),
toMc(c.cpu),
toMi(c.mem),
toMc(r.cpu) + ":" + toMc(r.lcpu),
toMi(r.mem) + ":" + toMi(r.lmem),
client.ToPercentageStr(c.cpu, r.cpu),
client.ToPercentageStr(c.cpu, r.lcpu),
toMi(c.mem),
toMi(r.mem) + ":" + toMi(r.lmem),
client.ToPercentageStr(c.mem, r.mem),
client.ToPercentageStr(c.mem, r.lmem),
toMc(r.gpu) + ":" + toMc(r.lgpu),
na(st.PodIP),
na(spec.NodeName),
na(spec.ServiceAccountName),
@ -295,16 +298,16 @@ func (p *PodWithMetrics) DeepCopyObject() runtime.Object {
return p
}
func gatherCoMX(spec *v1.PodSpec, ccmx []mv1beta1.ContainerMetrics) (c, r metric) {
func gatherPodMX(spec *v1.PodSpec, ccmx []mv1beta1.ContainerMetrics) (c, r metric) {
cc := make([]v1.Container, 0, len(spec.InitContainers)+len(spec.Containers))
cc = append(cc, filterSidecarCO(spec.InitContainers)...)
cc = append(cc, spec.Containers...)
rcpu, rmem := cosRequests(cc)
r.cpu, r.mem = rcpu.MilliValue(), rmem.Value()
rcpu, rmem, rgpu := cosRequests(cc)
r.cpu, r.mem, r.gpu = rcpu.MilliValue(), rmem.Value(), rgpu.Value()
lcpu, lmem := cosLimits(cc)
r.lcpu, r.lmem = lcpu.MilliValue(), lmem.Value()
lcpu, lmem, lgpu := cosLimits(cc)
r.lcpu, r.lmem, r.lgpu = lcpu.MilliValue(), lmem.Value(), lgpu.Value()
ccpu, cmem := currentRes(ccmx)
c.cpu, c.mem = ccpu.MilliValue(), cmem.Value()
@ -312,52 +315,69 @@ func gatherCoMX(spec *v1.PodSpec, ccmx []mv1beta1.ContainerMetrics) (c, r metric
return
}
func cosLimits(cc []v1.Container) (cpuQ, memQ resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
func cosLimits(cc []v1.Container) (cpuQ, memQ, gpuQ *resource.Quantity) {
cpuQ, gpuQ, memQ = new(resource.Quantity), new(resource.Quantity), new(resource.Quantity)
for i := range cc {
limits := cc[i].Resources.Limits
if len(limits) == 0 {
continue
}
if limits.Cpu() != nil {
cpu.Add(*limits.Cpu())
if q := limits.Cpu(); q != nil {
cpuQ.Add(*q)
}
if limits.Memory() != nil {
mem.Add(*limits.Memory())
if q := limits.Memory(); q != nil {
memQ.Add(*q)
}
if q := extractGPU(limits); q != nil {
gpuQ.Add(*q)
}
}
return *cpu, *mem
return
}
func cosRequests(cc []v1.Container) (cpuQ, memQ resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
func cosRequests(cc []v1.Container) (cpuQ, memQ, gpuQ *resource.Quantity) {
cpuQ, gpuQ, memQ = new(resource.Quantity), new(resource.Quantity), new(resource.Quantity)
for i := range cc {
co := cc[i]
rl := containerRequests(&co)
if rl.Cpu() != nil {
cpu.Add(*rl.Cpu())
if q := rl.Cpu(); q != nil {
cpuQ.Add(*q)
}
if rl.Memory() != nil {
mem.Add(*rl.Memory())
if q := rl.Memory(); q != nil {
memQ.Add(*q)
}
if q := extractGPU(rl); q != nil {
gpuQ.Add(*q)
}
}
return *cpu, *mem
return
}
func currentRes(ccmx []mv1beta1.ContainerMetrics) (cpuQ, memQ resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
// extractGPU returns the GPU quantity from the resource list for the
// first known GPU vendor present, or a zero decimal quantity when no
// vendor resource is found. The returned pointer always refers to a
// copy, never into the list itself.
func extractGPU(rl v1.ResourceList) *resource.Quantity {
	for _, res := range config.KnownGPUVendors {
		q, ok := rl[v1.ResourceName(res)]
		if !ok {
			continue
		}
		return &q
	}

	return &resource.Quantity{Format: resource.DecimalSI}
}
func currentRes(ccmx []mv1beta1.ContainerMetrics) (cpuQ, memQ *resource.Quantity) {
cpuQ = new(resource.Quantity)
memQ = new(resource.Quantity)
if ccmx == nil {
return *cpu, *mem
return
}
for _, co := range ccmx {
c, m := co.Usage.Cpu(), co.Usage.Memory()
cpu.Add(*c)
mem.Add(*m)
cpuQ.Add(*c)
memQ.Add(*m)
}
return *cpu, *mem
return
}
func (*Pod) mapQOS(class v1.PodQOSClass) string {
@ -396,7 +416,7 @@ func (*Pod) ContainerStats(cc []v1.ContainerStatus) (readyCnt, terminatedCnt, re
func (*Pod) initContainerStats(cc []v1.Container, cos []v1.ContainerStatus) (ready, total, restart int) {
for i := range cos {
if !IsSideCarContainer(cc[i].RestartPolicy) {
if !isSideCarContainer(cc[i].RestartPolicy) {
continue
}
total++
@ -462,7 +482,7 @@ func (*Pod) initContainerPhase(spec *v1.PodSpec, pst *v1.PodStatus, status strin
sidecars := sets.New[string]()
for i := range spec.InitContainers {
co := spec.InitContainers[i]
if IsSideCarContainer(co.RestartPolicy) {
if isSideCarContainer(co.RestartPolicy) {
sidecars.Insert(co.Name)
}
}
@ -589,16 +609,15 @@ func hasPodReadyCondition(conditions []v1.PodCondition) bool {
return false
}
func IsSideCarContainer(p *v1.ContainerRestartPolicy) bool {
func isSideCarContainer(p *v1.ContainerRestartPolicy) bool {
return p != nil && *p == v1.ContainerRestartPolicyAlways
}
func filterSidecarCO(cc []v1.Container) []v1.Container {
rcc := make([]v1.Container, 0, len(cc))
for i := range cc {
c := cc[i]
if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
rcc = append(rcc, c)
if isSideCarContainer(cc[i].RestartPolicy) {
rcc = append(rcc, cc[i])
}
}

View File

@ -27,6 +27,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
"none": {
e: "Init:0/0",
},
"restart": {
status: v1.ContainerStatus{
Name: "ic1",
@ -36,6 +37,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
restart: true,
e: "Init:0/0",
},
"no-restart": {
status: v1.ContainerStatus{
Name: "ic1",
@ -44,6 +46,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:0/0",
},
"terminated-reason": {
status: v1.ContainerStatus{
Name: "ic1",
@ -56,6 +59,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:blah",
},
"terminated-signal": {
status: v1.ContainerStatus{
Name: "ic1",
@ -68,6 +72,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:Signal:9",
},
"terminated-code": {
status: v1.ContainerStatus{
Name: "ic1",
@ -79,6 +84,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:ExitCode:1",
},
"terminated-restart": {
status: v1.ContainerStatus{
Name: "ic1",
@ -89,6 +95,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
},
},
"waiting": {
status: v1.ContainerStatus{
Name: "ic1",
@ -100,6 +107,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:blah",
},
"waiting-init": {
status: v1.ContainerStatus{
Name: "ic1",
@ -111,6 +119,7 @@ func Test_checkInitContainerStatus(t *testing.T) {
},
e: "Init:0/0",
},
"running": {
status: v1.ContainerStatus{
Name: "ic1",
@ -137,11 +146,13 @@ func Test_containerPhase(t *testing.T) {
ok bool
}{
"none": {},
"empty": {
status: v1.PodStatus{
Phase: PhaseUnknown,
},
},
"waiting": {
status: v1.PodStatus{
Phase: PhaseUnknown,
@ -166,6 +177,7 @@ func Test_containerPhase(t *testing.T) {
},
e: "waiting",
},
"terminated": {
status: v1.PodStatus{
Phase: PhaseUnknown,
@ -190,6 +202,7 @@ func Test_containerPhase(t *testing.T) {
},
e: "done",
},
"terminated-sig": {
status: v1.PodStatus{
Phase: PhaseUnknown,
@ -214,6 +227,7 @@ func Test_containerPhase(t *testing.T) {
},
e: "Signal:9",
},
"terminated-code": {
status: v1.PodStatus{
Phase: PhaseUnknown,
@ -238,6 +252,7 @@ func Test_containerPhase(t *testing.T) {
},
e: "ExitCode:2",
},
"running": {
status: v1.PodStatus{
Phase: PhaseUnknown,
@ -274,18 +289,20 @@ func Test_containerPhase(t *testing.T) {
}
}
func Test_restartableInitCO(t *testing.T) {
func Test_isSideCarContainer(t *testing.T) {
always, never := v1.ContainerRestartPolicyAlways, v1.ContainerRestartPolicy("never")
uu := map[string]struct {
p *v1.ContainerRestartPolicy
e bool
}{
"empty": {},
"set": {
"sidecar": {
p: &always,
e: true,
},
"unset": {
"no-sidecar": {
p: &never,
},
}
@ -293,7 +310,7 @@ func Test_restartableInitCO(t *testing.T) {
for k := range uu {
u := uu[k]
t.Run(k, func(t *testing.T) {
assert.Equal(t, u.e, IsSideCarContainer(u.p))
assert.Equal(t, u.e, isSideCarContainer(u.p))
})
}
}
@ -308,6 +325,7 @@ func Test_filterSidecarCO(t *testing.T) {
cc: []v1.Container{},
ecc: []v1.Container{},
},
"restartable": {
cc: []v1.Container{
{
@ -322,6 +340,7 @@ func Test_filterSidecarCO(t *testing.T) {
},
},
},
"not-restartable": {
cc: []v1.Container{
{
@ -330,6 +349,7 @@ func Test_filterSidecarCO(t *testing.T) {
},
ecc: []v1.Container{},
},
"mixed": {
cc: []v1.Container{
{
@ -433,7 +453,7 @@ func Test_lastRestart(t *testing.T) {
}
}
func Test_gatherPodMx(t *testing.T) {
func Test_gatherPodMX(t *testing.T) {
uu := map[string]struct {
spec *v1.PodSpec
mx []mv1beta1.ContainerMetrics
@ -452,15 +472,19 @@ func Test_gatherPodMx(t *testing.T) {
c: metric{
cpu: 1,
mem: 22 * client.MegaByte,
gpu: 1,
},
r: metric{
cpu: 10,
mem: 1 * client.MegaByte,
gpu: 1,
lcpu: 20,
lmem: 2 * client.MegaByte,
lgpu: 1,
},
perc: "10",
},
"multi": {
spec: &v1.PodSpec{
Containers: []v1.Container{
@ -471,8 +495,10 @@ func Test_gatherPodMx(t *testing.T) {
},
r: metric{
cpu: 11 + 93 + 11,
gpu: 1,
mem: (22 + 1402 + 34) * client.MegaByte,
lcpu: 111 + 0 + 0,
lgpu: 1,
lmem: (44 + 2804 + 69) * client.MegaByte,
},
mx: []mv1beta1.ContainerMetrics{
@ -482,10 +508,12 @@ func Test_gatherPodMx(t *testing.T) {
},
c: metric{
cpu: 1 + 51 + 1,
gpu: 1,
mem: (22 + 1275 + 27) * client.MegaByte,
},
perc: "46",
},
"sidecar": {
spec: &v1.PodSpec{
Containers: []v1.Container{
@ -497,8 +525,10 @@ func Test_gatherPodMx(t *testing.T) {
},
r: metric{
cpu: 11 + 93,
gpu: 1,
mem: (22 + 1402) * client.MegaByte,
lcpu: 111 + 0,
lgpu: 1,
lmem: (44 + 2804) * client.MegaByte,
},
mx: []mv1beta1.ContainerMetrics{
@ -507,6 +537,7 @@ func Test_gatherPodMx(t *testing.T) {
},
c: metric{
cpu: 1 + 51,
gpu: 1,
mem: (22 + 1275) * client.MegaByte,
},
perc: "50",
@ -516,16 +547,19 @@ func Test_gatherPodMx(t *testing.T) {
for k := range uu {
u := uu[k]
t.Run(k, func(t *testing.T) {
c, r := gatherCoMX(u.spec, u.mx)
c, r := gatherPodMX(u.spec, u.mx)
assert.Equal(t, u.c.cpu, c.cpu)
assert.Equal(t, u.c.mem, c.mem)
assert.Equal(t, u.c.lcpu, c.lcpu)
assert.Equal(t, u.c.lmem, c.lmem)
assert.Equal(t, u.c.lgpu, c.lgpu)
assert.Equal(t, u.r.cpu, r.cpu)
assert.Equal(t, u.r.mem, r.mem)
assert.Equal(t, u.r.lcpu, r.lcpu)
assert.Equal(t, u.r.lmem, r.lmem)
assert.Equal(t, u.r.gpu, r.gpu)
assert.Equal(t, u.r.lgpu, r.lgpu)
assert.Equal(t, u.perc, client.ToPercentageStr(c.cpu, r.cpu))
})
@ -555,9 +589,10 @@ func Test_podLimits(t *testing.T) {
for k := range uu {
u := uu[k]
t.Run(k, func(t *testing.T) {
c, m := cosLimits(u.cc)
c, m, g := cosLimits(u.cc)
assert.True(t, c.Equal(*u.l.Cpu()))
assert.True(t, m.Equal(*u.l.Memory()))
assert.True(t, g.Equal(*extractGPU(u.l)))
})
}
}
@ -565,29 +600,31 @@ func Test_podLimits(t *testing.T) {
func Test_podRequests(t *testing.T) {
uu := map[string]struct {
cc []v1.Container
l v1.ResourceList
e v1.ResourceList
}{
"plain": {
cc: []v1.Container{
makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
},
l: makeRes("10m", "1Mi"),
e: makeRes("10m", "1Mi"),
},
"multi-co": {
cc: []v1.Container{
makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
makeContainer("c2", false, "10m", "1Mi", "40m", "4Mi"),
},
l: makeRes("20m", "2Mi"),
e: makeRes("20m", "2Mi"),
},
}
for k := range uu {
u := uu[k]
t.Run(k, func(t *testing.T) {
c, m := cosRequests(u.cc)
assert.True(t, c.Equal(*u.l.Cpu()))
assert.True(t, m.Equal(*u.l.Memory()))
c, m, g := cosRequests(u.cc)
assert.True(t, c.Equal(*u.e.Cpu()))
assert.True(t, m.Equal(*u.e.Memory()))
assert.True(t, g.Equal(*extractGPU(u.e)))
})
}
}
@ -611,10 +648,12 @@ func makeContainer(n string, restartable bool, rc, rm, lc, lm string) v1.Contain
func makeRes(c, m string) v1.ResourceList {
cpu, _ := res.ParseQuantity(c)
mem, _ := res.ParseQuantity(m)
gpu, _ := res.ParseQuantity(c)
return v1.ResourceList{
v1.ResourceCPU: cpu,
v1.ResourceMemory: mem,
v1.ResourceCPU: cpu,
v1.ResourceMemory: mem,
v1.ResourceName("nvidia.com/gpu"): gpu,
}
}

View File

@ -164,8 +164,8 @@ func TestPodRender(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "default/nginx", r.ID)
e := model1.Fields{"default", "nginx", "0", "●", "1/1", "Running", "0", "<unknown>", "100", "50", "100:0", "70:170", "100", "n/a", "71", "29", "172.17.0.6", "minikube", "default", "<none>"}
assert.Equal(t, e, r.Fields[:20])
e := model1.Fields{"default", "nginx", "0", "●", "1/1", "Running", "0", "<unknown>", "100", "100:0", "100", "n/a", "50", "70:170", "71", "29", "0:0", "172.17.0.6", "minikube", "default", "<none>"}
assert.Equal(t, e, r.Fields[:21])
}
func BenchmarkPodRender(b *testing.B) {
@ -195,8 +195,8 @@ func TestPodInitRender(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "default/nginx", r.ID)
e := model1.Fields{"default", "nginx", "0", "●", "1/1", "Init:0/1", "0", "<unknown>", "10", "10", "100:0", "70:170", "10", "n/a", "14", "5", "172.17.0.6", "minikube", "default", "<none>"}
assert.Equal(t, e, r.Fields[:20])
e := model1.Fields{"default", "nginx", "0", "●", "1/1", "Init:0/1", "0", "<unknown>", "10", "100:0", "10", "n/a", "10", "70:170", "14", "5", "0:0", "172.17.0.6", "minikube", "default", "<none>"}
assert.Equal(t, e, r.Fields[:21])
}
func TestPodSidecarRender(t *testing.T) {
@ -211,8 +211,8 @@ func TestPodSidecarRender(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "default/sleep", r.ID)
e := model1.Fields{"default", "sleep", "0", "●", "2/2", "Running", "0", "<unknown>", "100", "40", "50:250", "50:80", "200", "40", "80", "50", "10.244.0.8", "kind-control-plane", "default", "<none>"}
assert.Equal(t, e, r.Fields[:20])
e := model1.Fields{"default", "sleep", "0", "●", "2/2", "Running", "0", "<unknown>", "100", "50:250", "200", "40", "40", "50:80", "80", "50", "0:0", "10.244.0.8", "kind-control-plane", "default", "<none>"}
assert.Equal(t, e, r.Fields[:21])
}
func TestCheckPodStatus(t *testing.T) {

View File

@ -174,7 +174,12 @@ func (b *Browser) Start() {
b.Table.Start()
b.CmdBuff().AddListener(b)
if err := b.GetModel().Watch(b.prepareContext()); err != nil {
b.App().Flash().Errf("Watcher failed for %s -- %s", b.GVR(), err)
go func() {
time.Sleep(500 * time.Millisecond)
b.app.QueueUpdateDraw(func() {
b.App().Flash().Errf("Watcher failed for %s -- %s", b.GVR(), err)
})
}()
}
}
@ -335,7 +340,11 @@ func (b *Browser) TableDataChanged(mdata *model1.TableData) {
b.setUpdating(true)
defer b.setUpdating(false)
if b.GetColumnCount() == 0 {
b.app.Flash().Infof("Viewing %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
if client.IsClusterScoped(b.GetNamespace()) {
b.app.Flash().Infof("Viewing %s...", b.GVR())
} else {
b.app.Flash().Infof("Viewing %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
}
}
b.refreshActions()
b.UpdateUI(cdata, mdata)
@ -518,7 +527,7 @@ func (b *Browser) switchNamespaceCmd(evt *tcell.EventKey) *tcell.EventKey {
auth, err := b.App().factory.Client().CanI(ns, b.GVR(), "", client.ListAccess)
if !auth {
if err == nil {
err = fmt.Errorf("current user can't access namespace %s", ns)
err = fmt.Errorf("access denied for user on: %s/%s", ns, b.GVR())
}
b.App().Flash().Err(err)
return nil
@ -529,7 +538,11 @@ func (b *Browser) switchNamespaceCmd(evt *tcell.EventKey) *tcell.EventKey {
return nil
}
b.setNamespace(ns)
b.app.Flash().Infof("Viewing %s in namespace `%s`...", b.GVR(), client.PrintNamespace(ns))
if client.IsClusterScoped(ns) {
b.app.Flash().Infof("Viewing %s...", b.GVR())
} else {
b.app.Flash().Infof("Viewing %s in namespace `%s`...", b.GVR(), client.PrintNamespace(ns))
}
b.refresh()
b.UpdateTitle()
b.SelectRow(1, 0, true)
@ -628,9 +641,12 @@ func (b *Browser) namespaceActions(aa *ui.KeyActions) {
aa.Add(ui.KeyN, ui.NewKeyAction("Copy Namespace", b.cpNsCmd, false))
b.namespaces = make(map[int]string, data.MaxFavoritesNS)
aa.Add(ui.Key0, ui.NewKeyAction(client.NamespaceAll, b.switchNamespaceCmd, true))
b.namespaces[0] = client.NamespaceAll
index := 1
var index int
if ok, _ := b.app.Conn().CanI(client.NamespaceAll, client.NsGVR, "", client.ListAccess); ok {
aa.Add(ui.Key0, ui.NewKeyAction(client.NamespaceAll, b.switchNamespaceCmd, true))
b.namespaces[0] = client.NamespaceAll
index = 1
}
favNamespaces := b.app.Config.FavNamespaces()
for _, ns := range favNamespaces {
if ns == client.NamespaceAll {

View File

@ -27,13 +27,24 @@ func NewInterpreter(s string) *Interpreter {
return &c
}
func (c *Interpreter) TrimNS() string {
// ClearNS clears the current namespace if any.
func (c *Interpreter) ClearNS() {
if !c.HasNS() {
return c.line
return
}
ns, _ := c.NSArg()
if ons, ok := c.NSArg(); ok {
c.Reset(strings.TrimSpace(strings.Replace(c.line, " "+ons, "", 1)))
}
}
return strings.TrimSpace(strings.Replace(c.line, ns, "", 1))
// SwitchNS replaces the current namespace argument with the provided one,
// appending it when the command line carries no namespace yet.
func (c *Interpreter) SwitchNS(ns string) {
	if !c.HasNS() {
		c.Reset(c.line + " " + ns)
		// Namespace appended; the substitution below would be a no-op
		// (ns -> ns) so bail out early instead of re-parsing.
		return
	}
	if ons, ok := c.NSArg(); ok {
		c.Reset(strings.TrimSpace(strings.Replace(c.line, ons, ns, 1)))
	}
}
func (c *Interpreter) grok() {

View File

@ -77,6 +77,76 @@ func TestNsCmd(t *testing.T) {
}
}
// TestSwitchNS exercises namespace substitution on the command line.
func TestSwitchNS(t *testing.T) {
	tests := map[string]struct {
		cmd, ns, want string
	}{
		"empty": {},
		"no-op": {
			cmd:  "pod fred",
			ns:   "blee",
			want: "pod blee",
		},
		"no-ns": {
			cmd:  "pod",
			ns:   "blee",
			want: "pod blee",
		},
		"happy": {
			cmd:  "pod app=blee @zorg fred",
			ns:   "blee",
			want: "pod app=blee @zorg blee",
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			p := cmd.NewInterpreter(tc.cmd)
			p.SwitchNS(tc.ns)
			assert.Equal(t, tc.want, p.GetLine())
		})
	}
}
// TestClearNS exercises removal of the namespace argument from the command line.
func TestClearNS(t *testing.T) {
	tests := map[string]struct {
		cmd, want string
	}{
		"empty": {},
		"no-op": {
			cmd:  "pod fred",
			want: "pod",
		},
		"no-ns": {
			cmd:  "pod",
			want: "pod",
		},
		"happy": {
			cmd:  "pod app=blee @zorg zorg",
			want: "pod app=blee @zorg",
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			p := cmd.NewInterpreter(tc.cmd)
			p.ClearNS()
			assert.Equal(t, tc.want, p.GetLine())
		})
	}
}
func TestFilterCmd(t *testing.T) {
uu := map[string]struct {
cmd string

View File

@ -9,6 +9,7 @@ import (
"log/slog"
"regexp"
"runtime/debug"
"strings"
"sync"
"github.com/derailed/k9s/internal/client"
@ -114,7 +115,7 @@ func (*Command) namespaceCmd(p *cmd.Interpreter) bool {
}
if ns != "" {
_ = p.Reset("pod " + ns)
_ = p.Reset(client.PodGVR.String())
}
return false
@ -194,8 +195,13 @@ func (c *Command) run(p *cmd.Interpreter, fqn string, clearStack, pushCmd bool)
if cns, ok := p.NSArg(); ok {
ns = cns
}
if err := c.app.switchNS(ns); err != nil {
return err
if ok, err := dao.MetaAccess.IsNamespaced(gvr); ok && err == nil {
if err := c.app.switchNS(ns); err != nil {
return err
}
p.SwitchNS(ns)
} else {
p.ClearNS()
}
co := c.componentFor(gvr, fqn, v)
@ -354,7 +360,7 @@ func (c *Command) exec(p *cmd.Interpreter, gvr *client.GVR, comp model.Component
if pushCmd {
c.app.cmdHistory.Push(p.GetLine())
}
slog.Debug("History", slogs.Stack, c.app.cmdHistory.List())
slog.Debug("History", slogs.Stack, strings.Join(c.app.cmdHistory.List(), "|"))
return
}

View File

@ -7,7 +7,6 @@ import (
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/model1"
"github.com/derailed/k9s/internal/ui"
cmd2 "github.com/derailed/k9s/internal/view/cmd"
"github.com/derailed/tcell/v2"
"k8s.io/apimachinery/pkg/util/sets"
)
@ -43,14 +42,8 @@ func (n *Namespace) bindKeys(aa *ui.KeyActions) {
// switchNs activates the selected namespace and resets the view to the
// pod listing scoped to that namespace.
func (n *Namespace) switchNs(app *App, _ ui.Tabular, _ *client.GVR, path string) {
	n.useNamespace(path)

	_, ns := client.Namespaced(path)
	app.gotoResource(client.PodGVR.String()+" "+ns, "", false, true)
}
func (n *Namespace) useNsCmd(*tcell.EventKey) *tcell.EventKey {

View File

@ -1,6 +1,6 @@
name: k9s
base: core22
version: 'v0.50.8'
summary: K9s is a CLI to view and manage your Kubernetes clusters.
description: |
K9s is a CLI to view and manage your Kubernetes clusters. By leveraging a terminal UI, you can easily traverse Kubernetes resources and view the state of your clusters in a single powerful session.