diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 355bf2eb..2cf2d41c 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -20,5 +20,5 @@ jobs:
- name: Lint
uses: golangci/golangci-lint-action@v7.0.0
with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- reporter: github-pr-check
\ No newline at end of file
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ version: v2.1.1
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
index 1bd7424b..561f3c6e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -41,9 +41,6 @@ linters:
- goconst
- dogsled
- lll
- # - dupl
- # - gochecknoinits
- # - mnd
settings:
dogsled:
@@ -96,20 +93,24 @@ linters:
goconst:
min-len: 2
min-occurrences: 3
- ignore-strings: 'blee|duh|cl-1|ct-1-1'
+ ignore-string-values:
+ - blee
+ - duh
+ - cl-1
+ - ct-1-1
- # gocritic:
- # enabled-tags:
- # - diagnostic
- # - experimental
- # - opinionated
- # - performance
- # - style
- # disabled-checks:
- # - dupImport # https://github.com/go-critic/go-critic/issues/845
- # - ifElseChain
- # - octalLiteral
- # - whyNoLint
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - experimental
+ - opinionated
+ - performance
+ - style
+ disabled-checks:
+ - dupImport # https://github.com/go-critic/go-critic/issues/845
+ - ifElseChain
+ - octalLiteral
+ - whyNoLint
gocyclo:
min-complexity: 35
diff --git a/Makefile b/Makefile
index fdbdb0fe..7ed1066f 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ DATE ?= $(shell TZ=UTC date -j -f "%s" ${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:
else
DATE ?= $(shell date -u -d @${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:%M:%SZ")
endif
-VERSION ?= v0.50.2
+VERSION ?= v0.50.3
IMG_NAME := derailed/k9s
IMAGE := ${IMG_NAME}:${VERSION}
diff --git a/change_logs/release_v0.50.3.md b/change_logs/release_v0.50.3.md
new file mode 100644
index 00000000..4ff32ee9
--- /dev/null
+++ b/change_logs/release_v0.50.3.md
@@ -0,0 +1,38 @@
+
+
+# Release v0.50.3
+
+## Notes
+
+Thank you to all that contributed with flushing out issues and enhancements for K9s!
+I'll try to mark some of these issues as fixed. But if you don't mind grab the latest rev
+and see if we're happier with some of the fixes!
+If you've filed an issue please help me verify and close.
+
+Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated!
+Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!
+
+As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey,
+please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
+
+On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/zt-3360a389v-ElLHrb0Dp1kAXqYUItSAFA)
+
+## Maintenance Release!
+
+A bit more code spring cleaning/TLC, plus fixes for a few bugs:
+
+1. [RBAC View] Fix issue bombing out on RBAC cluster roles
+2. [Custom Views] Fix issue with parsing `jq` filters and bombing out (Big Thanks to Pierre for flagging it!)
+
+---
+
+## Contributed PRs
+
+Please be sure to give `Big Thanks!` and `ATTA Girls/Boys!` to all the fine contributors for making K9s better for all of us!!
+
+* [#3273](https://github.com/derailed/k9s/pull/3273) k9s plugin scopes containers issue
+* [#3169](https://github.com/derailed/k9s/pull/3169) feat: pass context and token flags to kubectl exec commands
+
+
+---
+
+© 2025 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)
\ No newline at end of file
diff --git a/internal/client/gvr.go b/internal/client/gvr.go
index f8f944b6..62c2eb6d 100644
--- a/internal/client/gvr.go
+++ b/internal/client/gvr.go
@@ -11,6 +11,7 @@ import (
"github.com/derailed/k9s/internal/slogs"
"github.com/fvbommel/sortorder"
+ "gopkg.in/yaml.v3"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -43,9 +44,9 @@ func (c gvrCache) get(gvrs string) *GVR {
var gvrsCache = make(gvrCache)
// NewGVR builds a new gvr from a group, version, resource.
-func NewGVR(path string) *GVR {
- raw := path
- tokens := strings.Split(path, ":")
+func NewGVR(s string) *GVR {
+ raw := s
+ tokens := strings.Split(s, ":")
var g, v, r, sr string
if len(tokens) == 2 {
raw, sr = tokens[0], tokens[1]
@@ -59,10 +60,10 @@ func NewGVR(path string) *GVR {
case 1:
r = tokens[0]
default:
- slog.Error("GVR init failed!", slogs.Error, fmt.Errorf("can't parse GVR %q", path))
+ slog.Error("GVR init failed!", slogs.Error, fmt.Errorf("can't parse GVR %q", s))
}
- gvr := GVR{raw: path, g: g, v: v, r: r, sr: sr}
+ gvr := GVR{raw: s, g: g, v: v, r: r, sr: sr}
if cgvr := gvrsCache.get(gvr.String()); cgvr != nil {
return cgvr
}
@@ -204,6 +205,19 @@ func (g *GVR) IsDecodable() bool {
return g.GVK().Kind == "secrets"
}
+var _ = yaml.Marshaler((*GVR)(nil))
+var _ = yaml.Unmarshaler((*GVR)(nil))
+
+func (g *GVR) MarshalYAML() (any, error) {
+ return g.String(), nil
+}
+
+func (g *GVR) UnmarshalYAML(n *yaml.Node) error {
+ *g = *NewGVR(n.Value)
+
+ return nil
+}
+
// GVRs represents a collection of gvr.
type GVRs []*GVR
diff --git a/internal/client/helpers.go b/internal/client/helpers.go
index 8f677bbe..02e358d2 100644
--- a/internal/client/helpers.go
+++ b/internal/client/helpers.go
@@ -15,7 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var toFileName = regexp.MustCompile(`[^(\w/\.)]`)
+var toFileName = regexp.MustCompile(`[^(\w/.)]`)
// IsClusterWide returns true if ns designates cluster scope, false otherwise.
func IsClusterWide(ns string) bool {
diff --git a/internal/client/metrics.go b/internal/client/metrics.go
index b239db68..e6a7cd05 100644
--- a/internal/client/metrics.go
+++ b/internal/client/metrics.go
@@ -318,17 +318,19 @@ func ToMB(v int64) int64 {
}
// ToPercentage computes percentage as string otherwise n/aa.
-func ToPercentage(v1, v2 int64) int {
- if v2 == 0 {
+func ToPercentage(v, dv int64) int {
+ if dv == 0 {
return 0
}
- return int(math.Floor((float64(v1) / float64(v2)) * 100))
+
+ return int(math.Floor((float64(v) / float64(dv)) * 100))
}
// ToPercentageStr computes percentage, but if v2 is 0, it will return NAValue instead of 0.
-func ToPercentageStr(v1, v2 int64) string {
- if v2 == 0 {
+func ToPercentageStr(v, dv int64) string {
+ if dv == 0 {
return NA
}
- return strconv.Itoa(ToPercentage(v1, v2))
+
+ return strconv.Itoa(ToPercentage(v, dv))
}
diff --git a/internal/config/alias.go b/internal/config/alias.go
index 275effd1..2ab18bd4 100644
--- a/internal/config/alias.go
+++ b/internal/config/alias.go
@@ -19,17 +19,19 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
)
-// Alias tracks shortname to GVR mappings.
-type Alias map[string]*client.GVR
+type (
+ // Alias tracks shortname to GVR mappings.
+ Alias map[string]*client.GVR
-// ShortNames represents a collection of shortnames for aliases.
-type ShortNames map[*client.GVR][]string
+ // ShortNames represents a collection of shortnames for aliases.
+ ShortNames map[*client.GVR][]string
-// Aliases represents a collection of aliases.
-type Aliases struct {
- Alias Alias `yaml:"aliases"`
- mx sync.RWMutex
-}
+ // Aliases represents a collection of aliases.
+ Aliases struct {
+ Alias Alias `yaml:"aliases"`
+ mx sync.RWMutex
+ }
+)
// NewAliases return a new alias.
func NewAliases() *Aliases {
@@ -108,6 +110,7 @@ func (a *Aliases) Get(alias string) (*client.GVR, bool) {
func (a *Aliases) Define(gvr *client.GVR, aliases ...string) {
a.mx.Lock()
defer a.mx.Unlock()
+
for _, alias := range aliases {
if _, ok := a.Alias[alias]; !ok && alias != "" {
a.Alias[alias] = gvr
@@ -131,16 +134,6 @@ func (a *Aliases) Load(path string) error {
return a.LoadFile(path)
}
-type aliases struct {
- Alias map[string]string `yaml:"aliases"`
-}
-
-func newAliases(s int) aliases {
- return aliases{
- Alias: make(map[string]string, s),
- }
-}
-
// LoadFile loads alias from a given file.
func (a *Aliases) LoadFile(path string) error {
if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) {
@@ -155,14 +148,10 @@ func (a *Aliases) LoadFile(path string) error {
slog.Warn("Aliases validation failed", slogs.Error, err)
}
- var aa aliases
- if err := yaml.Unmarshal(bb, &aa); err != nil {
- return err
- }
a.mx.Lock()
defer a.mx.Unlock()
- for alias, cmd := range aa.Alias {
- a.Alias[alias] = client.NewGVR(cmd)
+ if err := yaml.Unmarshal(bb, a); err != nil {
+ return err
}
return nil
@@ -198,18 +187,17 @@ func (a *Aliases) loadDefaultAliases() {
// Save alias to disk.
func (a *Aliases) Save() error {
slog.Debug("Saving Aliases...")
- return a.SaveAliases(AppAliasesFile)
+ a.mx.RLock()
+ defer a.mx.RUnlock()
+
+ return a.saveAliases(AppAliasesFile)
}
// SaveAliases saves aliases to a given file.
-func (a *Aliases) SaveAliases(path string) error {
+func (a *Aliases) saveAliases(path string) error {
if err := data.EnsureDirPath(path, data.DefaultDirMod); err != nil {
return err
}
- aa := newAliases(len(a.Alias))
- for alias, gvr := range a.Alias {
- aa.Alias[alias] = gvr.String()
- }
- return data.SaveYAML(path, aa)
+ return data.SaveYAML(path, a)
}
diff --git a/internal/config/config.go b/internal/config/config.go
index 64fe162f..1a276a61 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -186,16 +186,16 @@ func (c *Config) ActiveView() string {
if err != nil {
return data.DefaultView
}
- cmd := ct.View.Active
+ v := ct.View.Active
if c.K9s.manualCommand != nil && *c.K9s.manualCommand != "" {
- cmd = *c.K9s.manualCommand
+ v = *c.K9s.manualCommand
// We reset the manualCommand property because
// the command-line switch should only be considered once,
// on startup.
*c.K9s.manualCommand = ""
}
- return cmd
+ return v
}
func (c *Config) ResetActiveView() {
diff --git a/internal/dao/accessor.go b/internal/dao/accessor.go
index 5069cdc0..e2a77923 100644
--- a/internal/dao/accessor.go
+++ b/internal/dao/accessor.go
@@ -8,44 +8,44 @@ import (
)
var accessors = Accessors{
- *client.WkGVR: new(Workload),
- *client.CtGVR: new(Context),
- *client.CoGVR: new(Container),
- *client.ScnGVR: new(ImageScan),
- *client.SdGVR: new(ScreenDump),
- *client.BeGVR: new(Benchmark),
- *client.PfGVR: new(PortForward),
- *client.DirGVR: new(Dir),
+ client.WkGVR: new(Workload),
+ client.CtGVR: new(Context),
+ client.CoGVR: new(Container),
+ client.ScnGVR: new(ImageScan),
+ client.SdGVR: new(ScreenDump),
+ client.BeGVR: new(Benchmark),
+ client.PfGVR: new(PortForward),
+ client.DirGVR: new(Dir),
- *client.SvcGVR: new(Service),
- *client.PodGVR: new(Pod),
- *client.NodeGVR: new(Node),
- *client.NsGVR: new(Namespace),
- *client.CmGVR: new(ConfigMap),
- *client.SecGVR: new(Secret),
+ client.SvcGVR: new(Service),
+ client.PodGVR: new(Pod),
+ client.NodeGVR: new(Node),
+ client.NsGVR: new(Namespace),
+ client.CmGVR: new(ConfigMap),
+ client.SecGVR: new(Secret),
- *client.DpGVR: new(Deployment),
- *client.DsGVR: new(DaemonSet),
- *client.StsGVR: new(StatefulSet),
- *client.RsGVR: new(ReplicaSet),
+ client.DpGVR: new(Deployment),
+ client.DsGVR: new(DaemonSet),
+ client.StsGVR: new(StatefulSet),
+ client.RsGVR: new(ReplicaSet),
- *client.CjGVR: new(CronJob),
- *client.JobGVR: new(Job),
+ client.CjGVR: new(CronJob),
+ client.JobGVR: new(Job),
- *client.HmGVR: new(HelmChart),
- *client.HmhGVR: new(HelmHistory),
+ client.HmGVR: new(HelmChart),
+ client.HmhGVR: new(HelmHistory),
- *client.CrdGVR: new(CustomResourceDefinition),
+ client.CrdGVR: new(CustomResourceDefinition),
}
// Accessors represents a collection of dao accessors.
-type Accessors map[client.GVR]Accessor
+type Accessors map[*client.GVR]Accessor
// AccessorFor returns a client accessor for a resource if registered.
// Otherwise it returns a generic accessor.
// Customize here for non resource types or types with metrics or logs.
func AccessorFor(f Factory, gvr *client.GVR) (Accessor, error) {
- r, ok := accessors[*gvr]
+ r, ok := accessors[gvr]
if !ok {
r = new(Scaler)
slog.Debug("No DAO registry entry. Using generics!", slogs.GVR, gvr)
diff --git a/internal/dao/helm_history.go b/internal/dao/helm_history.go
index 10900ee4..73f64c40 100644
--- a/internal/dao/helm_history.go
+++ b/internal/dao/helm_history.go
@@ -146,10 +146,10 @@ func (h *HelmHistory) Rollback(_ context.Context, path, rev string) error {
if err != nil {
return fmt.Errorf("could not convert revision to a number: %w", err)
}
- client := action.NewRollback(cfg)
- client.Version = ver
+ clt := action.NewRollback(cfg)
+ clt.Version = ver
- return client.Run(n)
+ return clt.Run(n)
}
// Delete uninstall a Helm.
diff --git a/internal/dao/helpers.go b/internal/dao/helpers.go
index 5e280107..b0149ad3 100644
--- a/internal/dao/helpers.go
+++ b/internal/dao/helpers.go
@@ -40,7 +40,7 @@ func GetDefaultContainer(m *metav1.ObjectMeta, spec *v1.PodSpec) (string, bool)
}
}
slog.Warn("Container not found. Annotation ignored",
- slogs.CO, defaultContainer,
+ slogs.Container, defaultContainer,
slogs.Annotation, DefaultContainerAnnotation,
)
@@ -74,11 +74,12 @@ func inList(ll []string, s string) bool {
return false
}
-func toPerc(v1, v2 float64) float64 {
- if v2 == 0 {
+func toPerc(v, dv float64) float64 {
+ if dv == 0 {
return 0
}
- return math.Round((v1 / v2) * 100)
+
+ return math.Round((v / dv) * 100)
}
// ToYAML converts a resource to its YAML representation.
@@ -121,6 +122,7 @@ func serviceAccountMatches(podSA, saName string) bool {
if podSA == "" {
podSA = defaultServiceAccount
}
+
return podSA == saName
}
diff --git a/internal/dao/pod.go b/internal/dao/pod.go
index c32e22c3..ddd53a9c 100644
--- a/internal/dao/pod.go
+++ b/internal/dao/pod.go
@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
@@ -487,6 +488,17 @@ func (p *Pod) isControlled(path string) (fqn string, ok bool, err error) {
return "", false, nil
}
+var toastPhases = sets.New(
+ render.PhaseCompleted,
+ render.PhasePending,
+ render.PhaseCrashLoop,
+ render.PhaseError,
+ render.PhaseImagePullBackOff,
+ render.PhaseContainerStatusUnknown,
+ render.PhaseEvicted,
+ render.PhaseOOMKilled,
+)
+
func (p *Pod) Sanitize(ctx context.Context, ns string) (int, error) {
oo, err := p.Resource.List(ctx, ns)
if err != nil {
@@ -504,22 +516,8 @@ func (p *Pod) Sanitize(ctx context.Context, ns string) (int, error) {
if err != nil {
continue
}
- switch render.PodStatus(&pod) {
- case render.PhaseCompleted:
- fallthrough
- case render.PhasePending:
- fallthrough
- case render.PhaseCrashLoop:
- fallthrough
- case render.PhaseError:
- fallthrough
- case render.PhaseImagePullBackOff:
- fallthrough
- case render.PhaseContainerStatusUnknown:
- fallthrough
- case render.PhaseEvicted:
- fallthrough
- case render.PhaseOOMKilled:
+
+ if toastPhases.Has(render.PodStatus(&pod)) {
// !!BOZO!! Might need to bump timeout otherwise rev limit if too many??
fqn := client.FQN(pod.Namespace, pod.Name)
slog.Debug("Sanitizing resource", slogs.FQN, fqn)
diff --git a/internal/dao/port_forward.go b/internal/dao/port_forward.go
index 8d9cc424..ea9b6383 100644
--- a/internal/dao/port_forward.go
+++ b/internal/dao/port_forward.go
@@ -44,20 +44,20 @@ func (p *PortForward) List(ctx context.Context, _ string) ([]runtime.Object, err
}
path, _ := ctx.Value(internal.KeyPath).(string)
- config, err := config.NewBench(benchFile)
+ bcfg, err := config.NewBench(benchFile)
if err != nil {
slog.Debug("No custom benchmark config file found", slogs.FileName, benchFile)
}
- ff, cc := p.getFactory().Forwarders(), config.Benchmarks.Containers
+ ff, cc := p.getFactory().Forwarders(), bcfg.Benchmarks.Containers
oo := make([]runtime.Object, 0, len(ff))
for k, f := range ff {
if !strings.HasPrefix(k, path) {
continue
}
cfg := render.BenchCfg{
- C: config.Benchmarks.Defaults.C,
- N: config.Benchmarks.Defaults.N,
+ C: bcfg.Benchmarks.Defaults.C,
+ N: bcfg.Benchmarks.Defaults.N,
}
if cust, ok := cc[PodToKey(k)]; ok {
cfg.C, cfg.N = cust.C, cust.N
diff --git a/internal/dao/port_forwarder.go b/internal/dao/port_forwarder.go
index bd96451d..f0c3a5fd 100644
--- a/internal/dao/port_forwarder.go
+++ b/internal/dao/port_forwarder.go
@@ -170,7 +170,7 @@ func (p *PortForwarder) Start(path string, tt port.PortTunnel) (*portforward.Por
return p.forwardPorts("POST", req.URL(), tt.Address, tt.PortMap())
}
-func (p *PortForwarder) forwardPorts(method string, url *url.URL, addr, portMap string) (*portforward.PortForwarder, error) {
+func (p *PortForwarder) forwardPorts(method string, u *url.URL, addr, portMap string) (*portforward.PortForwarder, error) {
cfg, err := p.Client().Config().RESTConfig()
if err != nil {
return nil, err
@@ -179,10 +179,10 @@ func (p *PortForwarder) forwardPorts(method string, url *url.URL, addr, portMap
if err != nil {
return nil, err
}
- dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport, Timeout: defaultTimeout}, method, url)
+ dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport, Timeout: defaultTimeout}, method, u)
if !cmdutil.PortForwardWebsockets.IsDisabled() {
- tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(url, cfg)
+ tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(u, cfg)
if err != nil {
return nil, err
}
diff --git a/internal/dao/rbac.go b/internal/dao/rbac.go
index 2e078e2e..e45d7cff 100644
--- a/internal/dao/rbac.go
+++ b/internal/dao/rbac.go
@@ -6,12 +6,10 @@ package dao
import (
"context"
"fmt"
- "log/slog"
"github.com/derailed/k9s/internal"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/render"
- "github.com/derailed/k9s/internal/slogs"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@@ -69,7 +67,7 @@ func (r *Rbac) loadClusterRoleBinding(path string) ([]runtime.Object, error) {
return nil, err
}
var cr rbacv1.ClusterRole
- err = runtime.DefaultUnstructuredConverter.FromUnstructured(crbo.(*unstructured.Unstructured).Object, &cro)
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(cro.(*unstructured.Unstructured).Object, &cr)
if err != nil {
return nil, err
}
@@ -114,7 +112,6 @@ func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {
}
func (r *Rbac) loadClusterRole(fqn string) ([]runtime.Object, error) {
- slog.Debug("LOAD-CR", slogs.FQN, fqn)
o, err := r.getFactory().Get(client.CrGVR, fqn, true, labels.Everything())
if err != nil {
return nil, err
diff --git a/internal/model/helpers.go b/internal/model/helpers.go
index a2354eb6..e4d6f5a9 100644
--- a/internal/model/helpers.go
+++ b/internal/model/helpers.go
@@ -30,7 +30,7 @@ func getMeta(ctx context.Context, gvr *client.GVR) (ResourceMeta, error) {
}
func resourceMeta(gvr *client.GVR) ResourceMeta {
- meta, ok := Registry[gvr.String()]
+ meta, ok := Registry[gvr]
if !ok {
meta = ResourceMeta{
DAO: new(dao.Table),
diff --git a/internal/model/pulse_health.go b/internal/model/pulse_health.go
index 2929d329..3f13bf4e 100644
--- a/internal/model/pulse_health.go
+++ b/internal/model/pulse_health.go
@@ -102,7 +102,7 @@ func (h *PulseHealth) checkMetrics(ctx context.Context) (health.Checks, error) {
}
func (h *PulseHealth) check(ctx context.Context, ns string, gvr *client.GVR) (*health.Check, error) {
- meta, ok := Registry[gvr.String()]
+ meta, ok := Registry[gvr]
if !ok {
meta = ResourceMeta{
DAO: new(dao.Table),
diff --git a/internal/model/registry.go b/internal/model/registry.go
index f62a6c3b..b8e9bd36 100644
--- a/internal/model/registry.go
+++ b/internal/model/registry.go
@@ -13,181 +13,181 @@ import (
// Registry tracks resources metadata.
// BOZO!! Break up deps and merge into single registrar.
-var Registry = map[string]ResourceMeta{
+var Registry = map[*client.GVR]ResourceMeta{
// Custom...
- client.WkGVR.String(): {
+ client.WkGVR: {
DAO: new(dao.Workload),
Renderer: new(render.Workload),
},
- client.RefGVR.String(): {
+ client.RefGVR: {
DAO: new(dao.Reference),
Renderer: new(render.Reference),
},
- client.DirGVR.String(): {
+ client.DirGVR: {
DAO: new(dao.Dir),
Renderer: new(render.Dir),
},
- client.PuGVR.String(): {
+ client.PuGVR: {
DAO: new(dao.Pulse),
},
- client.HmGVR.String(): {
+ client.HmGVR: {
DAO: new(dao.HelmChart),
Renderer: new(helm.Chart),
},
- client.HmhGVR.String(): {
+ client.HmhGVR: {
DAO: new(dao.HelmHistory),
Renderer: new(helm.History),
},
- client.CoGVR.String(): {
+ client.CoGVR: {
DAO: new(dao.Container),
Renderer: new(render.Container),
TreeRenderer: new(xray.Container),
},
- client.ScnGVR.String(): {
+ client.ScnGVR: {
DAO: new(dao.ImageScan),
Renderer: new(render.ImageScan),
},
- client.CtGVR.String(): {
+ client.CtGVR: {
DAO: new(dao.Context),
Renderer: new(render.Context),
},
- client.SdGVR.String(): {
+ client.SdGVR: {
DAO: new(dao.ScreenDump),
Renderer: new(render.ScreenDump),
},
- client.RbacGVR.String(): {
+ client.RbacGVR: {
DAO: new(dao.Rbac),
Renderer: new(render.Rbac),
},
- client.PolGVR.String(): {
+ client.PolGVR: {
DAO: new(dao.Policy),
Renderer: new(render.Policy),
},
- client.UsrGVR.String(): {
+ client.UsrGVR: {
DAO: new(dao.Subject),
Renderer: new(render.Subject),
},
- client.GrpGVR.String(): {
+ client.GrpGVR: {
DAO: new(dao.Subject),
Renderer: new(render.Subject),
},
- client.PfGVR.String(): {
+ client.PfGVR: {
DAO: new(dao.PortForward),
Renderer: new(render.PortForward),
},
- client.BeGVR.String(): {
+ client.BeGVR: {
DAO: new(dao.Benchmark),
Renderer: new(render.Benchmark),
},
- client.AliGVR.String(): {
+ client.AliGVR: {
DAO: new(dao.Alias),
Renderer: new(render.Alias),
},
// Core...
- client.EpGVR.String(): {
+ client.EpGVR: {
Renderer: new(render.Endpoints),
},
- client.PodGVR.String(): {
+ client.PodGVR: {
DAO: new(dao.Pod),
Renderer: render.NewPod(),
TreeRenderer: new(xray.Pod),
},
- client.NsGVR.String(): {
+ client.NsGVR: {
DAO: new(dao.Namespace),
Renderer: new(render.Namespace),
},
- client.SecGVR.String(): {
+ client.SecGVR: {
DAO: new(dao.Secret),
Renderer: new(render.Secret),
},
- client.CmGVR.String(): {
+ client.CmGVR: {
DAO: new(dao.ConfigMap),
Renderer: new(render.ConfigMap),
},
- client.NodeGVR.String(): {
+ client.NodeGVR: {
DAO: new(dao.Node),
Renderer: new(render.Node),
},
- client.SvcGVR.String(): {
+ client.SvcGVR: {
DAO: new(dao.Service),
Renderer: new(render.Service),
TreeRenderer: new(xray.Service),
},
- client.SaGVR.String(): {
+ client.SaGVR: {
Renderer: new(render.ServiceAccount),
},
- client.PvGVR.String(): {
+ client.PvGVR: {
Renderer: new(render.PersistentVolume),
},
- client.PvcGVR.String(): {
+ client.PvcGVR: {
Renderer: new(render.PersistentVolumeClaim),
},
// Apps...
- client.DpGVR.String(): {
+ client.DpGVR: {
DAO: new(dao.Deployment),
Renderer: new(render.Deployment),
TreeRenderer: new(xray.Deployment),
},
- client.RsGVR.String(): {
+ client.RsGVR: {
Renderer: new(render.ReplicaSet),
TreeRenderer: new(xray.ReplicaSet),
},
- client.StsGVR.String(): {
+ client.StsGVR: {
DAO: new(dao.StatefulSet),
Renderer: new(render.StatefulSet),
TreeRenderer: new(xray.StatefulSet),
},
- client.DsGVR.String(): {
+ client.DsGVR: {
DAO: new(dao.DaemonSet),
Renderer: new(render.DaemonSet),
TreeRenderer: new(xray.DaemonSet),
},
// Extensions...
- client.NpGVR.String(): {
+ client.NpGVR: {
Renderer: &render.NetworkPolicy{},
},
// Batch...
- client.CjGVR.String(): {
+ client.CjGVR: {
DAO: new(dao.CronJob),
Renderer: new(render.CronJob),
},
- client.JobGVR.String(): {
+ client.JobGVR: {
DAO: new(dao.Job),
Renderer: new(render.Job),
},
// CRDs...
- client.CrdGVR.String(): {
+ client.CrdGVR: {
DAO: new(dao.CustomResourceDefinition),
Renderer: new(render.CustomResourceDefinition),
},
// Storage...
- client.ScGVR.String(): {
+ client.ScGVR: {
Renderer: &render.StorageClass{},
},
// Policy...
- client.PdbGVR.String(): {
+ client.PdbGVR: {
Renderer: &render.PodDisruptionBudget{},
},
// RBAC...
- client.CrGVR.String(): {
+ client.CrGVR: {
DAO: new(dao.Rbac),
Renderer: new(render.ClusterRole),
},
- client.CrbGVR.String(): {
+ client.CrbGVR: {
Renderer: new(render.ClusterRoleBinding),
},
- client.RoGVR.String(): {
+ client.RoGVR: {
Renderer: new(render.Role),
},
- client.RobGVR.String(): {
+ client.RobGVR: {
Renderer: new(render.RoleBinding),
},
}
diff --git a/internal/model/rev_values.go b/internal/model/rev_values.go
index e41bd599..cd123422 100644
--- a/internal/model/rev_values.go
+++ b/internal/model/rev_values.go
@@ -44,7 +44,7 @@ func NewRevValues(gvr *client.GVR, path, rev string) *RevValues {
}
func getHelmHistDao() *dao.HelmHistory {
- return Registry[client.HmhGVR.String()].DAO.(*dao.HelmHistory)
+ return Registry[client.HmhGVR].DAO.(*dao.HelmHistory)
}
func getRevValues(path, _ string) []string {
diff --git a/internal/model/tree.go b/internal/model/tree.go
index c31b60ca..429f692f 100644
--- a/internal/model/tree.go
+++ b/internal/model/tree.go
@@ -237,7 +237,7 @@ func (t *Tree) reconcile(ctx context.Context) error {
}
func (t *Tree) resourceMeta() ResourceMeta {
- meta, ok := Registry[t.gvr.String()]
+ meta, ok := Registry[t.gvr]
if !ok {
meta = ResourceMeta{
DAO: &dao.Table{},
diff --git a/internal/perf/benchmark.go b/internal/perf/benchmark.go
index 41a24681..5b417a80 100644
--- a/internal/perf/benchmark.go
+++ b/internal/perf/benchmark.go
@@ -106,10 +106,10 @@ func (b *Benchmark) Canceled() bool {
}
// Run starts a benchmark.
-func (b *Benchmark) Run(cluster, context string, done func()) {
+func (b *Benchmark) Run(cluster, ct string, done func()) {
slog.Debug("Running benchmark",
slogs.Cluster, cluster,
- slogs.Context, context,
+ slogs.Context, ct,
)
buff := new(bytes.Buffer)
b.worker.Writer = buff
@@ -117,18 +117,18 @@ func (b *Benchmark) Run(cluster, context string, done func()) {
b.worker.Run()
b.worker.Stop()
if buff.Len() > 0 {
- if err := b.save(cluster, context, buff); err != nil {
+ if err := b.save(cluster, ct, buff); err != nil {
slog.Error("Saving Benchmark", slogs.Error, err)
}
}
done()
}
-func (b *Benchmark) save(cluster, context string, r io.Reader) error {
+func (b *Benchmark) save(cluster, ct string, r io.Reader) error {
ns, n := client.Namespaced(b.config.Name)
n = strings.ReplaceAll(n, "|", "_")
n = strings.ReplaceAll(n, ":", "_")
- dir, err := config.EnsureBenchmarksDir(cluster, context)
+ dir, err := config.EnsureBenchmarksDir(cluster, ct)
if err != nil {
return err
}
diff --git a/internal/port/pfs_test.go b/internal/port/pfs_test.go
index e7d7bf5f..6a7bb2a7 100644
--- a/internal/port/pfs_test.go
+++ b/internal/port/pfs_test.go
@@ -137,9 +137,9 @@ func TestPFsToPortSpec(t *testing.T) {
if err != nil {
return
}
- spec, port := pfs.ToPortSpec(u.specs)
+ spec, prt := pfs.ToPortSpec(u.specs)
assert.Equal(t, u.spec, spec)
- assert.Equal(t, u.port, port)
+ assert.Equal(t, u.port, prt)
})
}
}
diff --git a/internal/render/cust_cols.go b/internal/render/cust_cols.go
index 6caa0468..d3c5ac88 100644
--- a/internal/render/cust_cols.go
+++ b/internal/render/cust_cols.go
@@ -119,8 +119,11 @@ func (cc ColumnSpecs) realize(o runtime.Object, rh model1.Header, row *model1.Ro
parsers[ix] = jsonpath.New(
fmt.Sprintf("column%d", ix),
).AllowMissingKeys(true)
- if err := parsers[ix].Parse(cc[ix].Spec); err != nil {
- return nil, err
+ if err := parsers[ix].Parse(cc[ix].Spec); err != nil && !isJQSpec(cc[ix].Spec) {
+ slog.Warn("Unable to parse custom column",
+ slogs.Name, cc[ix].Header.Name,
+ slogs.Error, err,
+ )
}
}
diff --git a/internal/render/pod_int_test.go b/internal/render/pod_int_test.go
index e9215318..8bee5f7a 100644
--- a/internal/render/pod_int_test.go
+++ b/internal/render/pod_int_test.go
@@ -596,17 +596,16 @@ func Test_podRequests(t *testing.T) {
func makeContainer(n string, restartable bool, rc, rm, lc, lm string) v1.Container {
always := v1.ContainerRestartPolicyAlways
- var res v1.ResourceRequirements
- var rp *v1.ContainerRestartPolicy
- res = v1.ResourceRequirements{
+ rq := v1.ResourceRequirements{
Requests: makeRes(rc, rm),
Limits: makeRes(lc, lm),
}
+ var rp *v1.ContainerRestartPolicy
if restartable {
rp = &always
}
- return v1.Container{Name: n, Resources: res, RestartPolicy: rp}
+ return v1.Container{Name: n, Resources: rq, RestartPolicy: rp}
}
func makeRes(c, m string) v1.ResourceList {
diff --git a/internal/slogs/keys.go b/internal/slogs/keys.go
index 24ee165a..9fb007d9 100644
--- a/internal/slogs/keys.go
+++ b/internal/slogs/keys.go
@@ -147,9 +147,6 @@ const (
// Log tracks a log logger key.
Log = "log"
- // CO tracks a container logger key.
- CO = "container"
-
// Annotation tracks an annotation logger key.
Annotation = "annotation"
diff --git a/internal/ui/config.go b/internal/ui/config.go
index 0a4cdeaf..cca88fa3 100644
--- a/internal/ui/config.go
+++ b/internal/ui/config.go
@@ -215,7 +215,7 @@ func (c *Configurator) activeSkin() (string, bool) {
return skin, skin != ""
}
-func (c *Configurator) activeConfig() (cluster, context string, ok bool) {
+func (c *Configurator) activeConfig() (cluster, contxt string, ok bool) {
if c.Config == nil || c.Config.K9s == nil {
return
}
@@ -223,8 +223,8 @@ func (c *Configurator) activeConfig() (cluster, context string, ok bool) {
if err != nil {
return
}
- cluster, context = ct.GetClusterName(), c.Config.K9s.ActiveContextName()
- if cluster != "" && context != "" {
+ cluster, contxt = ct.GetClusterName(), c.Config.K9s.ActiveContextName()
+ if cluster != "" && contxt != "" {
ok = true
}
diff --git a/internal/ui/prompt_test.go b/internal/ui/prompt_test.go
index e6267c3d..61a5b7c2 100644
--- a/internal/ui/prompt_test.go
+++ b/internal/ui/prompt_test.go
@@ -15,48 +15,48 @@ import (
func TestCmdNew(t *testing.T) {
v := ui.NewPrompt(nil, true, config.NewStyles())
- model := model.NewFishBuff(':', model.CommandBuffer)
- v.SetModel(model)
- model.AddListener(v)
+ m := model.NewFishBuff(':', model.CommandBuffer)
+ v.SetModel(m)
+ m.AddListener(v)
for _, r := range "blee" {
- model.Add(r)
+ m.Add(r)
}
assert.Equal(t, "\x00> [::b]blee\n", v.GetText(false))
}
func TestCmdUpdate(t *testing.T) {
- model := model.NewFishBuff(':', model.CommandBuffer)
+ m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(nil, true, config.NewStyles())
- v.SetModel(model)
+ v.SetModel(m)
- model.AddListener(v)
- model.SetText("blee", "")
- model.Add('!')
+ m.AddListener(v)
+ m.SetText("blee", "")
+ m.Add('!')
assert.Equal(t, "\x00> [::b]blee!\n", v.GetText(false))
assert.False(t, v.InCmdMode())
}
func TestCmdMode(t *testing.T) {
- model := model.NewFishBuff(':', model.CommandBuffer)
+ m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(&ui.App{}, true, config.NewStyles())
- v.SetModel(model)
- model.AddListener(v)
+ v.SetModel(m)
+ m.AddListener(v)
for _, f := range []bool{false, true} {
- model.SetActive(f)
+ m.SetActive(f)
assert.Equal(t, f, v.InCmdMode())
}
}
func TestPrompt_Deactivate(t *testing.T) {
- model := model.NewFishBuff(':', model.CommandBuffer)
+ m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(&ui.App{}, true, config.NewStyles())
- v.SetModel(model)
- model.AddListener(v)
+ v.SetModel(m)
+ m.AddListener(v)
- model.SetActive(true)
+ m.SetActive(true)
if assert.True(t, v.InCmdMode()) {
v.Deactivate()
assert.False(t, v.InCmdMode())
@@ -92,13 +92,13 @@ func TestPromptColor(t *testing.T) {
}
for _, testCase := range testCases {
- model := model.NewFishBuff(':', testCase.kind)
+ m := model.NewFishBuff(':', testCase.kind)
prompt := ui.NewPrompt(&app, true, styles)
- prompt.SetModel(model)
- model.AddListener(prompt)
+ prompt.SetModel(m)
+ m.AddListener(prompt)
- model.SetActive(true)
+ m.SetActive(true)
assert.Equal(t, testCase.expectedColor, prompt.GetBorderColor())
}
}
@@ -135,17 +135,17 @@ func TestPromptStyleChanged(t *testing.T) {
}
for _, testCase := range testCases {
- model := model.NewFishBuff(':', testCase.kind)
+ m := model.NewFishBuff(':', testCase.kind)
prompt := ui.NewPrompt(&app, true, styles)
- model.SetActive(true)
+ m.SetActive(true)
- prompt.SetModel(model)
- model.AddListener(prompt)
+ prompt.SetModel(m)
+ m.AddListener(prompt)
prompt.StylesChanged(newStyles)
- model.SetActive(true)
+ m.SetActive(true)
assert.Equal(t, testCase.expectedColor, prompt.GetBorderColor())
}
}
diff --git a/internal/view/benchmark.go b/internal/view/benchmark.go
index e7043917..11626e10 100644
--- a/internal/view/benchmark.go
+++ b/internal/view/benchmark.go
@@ -44,13 +44,13 @@ func (b *Benchmark) benchContext(ctx context.Context) context.Context {
}
func (b *Benchmark) viewBench(app *App, _ ui.Tabular, _ *client.GVR, path string) {
- data, err := readBenchFile(app.Config, b.benchFile())
+ mdata, err := readBenchFile(app.Config, b.benchFile())
if err != nil {
app.Flash().Errf("Unable to load bench file %s", err)
return
}
- details := NewDetails(b.App(), "Results", fileToSubject(path), contentYAML, false).Update(data)
+ details := NewDetails(b.App(), "Results", fileToSubject(path), contentYAML, false).Update(mdata)
if err := app.inject(details, false); err != nil {
app.Flash().Err(err)
}
@@ -85,9 +85,10 @@ func benchDir(cfg *config.Config) string {
}
func readBenchFile(cfg *config.Config, n string) (string, error) {
- data, err := os.ReadFile(filepath.Join(benchDir(cfg), n))
+ bb, err := os.ReadFile(filepath.Join(benchDir(cfg), n))
if err != nil {
return "", err
}
- return string(data), nil
+
+ return string(bb), nil
}
diff --git a/internal/view/browser.go b/internal/view/browser.go
index 9f8ba17a..3e3b044a 100644
--- a/internal/view/browser.go
+++ b/internal/view/browser.go
@@ -63,8 +63,8 @@ func (b *Browser) getUpdating() bool {
}
// SetCommand sets the current command.
-func (b *Browser) SetCommand(cmd *cmd.Interpreter) {
- b.GetTable().SetCommand(cmd)
+func (b *Browser) SetCommand(i *cmd.Interpreter) {
+ b.GetTable().SetCommand(i)
}
// Init watches all running pods in given namespace.
@@ -76,7 +76,7 @@ func (b *Browser) Init(ctx context.Context) error {
return err
}
colorerFn := model1.DefaultColorer
- if r, ok := model.Registry[b.GVR().String()]; ok && r.Renderer != nil {
+ if r, ok := model.Registry[b.GVR()]; ok && r.Renderer != nil {
colorerFn = r.Renderer.ColorerFunc()
}
b.GetTable().SetColorerFn(colorerFn)
@@ -222,15 +222,15 @@ func (b *Browser) BufferActive(state bool, _ model.BufferKind) {
slogs.Error, err,
)
}
- data := b.GetModel().Peek()
- cdata := b.Update(data, b.App().Conn().HasMetrics())
+ mdata := b.GetModel().Peek()
+ cdata := b.Update(mdata, b.App().Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
}
b.setUpdating(true)
defer b.setUpdating(false)
- b.UpdateUI(cdata, data)
+ b.UpdateUI(cdata, mdata)
if b.GetRowCount() > 1 {
b.App().filterHistory.Push(b.CmdBuff().GetText())
}
@@ -282,7 +282,7 @@ func (b *Browser) Aliases() sets.Set[string] {
// Model Protocol...
// TableNoData notifies view no data is available.
-func (b *Browser) TableNoData(data *model1.TableData) {
+func (b *Browser) TableNoData(mdata *model1.TableData) {
var cancel context.CancelFunc
b.mx.RLock()
cancel = b.cancelFn
@@ -296,7 +296,7 @@ func (b *Browser) TableNoData(data *model1.TableData) {
return
}
- cdata := b.Update(data, b.app.Conn().HasMetrics())
+ cdata := b.Update(mdata, b.app.Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
@@ -307,12 +307,12 @@ func (b *Browser) TableNoData(data *model1.TableData) {
b.app.Flash().Warnf("No resources found for %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
}
b.refreshActions()
- b.UpdateUI(cdata, data)
+ b.UpdateUI(cdata, mdata)
})
}
// TableDataChanged notifies view new data is available.
-func (b *Browser) TableDataChanged(data *model1.TableData) {
+func (b *Browser) TableDataChanged(mdata *model1.TableData) {
var cancel context.CancelFunc
b.mx.RLock()
cancel = b.cancelFn
@@ -322,7 +322,7 @@ func (b *Browser) TableDataChanged(data *model1.TableData) {
return
}
- cdata := b.Update(data, b.app.Conn().HasMetrics())
+ cdata := b.Update(mdata, b.app.Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
@@ -333,7 +333,7 @@ func (b *Browser) TableDataChanged(data *model1.TableData) {
b.app.Flash().Infof("Viewing %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
}
b.refreshActions()
- b.UpdateUI(cdata, data)
+ b.UpdateUI(cdata, mdata)
})
}
@@ -358,6 +358,7 @@ func (b *Browser) viewCmd(evt *tcell.EventKey) *tcell.EventKey {
if err := v.app.inject(v, false); err != nil {
v.app.Flash().Err(err)
}
+
return nil
}
diff --git a/internal/view/command.go b/internal/view/command.go
index 9b3cacf0..ba250549 100644
--- a/internal/view/command.go
+++ b/internal/view/command.go
@@ -345,8 +345,8 @@ func (c *Command) exec(p *cmd.Interpreter, gvr *client.GVR, comp model.Component
comp.SetCommand(p)
if clearStack {
- cmd := contextRX.ReplaceAllString(p.GetLine(), "")
- c.app.Config.SetActiveView(cmd)
+ v := contextRX.ReplaceAllString(p.GetLine(), "")
+ c.app.Config.SetActiveView(v)
}
if err := c.app.inject(comp, clearStack); err != nil {
return err
diff --git a/internal/view/dir.go b/internal/view/dir.go
index 58de96ec..0fa328ef 100644
--- a/internal/view/dir.go
+++ b/internal/view/dir.go
@@ -34,10 +34,10 @@ type Dir struct {
}
// NewDir returns a new instance.
-func NewDir(path string) ResourceViewer {
+func NewDir(s string) ResourceViewer {
d := Dir{
ResourceViewer: NewBrowser(client.DirGVR),
- path: path,
+ path: s,
}
d.GetTable().SetBorderFocusColor(tcell.ColorAliceBlue)
d.GetTable().SetSelectedStyle(tcell.StyleDefault.Foreground(tcell.ColorWhite).Background(tcell.ColorAliceBlue).Attributes(tcell.AttrNone))
diff --git a/internal/view/exec.go b/internal/view/exec.go
index 3ecc67f6..f3d98cd4 100644
--- a/internal/view/exec.go
+++ b/internal/view/exec.go
@@ -344,18 +344,18 @@ func launchPodShell(v model.Igniter, a *App) {
func sshIn(a *App, fqn, co string) error {
cfg := a.Config.K9s.ShellPod
- os, err := getPodOS(a.factory, fqn)
+ platform, err := getPodOS(a.factory, fqn)
if err != nil {
return fmt.Errorf("os detect failed: %w", err)
}
- args := buildShellArgs("exec", fqn, co, a.Conn().Config().Flags().KubeConfig)
+ args := buildShellArgs("exec", fqn, co, a.Conn().Config().Flags())
args = append(args, "--")
if len(cfg.Command) > 0 {
args = append(args, cfg.Command...)
args = append(args, cfg.Args...)
} else {
- if os == windowsOS {
+ if platform == windowsOS {
args = append(args, "--", powerShell)
}
args = append(args, "sh", "-c", shellCheck)
@@ -380,7 +380,7 @@ func nukeK9sShell(a *App) error {
if err != nil {
return err
}
- if !ct.FeatureGates.NodeShell {
+ if !ct.FeatureGates.NodeShell || a.Config.K9s.ShellPod == nil {
return nil
}
diff --git a/internal/view/node.go b/internal/view/node.go
index 25daea71..ff184006 100644
--- a/internal/view/node.go
+++ b/internal/view/node.go
@@ -72,7 +72,7 @@ func (n *Node) bindDangerousKeys(aa *ui.KeyActions) {
slog.Error("No active context located", slogs.Error, err)
return
}
- if ct.FeatureGates.NodeShell {
+ if ct.FeatureGates.NodeShell && n.App().Config.K9s.ShellPod != nil {
aa.Add(ui.KeyS, ui.NewKeyAction("Shell", n.sshCmd, true))
}
}
diff --git a/internal/view/pf_dialog.go b/internal/view/pf_dialog.go
index 6bb53024..349a9b24 100644
--- a/internal/view/pf_dialog.go
+++ b/internal/view/pf_dialog.go
@@ -52,9 +52,9 @@ func ShowPortForwards(v ResourceViewer, path string, ports port.ContainerPortSpe
coField.SetPlaceholder("Enter a container name::port")
}
coField.SetChangedFunc(func(s string) {
- port := extractPort(s)
- loField.SetText(port)
- p2 = port
+ p := extractPort(s)
+ loField.SetText(p)
+ p2 = p
})
if loField.GetText() == "" {
loField.SetPlaceholder("Enter a local port")
@@ -121,16 +121,16 @@ func DismissPortForwards(v ResourceViewer, p *ui.Pages) {
// ----------------------------------------------------------------------------
// Helpers...
-func extractPort(port string) string {
- tokens := strings.Split(port, "::")
+func extractPort(p string) string {
+ tokens := strings.Split(p, "::")
if len(tokens) < 2 {
- ports := strings.Split(port, ",")
+ ports := strings.Split(p, ",")
for _, t := range ports {
if _, err := strconv.Atoi(strings.TrimSpace(t)); err != nil {
return ""
}
}
- return port
+ return p
}
return tokens[1]
diff --git a/internal/view/pod.go b/internal/view/pod.go
index 5782ff0f..d22cd1bc 100644
--- a/internal/view/pod.go
+++ b/internal/view/pod.go
@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
)
const (
@@ -399,11 +400,11 @@ func resumeShellIn(a *App, c model.Component, path, co string) {
}
func shellIn(a *App, fqn, co string) {
- os, err := getPodOS(a.factory, fqn)
+ platform, err := getPodOS(a.factory, fqn)
if err != nil {
slog.Warn("OS detect failed", slogs.Error, err)
}
- args := computeShellArgs(fqn, co, a.Conn().Config().Flags().KubeConfig, os)
+ args := computeShellArgs(fqn, co, a.Conn().Config().Flags(), platform)
c := color.New(color.BgGreen).Add(color.FgBlack).Add(color.Bold)
err = runK(a, &shellOpts{
@@ -451,31 +452,49 @@ func resumeAttachIn(a *App, c model.Component, path, co string) {
}
func attachIn(a *App, path, co string) {
- args := buildShellArgs("attach", path, co, a.Conn().Config().Flags().KubeConfig)
+ args := buildShellArgs("attach", path, co, a.Conn().Config().Flags())
c := color.New(color.BgGreen).Add(color.FgBlack).Add(color.Bold)
if err := runK(a, &shellOpts{clear: true, banner: c.Sprintf(bannerFmt, path, co), args: args}); err != nil {
a.Flash().Errf("Attach exec failed: %s", err)
}
}
-func computeShellArgs(path, co string, kcfg *string, os string) []string {
- args := buildShellArgs("exec", path, co, kcfg)
- if os == windowsOS {
+func computeShellArgs(path, co string, flags *genericclioptions.ConfigFlags, platform string) []string {
+ args := buildShellArgs("exec", path, co, flags)
+ if platform == windowsOS {
return append(args, "--", powerShell)
}
+
return append(args, "--", "sh", "-c", shellCheck)
}
-func buildShellArgs(cmd, path, co string, kcfg *string) []string {
+func isFlagSet(flag *string) (string, bool) {
+ if flag == nil || *flag == "" {
+ return "", false
+ }
+
+ return *flag, true
+}
+
+func buildShellArgs(cmd, path, co string, flags *genericclioptions.ConfigFlags) []string {
args := make([]string, 0, 15)
+
args = append(args, cmd, "-it")
ns, po := client.Namespaced(path)
if ns != client.BlankNamespace {
args = append(args, "-n", ns)
}
args = append(args, po)
- if kcfg != nil && *kcfg != "" {
- args = append(args, "--kubeconfig", *kcfg)
+ if flags != nil {
+ if v, ok := isFlagSet(flags.KubeConfig); ok {
+ args = append(args, "--kubeconfig", v)
+ }
+ if v, ok := isFlagSet(flags.Context); ok {
+ args = append(args, "--context", v)
+ }
+ if v, ok := isFlagSet(flags.BearerToken); ok {
+ args = append(args, "--token", v)
+ }
}
if co != "" {
args = append(args, "-c", co)
@@ -560,12 +579,12 @@ func getPodOS(f dao.Factory, fqn string) (string, error) {
}
func osFromSelector(s map[string]string) (string, bool) {
- if os, ok := s[osBetaSelector]; ok {
- return os, ok
+ if platform, ok := s[osBetaSelector]; ok {
+ return platform, ok
}
+ platform, ok := s[osSelector]
- os, ok := s[osSelector]
- return os, ok
+ return platform, ok
}
func resourceSorters(t *Table) *ui.KeyActions {
diff --git a/internal/view/pod_int_test.go b/internal/view/pod_int_test.go
index 3a84f727..95975f2d 100644
--- a/internal/view/pod_int_test.go
+++ b/internal/view/pod_int_test.go
@@ -8,50 +8,67 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
)
+func newStr(s string) *string {
+ return &s
+}
+
func TestComputeShellArgs(t *testing.T) {
- config, empty := "coolConfig", ""
- _ = config
uu := map[string]struct {
fqn, co, os string
- cfg *string
+ cfg *genericclioptions.ConfigFlags
e string
}{
"config": {
- "fred/blee",
- "c1",
- "darwin",
- &config,
- "exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
+ fqn: "fred/blee",
+ co: "c1",
+ os: "darwin",
+ cfg: &genericclioptions.ConfigFlags{
+ KubeConfig: newStr("coolConfig"),
+ },
+ e: "exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
},
+
"no-config": {
- "fred/blee",
- "c1",
- "linux",
- nil,
- "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
+ fqn: "fred/blee",
+ co: "c1",
+ os: "linux",
+ e: "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
},
+
"empty-config": {
- "fred/blee",
- "",
- "",
- &empty,
- "exec -it -n fred blee -- sh -c " + shellCheck,
+ fqn: "fred/blee",
+ cfg: new(genericclioptions.ConfigFlags),
+ e: "exec -it -n fred blee -- sh -c " + shellCheck,
},
+
"single-container": {
- "fred/blee",
- "",
- "linux",
- &empty,
- "exec -it -n fred blee -- sh -c " + shellCheck,
+ fqn: "fred/blee",
+ os: "linux",
+ cfg: new(genericclioptions.ConfigFlags),
+ e: "exec -it -n fred blee -- sh -c " + shellCheck,
},
+
"windows": {
- "fred/blee",
- "c1",
- windowsOS,
- &empty,
- "exec -it -n fred blee -c c1 -- powershell",
+ fqn: "fred/blee",
+ co: "c1",
+ os: windowsOS,
+ cfg: new(genericclioptions.ConfigFlags),
+ e: "exec -it -n fred blee -c c1 -- powershell",
+ },
+
+ "full": {
+ fqn: "fred/blee",
+ co: "c1",
+ os: windowsOS,
+ cfg: &genericclioptions.ConfigFlags{
+ KubeConfig: newStr("coolConfig"),
+ Context: newStr("coolContext"),
+ BearerToken: newStr("coolToken"),
+ },
+ e: "exec -it -n fred blee --kubeconfig coolConfig --context coolContext --token coolToken -c c1 -- powershell",
},
}
@@ -63,46 +80,3 @@ func TestComputeShellArgs(t *testing.T) {
})
}
}
-
-// func TestComputeShellArgs(t *testing.T) {
-// config, empty := "coolConfig", ""
-// uu := map[string]struct {
-// path, co string
-// cfg *string
-// e string
-// }{
-// "config": {
-// "fred/blee",
-// "c1",
-// &config,
-// "exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
-// },
-// "noconfig": {
-// "fred/blee",
-// "c1",
-// nil,
-// "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
-// },
-// "emptyConfig": {
-// "fred/blee",
-// "c1",
-// &empty,
-// "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
-// },
-// "singleContainer": {
-// "fred/blee",
-// "",
-// &empty,
-// "exec -it -n fred blee -- sh -c " + shellCheck,
-// },
-// }
-
-// for k := range uu {
-// u := uu[k]
-// t.Run(k, func(t *testing.T) {
-// args := computeShellArgs(u.path, u.co, u.cfg)
-
-// assert.Equal(t, u.e, strings.Join(args, " "))
-// })
-// }
-// }
diff --git a/internal/view/table.go b/internal/view/table.go
index d00c40bb..2312bd27 100644
--- a/internal/view/table.go
+++ b/internal/view/table.go
@@ -67,8 +67,8 @@ func (t *Table) Init(ctx context.Context) (err error) {
}
// SetCommand sets the current command.
-func (t *Table) SetCommand(cmd *cmd.Interpreter) {
- t.command = cmd
+func (t *Table) SetCommand(i *cmd.Interpreter) {
+ t.command = i
}
// HeaderIndex returns index of a given column or false if not found.
diff --git a/internal/view/table_helper.go b/internal/view/table_helper.go
index 31efe246..5fda907d 100644
--- a/internal/view/table_helper.go
+++ b/internal/view/table_helper.go
@@ -42,8 +42,8 @@ func computeFilename(dumpPath, ns, title, path string) (string, error) {
return strings.ToLower(filepath.Join(dir, fName)), nil
}
-func saveTable(dir, title, path string, data *model1.TableData) (string, error) {
- ns := data.GetNamespace()
+func saveTable(dir, title, path string, mdata *model1.TableData) (string, error) {
+ ns := mdata.GetNamespace()
if client.IsClusterWide(ns) {
ns = client.NamespaceAll
}
@@ -69,9 +69,9 @@ func saveTable(dir, title, path string, data *model1.TableData) (string, error)
}()
w := csv.NewWriter(out)
- _ = w.Write(data.ColumnNames(true))
+ _ = w.Write(mdata.ColumnNames(true))
- data.RowsRange(func(_ int, re model1.RowEvent) bool {
+ mdata.RowsRange(func(_ int, re model1.RowEvent) bool {
_ = w.Write(re.Row.Fields)
return true
})
diff --git a/internal/watch/factory.go b/internal/watch/factory.go
index 17c9382a..e65d1eb1 100644
--- a/internal/watch/factory.go
+++ b/internal/watch/factory.go
@@ -35,9 +35,9 @@ type Factory struct {
}
// NewFactory returns a new informers factory.
-func NewFactory(client client.Connection) *Factory {
+func NewFactory(clt client.Connection) *Factory {
return &Factory{
- client: client,
+ client: clt,
factories: make(map[string]di.DynamicSharedInformerFactory),
forwarders: NewForwarders(),
}
@@ -72,7 +72,7 @@ func (f *Factory) Terminate() {
}
// List returns a resource collection.
-func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Selector) ([]runtime.Object, error) {
+func (f *Factory) List(gvr *client.GVR, ns string, wait bool, lbls labels.Selector) ([]runtime.Object, error) {
if client.IsAllNamespace(ns) {
ns = client.BlankNamespace
}
@@ -83,9 +83,9 @@ func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Sele
var oo []runtime.Object
if client.IsClusterScoped(ns) {
- oo, err = inf.Lister().List(labels)
+ oo, err = inf.Lister().List(lbls)
} else {
- oo, err = inf.Lister().ByNamespace(ns).List(labels)
+ oo, err = inf.Lister().ByNamespace(ns).List(lbls)
}
if !wait || (wait && inf.Informer().HasSynced()) {
return oo, err
@@ -93,9 +93,9 @@ func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Sele
f.waitForCacheSync(ns)
if client.IsClusterScoped(ns) {
- return inf.Lister().List(labels)
+ return inf.Lister().List(lbls)
}
- return inf.Lister().ByNamespace(ns).List(labels)
+ return inf.Lister().ByNamespace(ns).List(lbls)
}
// HasSynced checks if given informer is up to date.
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 551c1973..0227c9a4 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: k9s
base: core22
-version: 'v0.50.2'
+version: 'v0.50.3'
summary: K9s is a CLI to view and manage your Kubernetes clusters.
description: |
K9s is a CLI to view and manage your Kubernetes clusters. By leveraging a terminal UI, you can easily traverse Kubernetes resources and view the state of your clusters in a single powerful session.