Rel v0.50.3 (#3276)

* update linter

* spring cleaning

- mo' code cleanup
- small bug fixin'

* fix cust col jq parser support

* add context,token to shell args

* rel notes
mine
Fernand Galiana 2025-04-13 18:25:04 -06:00 committed by GitHub
parent 630f82cacd
commit 594c2c6e4c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
41 changed files with 380 additions and 346 deletions

View File

@ -20,5 +20,5 @@ jobs:
- name: Lint
uses: golangci/golangci-lint-action@v7.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-pr-check
github-token: ${{ secrets.GITHUB_TOKEN }}
version: v2.1.1

View File

@ -41,9 +41,6 @@ linters:
- goconst
- dogsled
- lll
# - dupl
# - gochecknoinits
# - mnd
settings:
dogsled:
@ -96,20 +93,24 @@ linters:
goconst:
min-len: 2
min-occurrences: 3
ignore-strings: 'blee|duh|cl-1|ct-1-1'
ignore-string-values:
- blee
- duh
- cl-1
- ct-1-1
# gocritic:
# enabled-tags:
# - diagnostic
# - experimental
# - opinionated
# - performance
# - style
# disabled-checks:
# - dupImport # https://github.com/go-critic/go-critic/issues/845
# - ifElseChain
# - octalLiteral
# - whyNoLint
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- dupImport # https://github.com/go-critic/go-critic/issues/845
- ifElseChain
- octalLiteral
- whyNoLint
gocyclo:
min-complexity: 35

View File

@ -11,7 +11,7 @@ DATE ?= $(shell TZ=UTC date -j -f "%s" ${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:
else
DATE ?= $(shell date -u -d @${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:%M:%SZ")
endif
VERSION ?= v0.50.2
VERSION ?= v0.50.3
IMG_NAME := derailed/k9s
IMAGE := ${IMG_NAME}:${VERSION}

View File

@ -0,0 +1,38 @@
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s.png" align="center" width="800" height="auto"/>
# Release v0.50.3
## Notes
Thank you to all that contributed with flushing out issues and enhancements for K9s!
I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev
and see if we're happier with some of the fixes!
If you've filed an issue please help me verify and close.
Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated!
Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!
As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey,
please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/zt-3360a389v-ElLHrb0Dp1kAXqYUItSAFA)
## Maintenance Release!
A bit more code spring cleaning/TLC, plus fixes for a few bugs:
1. [RBAC View] Fix issue bombing out on RBAC cluster roles
2. [Custom Views] Fix issue with parsing `jq` filters and bombing out (Big Thanks to Pierre for flagging it!)
---
## Contributed PRs
Please be sure to give `Big Thanks!` and `ATTA Girls/Boys!` to all the fine contributors for making K9s better for all of us!!
* [#3273](https://github.com/derailed/k9s/pull/3273) k9s plugin scopes containers issue
* [#3169](https://github.com/derailed/k9s/pull/3169) feat: pass context and token flags to kubectl exec commands
---
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2025 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -11,6 +11,7 @@ import (
"github.com/derailed/k9s/internal/slogs"
"github.com/fvbommel/sortorder"
"gopkg.in/yaml.v3"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -43,9 +44,9 @@ func (c gvrCache) get(gvrs string) *GVR {
var gvrsCache = make(gvrCache)
// NewGVR builds a new gvr from a group, version, resource.
func NewGVR(path string) *GVR {
raw := path
tokens := strings.Split(path, ":")
func NewGVR(s string) *GVR {
raw := s
tokens := strings.Split(s, ":")
var g, v, r, sr string
if len(tokens) == 2 {
raw, sr = tokens[0], tokens[1]
@ -59,10 +60,10 @@ func NewGVR(path string) *GVR {
case 1:
r = tokens[0]
default:
slog.Error("GVR init failed!", slogs.Error, fmt.Errorf("can't parse GVR %q", path))
slog.Error("GVR init failed!", slogs.Error, fmt.Errorf("can't parse GVR %q", s))
}
gvr := GVR{raw: path, g: g, v: v, r: r, sr: sr}
gvr := GVR{raw: s, g: g, v: v, r: r, sr: sr}
if cgvr := gvrsCache.get(gvr.String()); cgvr != nil {
return cgvr
}
@ -204,6 +205,19 @@ func (g *GVR) IsDecodable() bool {
return g.GVK().Kind == "secrets"
}
var _ = yaml.Marshaler((*GVR)(nil))
var _ = yaml.Unmarshaler((*GVR)(nil))
func (g *GVR) MarshalYAML() (any, error) {
return g.String(), nil
}
func (g *GVR) UnmarshalYAML(n *yaml.Node) error {
*g = *NewGVR(n.Value)
return nil
}
// GVRs represents a collection of gvr.
type GVRs []*GVR

View File

@ -15,7 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var toFileName = regexp.MustCompile(`[^(\w/\.)]`)
var toFileName = regexp.MustCompile(`[^(\w/.)]`)
// IsClusterWide returns true if ns designates cluster scope, false otherwise.
func IsClusterWide(ns string) bool {

View File

@ -318,17 +318,19 @@ func ToMB(v int64) int64 {
}
// ToPercentage computes percentage as string otherwise n/aa.
func ToPercentage(v1, v2 int64) int {
if v2 == 0 {
func ToPercentage(v, dv int64) int {
if dv == 0 {
return 0
}
return int(math.Floor((float64(v1) / float64(v2)) * 100))
return int(math.Floor((float64(v) / float64(dv)) * 100))
}
// ToPercentageStr computes percentage, but if v2 is 0, it will return NAValue instead of 0.
func ToPercentageStr(v1, v2 int64) string {
if v2 == 0 {
func ToPercentageStr(v, dv int64) string {
if dv == 0 {
return NA
}
return strconv.Itoa(ToPercentage(v1, v2))
return strconv.Itoa(ToPercentage(v, dv))
}

View File

@ -19,17 +19,19 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
)
// Alias tracks shortname to GVR mappings.
type Alias map[string]*client.GVR
type (
// Alias tracks shortname to GVR mappings.
Alias map[string]*client.GVR
// ShortNames represents a collection of shortnames for aliases.
type ShortNames map[*client.GVR][]string
// ShortNames represents a collection of shortnames for aliases.
ShortNames map[*client.GVR][]string
// Aliases represents a collection of aliases.
type Aliases struct {
Alias Alias `yaml:"aliases"`
mx sync.RWMutex
}
// Aliases represents a collection of aliases.
Aliases struct {
Alias Alias `yaml:"aliases"`
mx sync.RWMutex
}
)
// NewAliases return a new alias.
func NewAliases() *Aliases {
@ -108,6 +110,7 @@ func (a *Aliases) Get(alias string) (*client.GVR, bool) {
func (a *Aliases) Define(gvr *client.GVR, aliases ...string) {
a.mx.Lock()
defer a.mx.Unlock()
for _, alias := range aliases {
if _, ok := a.Alias[alias]; !ok && alias != "" {
a.Alias[alias] = gvr
@ -131,16 +134,6 @@ func (a *Aliases) Load(path string) error {
return a.LoadFile(path)
}
type aliases struct {
Alias map[string]string `yaml:"aliases"`
}
func newAliases(s int) aliases {
return aliases{
Alias: make(map[string]string, s),
}
}
// LoadFile loads alias from a given file.
func (a *Aliases) LoadFile(path string) error {
if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) {
@ -155,14 +148,10 @@ func (a *Aliases) LoadFile(path string) error {
slog.Warn("Aliases validation failed", slogs.Error, err)
}
var aa aliases
if err := yaml.Unmarshal(bb, &aa); err != nil {
return err
}
a.mx.Lock()
defer a.mx.Unlock()
for alias, cmd := range aa.Alias {
a.Alias[alias] = client.NewGVR(cmd)
if err := yaml.Unmarshal(bb, a); err != nil {
return err
}
return nil
@ -198,18 +187,17 @@ func (a *Aliases) loadDefaultAliases() {
// Save alias to disk.
func (a *Aliases) Save() error {
slog.Debug("Saving Aliases...")
return a.SaveAliases(AppAliasesFile)
a.mx.RLock()
defer a.mx.RUnlock()
return a.saveAliases(AppAliasesFile)
}
// SaveAliases saves aliases to a given file.
func (a *Aliases) SaveAliases(path string) error {
func (a *Aliases) saveAliases(path string) error {
if err := data.EnsureDirPath(path, data.DefaultDirMod); err != nil {
return err
}
aa := newAliases(len(a.Alias))
for alias, gvr := range a.Alias {
aa.Alias[alias] = gvr.String()
}
return data.SaveYAML(path, aa)
return data.SaveYAML(path, a)
}

View File

@ -186,16 +186,16 @@ func (c *Config) ActiveView() string {
if err != nil {
return data.DefaultView
}
cmd := ct.View.Active
v := ct.View.Active
if c.K9s.manualCommand != nil && *c.K9s.manualCommand != "" {
cmd = *c.K9s.manualCommand
v = *c.K9s.manualCommand
// We reset the manualCommand property because
// the command-line switch should only be considered once,
// on startup.
*c.K9s.manualCommand = ""
}
return cmd
return v
}
func (c *Config) ResetActiveView() {

View File

@ -8,44 +8,44 @@ import (
)
var accessors = Accessors{
*client.WkGVR: new(Workload),
*client.CtGVR: new(Context),
*client.CoGVR: new(Container),
*client.ScnGVR: new(ImageScan),
*client.SdGVR: new(ScreenDump),
*client.BeGVR: new(Benchmark),
*client.PfGVR: new(PortForward),
*client.DirGVR: new(Dir),
client.WkGVR: new(Workload),
client.CtGVR: new(Context),
client.CoGVR: new(Container),
client.ScnGVR: new(ImageScan),
client.SdGVR: new(ScreenDump),
client.BeGVR: new(Benchmark),
client.PfGVR: new(PortForward),
client.DirGVR: new(Dir),
*client.SvcGVR: new(Service),
*client.PodGVR: new(Pod),
*client.NodeGVR: new(Node),
*client.NsGVR: new(Namespace),
*client.CmGVR: new(ConfigMap),
*client.SecGVR: new(Secret),
client.SvcGVR: new(Service),
client.PodGVR: new(Pod),
client.NodeGVR: new(Node),
client.NsGVR: new(Namespace),
client.CmGVR: new(ConfigMap),
client.SecGVR: new(Secret),
*client.DpGVR: new(Deployment),
*client.DsGVR: new(DaemonSet),
*client.StsGVR: new(StatefulSet),
*client.RsGVR: new(ReplicaSet),
client.DpGVR: new(Deployment),
client.DsGVR: new(DaemonSet),
client.StsGVR: new(StatefulSet),
client.RsGVR: new(ReplicaSet),
*client.CjGVR: new(CronJob),
*client.JobGVR: new(Job),
client.CjGVR: new(CronJob),
client.JobGVR: new(Job),
*client.HmGVR: new(HelmChart),
*client.HmhGVR: new(HelmHistory),
client.HmGVR: new(HelmChart),
client.HmhGVR: new(HelmHistory),
*client.CrdGVR: new(CustomResourceDefinition),
client.CrdGVR: new(CustomResourceDefinition),
}
// Accessors represents a collection of dao accessors.
type Accessors map[client.GVR]Accessor
type Accessors map[*client.GVR]Accessor
// AccessorFor returns a client accessor for a resource if registered.
// Otherwise it returns a generic accessor.
// Customize here for non resource types or types with metrics or logs.
func AccessorFor(f Factory, gvr *client.GVR) (Accessor, error) {
r, ok := accessors[*gvr]
r, ok := accessors[gvr]
if !ok {
r = new(Scaler)
slog.Debug("No DAO registry entry. Using generics!", slogs.GVR, gvr)

View File

@ -146,10 +146,10 @@ func (h *HelmHistory) Rollback(_ context.Context, path, rev string) error {
if err != nil {
return fmt.Errorf("could not convert revision to a number: %w", err)
}
client := action.NewRollback(cfg)
client.Version = ver
clt := action.NewRollback(cfg)
clt.Version = ver
return client.Run(n)
return clt.Run(n)
}
// Delete uninstall a Helm.

View File

@ -40,7 +40,7 @@ func GetDefaultContainer(m *metav1.ObjectMeta, spec *v1.PodSpec) (string, bool)
}
}
slog.Warn("Container not found. Annotation ignored",
slogs.CO, defaultContainer,
slogs.Container, defaultContainer,
slogs.Annotation, DefaultContainerAnnotation,
)
@ -74,11 +74,12 @@ func inList(ll []string, s string) bool {
return false
}
func toPerc(v1, v2 float64) float64 {
if v2 == 0 {
func toPerc(v, dv float64) float64 {
if dv == 0 {
return 0
}
return math.Round((v1 / v2) * 100)
return math.Round((v / dv) * 100)
}
// ToYAML converts a resource to its YAML representation.
@ -121,6 +122,7 @@ func serviceAccountMatches(podSA, saName string) bool {
if podSA == "" {
podSA = defaultServiceAccount
}
return podSA == saName
}

View File

@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
@ -487,6 +488,17 @@ func (p *Pod) isControlled(path string) (fqn string, ok bool, err error) {
return "", false, nil
}
var toastPhases = sets.New(
render.PhaseCompleted,
render.PhasePending,
render.PhaseCrashLoop,
render.PhaseError,
render.PhaseImagePullBackOff,
render.PhaseContainerStatusUnknown,
render.PhaseEvicted,
render.PhaseOOMKilled,
)
func (p *Pod) Sanitize(ctx context.Context, ns string) (int, error) {
oo, err := p.Resource.List(ctx, ns)
if err != nil {
@ -504,22 +516,8 @@ func (p *Pod) Sanitize(ctx context.Context, ns string) (int, error) {
if err != nil {
continue
}
switch render.PodStatus(&pod) {
case render.PhaseCompleted:
fallthrough
case render.PhasePending:
fallthrough
case render.PhaseCrashLoop:
fallthrough
case render.PhaseError:
fallthrough
case render.PhaseImagePullBackOff:
fallthrough
case render.PhaseContainerStatusUnknown:
fallthrough
case render.PhaseEvicted:
fallthrough
case render.PhaseOOMKilled:
if toastPhases.Has(render.PodStatus(&pod)) {
// !!BOZO!! Might need to bump timeout otherwise rev limit if too many??
fqn := client.FQN(pod.Namespace, pod.Name)
slog.Debug("Sanitizing resource", slogs.FQN, fqn)

View File

@ -44,20 +44,20 @@ func (p *PortForward) List(ctx context.Context, _ string) ([]runtime.Object, err
}
path, _ := ctx.Value(internal.KeyPath).(string)
config, err := config.NewBench(benchFile)
bcfg, err := config.NewBench(benchFile)
if err != nil {
slog.Debug("No custom benchmark config file found", slogs.FileName, benchFile)
}
ff, cc := p.getFactory().Forwarders(), config.Benchmarks.Containers
ff, cc := p.getFactory().Forwarders(), bcfg.Benchmarks.Containers
oo := make([]runtime.Object, 0, len(ff))
for k, f := range ff {
if !strings.HasPrefix(k, path) {
continue
}
cfg := render.BenchCfg{
C: config.Benchmarks.Defaults.C,
N: config.Benchmarks.Defaults.N,
C: bcfg.Benchmarks.Defaults.C,
N: bcfg.Benchmarks.Defaults.N,
}
if cust, ok := cc[PodToKey(k)]; ok {
cfg.C, cfg.N = cust.C, cust.N

View File

@ -170,7 +170,7 @@ func (p *PortForwarder) Start(path string, tt port.PortTunnel) (*portforward.Por
return p.forwardPorts("POST", req.URL(), tt.Address, tt.PortMap())
}
func (p *PortForwarder) forwardPorts(method string, url *url.URL, addr, portMap string) (*portforward.PortForwarder, error) {
func (p *PortForwarder) forwardPorts(method string, u *url.URL, addr, portMap string) (*portforward.PortForwarder, error) {
cfg, err := p.Client().Config().RESTConfig()
if err != nil {
return nil, err
@ -179,10 +179,10 @@ func (p *PortForwarder) forwardPorts(method string, url *url.URL, addr, portMap
if err != nil {
return nil, err
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport, Timeout: defaultTimeout}, method, url)
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport, Timeout: defaultTimeout}, method, u)
if !cmdutil.PortForwardWebsockets.IsDisabled() {
tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(url, cfg)
tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(u, cfg)
if err != nil {
return nil, err
}

View File

@ -6,12 +6,10 @@ package dao
import (
"context"
"fmt"
"log/slog"
"github.com/derailed/k9s/internal"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/render"
"github.com/derailed/k9s/internal/slogs"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@ -69,7 +67,7 @@ func (r *Rbac) loadClusterRoleBinding(path string) ([]runtime.Object, error) {
return nil, err
}
var cr rbacv1.ClusterRole
err = runtime.DefaultUnstructuredConverter.FromUnstructured(crbo.(*unstructured.Unstructured).Object, &cro)
err = runtime.DefaultUnstructuredConverter.FromUnstructured(cro.(*unstructured.Unstructured).Object, &cr)
if err != nil {
return nil, err
}
@ -114,7 +112,6 @@ func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {
}
func (r *Rbac) loadClusterRole(fqn string) ([]runtime.Object, error) {
slog.Debug("LOAD-CR", slogs.FQN, fqn)
o, err := r.getFactory().Get(client.CrGVR, fqn, true, labels.Everything())
if err != nil {
return nil, err

View File

@ -30,7 +30,7 @@ func getMeta(ctx context.Context, gvr *client.GVR) (ResourceMeta, error) {
}
func resourceMeta(gvr *client.GVR) ResourceMeta {
meta, ok := Registry[gvr.String()]
meta, ok := Registry[gvr]
if !ok {
meta = ResourceMeta{
DAO: new(dao.Table),

View File

@ -102,7 +102,7 @@ func (h *PulseHealth) checkMetrics(ctx context.Context) (health.Checks, error) {
}
func (h *PulseHealth) check(ctx context.Context, ns string, gvr *client.GVR) (*health.Check, error) {
meta, ok := Registry[gvr.String()]
meta, ok := Registry[gvr]
if !ok {
meta = ResourceMeta{
DAO: new(dao.Table),

View File

@ -13,181 +13,181 @@ import (
// Registry tracks resources metadata.
// BOZO!! Break up deps and merge into single registrar.
var Registry = map[string]ResourceMeta{
var Registry = map[*client.GVR]ResourceMeta{
// Custom...
client.WkGVR.String(): {
client.WkGVR: {
DAO: new(dao.Workload),
Renderer: new(render.Workload),
},
client.RefGVR.String(): {
client.RefGVR: {
DAO: new(dao.Reference),
Renderer: new(render.Reference),
},
client.DirGVR.String(): {
client.DirGVR: {
DAO: new(dao.Dir),
Renderer: new(render.Dir),
},
client.PuGVR.String(): {
client.PuGVR: {
DAO: new(dao.Pulse),
},
client.HmGVR.String(): {
client.HmGVR: {
DAO: new(dao.HelmChart),
Renderer: new(helm.Chart),
},
client.HmhGVR.String(): {
client.HmhGVR: {
DAO: new(dao.HelmHistory),
Renderer: new(helm.History),
},
client.CoGVR.String(): {
client.CoGVR: {
DAO: new(dao.Container),
Renderer: new(render.Container),
TreeRenderer: new(xray.Container),
},
client.ScnGVR.String(): {
client.ScnGVR: {
DAO: new(dao.ImageScan),
Renderer: new(render.ImageScan),
},
client.CtGVR.String(): {
client.CtGVR: {
DAO: new(dao.Context),
Renderer: new(render.Context),
},
client.SdGVR.String(): {
client.SdGVR: {
DAO: new(dao.ScreenDump),
Renderer: new(render.ScreenDump),
},
client.RbacGVR.String(): {
client.RbacGVR: {
DAO: new(dao.Rbac),
Renderer: new(render.Rbac),
},
client.PolGVR.String(): {
client.PolGVR: {
DAO: new(dao.Policy),
Renderer: new(render.Policy),
},
client.UsrGVR.String(): {
client.UsrGVR: {
DAO: new(dao.Subject),
Renderer: new(render.Subject),
},
client.GrpGVR.String(): {
client.GrpGVR: {
DAO: new(dao.Subject),
Renderer: new(render.Subject),
},
client.PfGVR.String(): {
client.PfGVR: {
DAO: new(dao.PortForward),
Renderer: new(render.PortForward),
},
client.BeGVR.String(): {
client.BeGVR: {
DAO: new(dao.Benchmark),
Renderer: new(render.Benchmark),
},
client.AliGVR.String(): {
client.AliGVR: {
DAO: new(dao.Alias),
Renderer: new(render.Alias),
},
// Core...
client.EpGVR.String(): {
client.EpGVR: {
Renderer: new(render.Endpoints),
},
client.PodGVR.String(): {
client.PodGVR: {
DAO: new(dao.Pod),
Renderer: render.NewPod(),
TreeRenderer: new(xray.Pod),
},
client.NsGVR.String(): {
client.NsGVR: {
DAO: new(dao.Namespace),
Renderer: new(render.Namespace),
},
client.SecGVR.String(): {
client.SecGVR: {
DAO: new(dao.Secret),
Renderer: new(render.Secret),
},
client.CmGVR.String(): {
client.CmGVR: {
DAO: new(dao.ConfigMap),
Renderer: new(render.ConfigMap),
},
client.NodeGVR.String(): {
client.NodeGVR: {
DAO: new(dao.Node),
Renderer: new(render.Node),
},
client.SvcGVR.String(): {
client.SvcGVR: {
DAO: new(dao.Service),
Renderer: new(render.Service),
TreeRenderer: new(xray.Service),
},
client.SaGVR.String(): {
client.SaGVR: {
Renderer: new(render.ServiceAccount),
},
client.PvGVR.String(): {
client.PvGVR: {
Renderer: new(render.PersistentVolume),
},
client.PvcGVR.String(): {
client.PvcGVR: {
Renderer: new(render.PersistentVolumeClaim),
},
// Apps...
client.DpGVR.String(): {
client.DpGVR: {
DAO: new(dao.Deployment),
Renderer: new(render.Deployment),
TreeRenderer: new(xray.Deployment),
},
client.RsGVR.String(): {
client.RsGVR: {
Renderer: new(render.ReplicaSet),
TreeRenderer: new(xray.ReplicaSet),
},
client.StsGVR.String(): {
client.StsGVR: {
DAO: new(dao.StatefulSet),
Renderer: new(render.StatefulSet),
TreeRenderer: new(xray.StatefulSet),
},
client.DsGVR.String(): {
client.DsGVR: {
DAO: new(dao.DaemonSet),
Renderer: new(render.DaemonSet),
TreeRenderer: new(xray.DaemonSet),
},
// Extensions...
client.NpGVR.String(): {
client.NpGVR: {
Renderer: &render.NetworkPolicy{},
},
// Batch...
client.CjGVR.String(): {
client.CjGVR: {
DAO: new(dao.CronJob),
Renderer: new(render.CronJob),
},
client.JobGVR.String(): {
client.JobGVR: {
DAO: new(dao.Job),
Renderer: new(render.Job),
},
// CRDs...
client.CrdGVR.String(): {
client.CrdGVR: {
DAO: new(dao.CustomResourceDefinition),
Renderer: new(render.CustomResourceDefinition),
},
// Storage...
client.ScGVR.String(): {
client.ScGVR: {
Renderer: &render.StorageClass{},
},
// Policy...
client.PdbGVR.String(): {
client.PdbGVR: {
Renderer: &render.PodDisruptionBudget{},
},
// RBAC...
client.CrGVR.String(): {
client.CrGVR: {
DAO: new(dao.Rbac),
Renderer: new(render.ClusterRole),
},
client.CrbGVR.String(): {
client.CrbGVR: {
Renderer: new(render.ClusterRoleBinding),
},
client.RoGVR.String(): {
client.RoGVR: {
Renderer: new(render.Role),
},
client.RobGVR.String(): {
client.RobGVR: {
Renderer: new(render.RoleBinding),
},
}

View File

@ -44,7 +44,7 @@ func NewRevValues(gvr *client.GVR, path, rev string) *RevValues {
}
func getHelmHistDao() *dao.HelmHistory {
return Registry[client.HmhGVR.String()].DAO.(*dao.HelmHistory)
return Registry[client.HmhGVR].DAO.(*dao.HelmHistory)
}
func getRevValues(path, _ string) []string {

View File

@ -237,7 +237,7 @@ func (t *Tree) reconcile(ctx context.Context) error {
}
func (t *Tree) resourceMeta() ResourceMeta {
meta, ok := Registry[t.gvr.String()]
meta, ok := Registry[t.gvr]
if !ok {
meta = ResourceMeta{
DAO: &dao.Table{},

View File

@ -106,10 +106,10 @@ func (b *Benchmark) Canceled() bool {
}
// Run starts a benchmark.
func (b *Benchmark) Run(cluster, context string, done func()) {
func (b *Benchmark) Run(cluster, ct string, done func()) {
slog.Debug("Running benchmark",
slogs.Cluster, cluster,
slogs.Context, context,
slogs.Context, ct,
)
buff := new(bytes.Buffer)
b.worker.Writer = buff
@ -117,18 +117,18 @@ func (b *Benchmark) Run(cluster, context string, done func()) {
b.worker.Run()
b.worker.Stop()
if buff.Len() > 0 {
if err := b.save(cluster, context, buff); err != nil {
if err := b.save(cluster, ct, buff); err != nil {
slog.Error("Saving Benchmark", slogs.Error, err)
}
}
done()
}
func (b *Benchmark) save(cluster, context string, r io.Reader) error {
func (b *Benchmark) save(cluster, ct string, r io.Reader) error {
ns, n := client.Namespaced(b.config.Name)
n = strings.ReplaceAll(n, "|", "_")
n = strings.ReplaceAll(n, ":", "_")
dir, err := config.EnsureBenchmarksDir(cluster, context)
dir, err := config.EnsureBenchmarksDir(cluster, ct)
if err != nil {
return err
}

View File

@ -137,9 +137,9 @@ func TestPFsToPortSpec(t *testing.T) {
if err != nil {
return
}
spec, port := pfs.ToPortSpec(u.specs)
spec, prt := pfs.ToPortSpec(u.specs)
assert.Equal(t, u.spec, spec)
assert.Equal(t, u.port, port)
assert.Equal(t, u.port, prt)
})
}
}

View File

@ -119,8 +119,11 @@ func (cc ColumnSpecs) realize(o runtime.Object, rh model1.Header, row *model1.Ro
parsers[ix] = jsonpath.New(
fmt.Sprintf("column%d", ix),
).AllowMissingKeys(true)
if err := parsers[ix].Parse(cc[ix].Spec); err != nil {
return nil, err
if err := parsers[ix].Parse(cc[ix].Spec); err != nil && !isJQSpec(cc[ix].Spec) {
slog.Warn("Unable to parse custom column",
slogs.Name, cc[ix].Header.Name,
slogs.Error, err,
)
}
}

View File

@ -596,17 +596,16 @@ func Test_podRequests(t *testing.T) {
func makeContainer(n string, restartable bool, rc, rm, lc, lm string) v1.Container {
always := v1.ContainerRestartPolicyAlways
var res v1.ResourceRequirements
var rp *v1.ContainerRestartPolicy
res = v1.ResourceRequirements{
rq := v1.ResourceRequirements{
Requests: makeRes(rc, rm),
Limits: makeRes(lc, lm),
}
var rp *v1.ContainerRestartPolicy
if restartable {
rp = &always
}
return v1.Container{Name: n, Resources: res, RestartPolicy: rp}
return v1.Container{Name: n, Resources: rq, RestartPolicy: rp}
}
func makeRes(c, m string) v1.ResourceList {

View File

@ -147,9 +147,6 @@ const (
// Log tracks a log logger key.
Log = "log"
// CO tracks a container logger key.
CO = "container"
// Annotation tracks an annotation logger key.
Annotation = "annotation"

View File

@ -215,7 +215,7 @@ func (c *Configurator) activeSkin() (string, bool) {
return skin, skin != ""
}
func (c *Configurator) activeConfig() (cluster, context string, ok bool) {
func (c *Configurator) activeConfig() (cluster, contxt string, ok bool) {
if c.Config == nil || c.Config.K9s == nil {
return
}
@ -223,8 +223,8 @@ func (c *Configurator) activeConfig() (cluster, context string, ok bool) {
if err != nil {
return
}
cluster, context = ct.GetClusterName(), c.Config.K9s.ActiveContextName()
if cluster != "" && context != "" {
cluster, contxt = ct.GetClusterName(), c.Config.K9s.ActiveContextName()
if cluster != "" && contxt != "" {
ok = true
}

View File

@ -15,48 +15,48 @@ import (
func TestCmdNew(t *testing.T) {
v := ui.NewPrompt(nil, true, config.NewStyles())
model := model.NewFishBuff(':', model.CommandBuffer)
v.SetModel(model)
model.AddListener(v)
m := model.NewFishBuff(':', model.CommandBuffer)
v.SetModel(m)
m.AddListener(v)
for _, r := range "blee" {
model.Add(r)
m.Add(r)
}
assert.Equal(t, "\x00> [::b]blee\n", v.GetText(false))
}
func TestCmdUpdate(t *testing.T) {
model := model.NewFishBuff(':', model.CommandBuffer)
m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(nil, true, config.NewStyles())
v.SetModel(model)
v.SetModel(m)
model.AddListener(v)
model.SetText("blee", "")
model.Add('!')
m.AddListener(v)
m.SetText("blee", "")
m.Add('!')
assert.Equal(t, "\x00> [::b]blee!\n", v.GetText(false))
assert.False(t, v.InCmdMode())
}
func TestCmdMode(t *testing.T) {
model := model.NewFishBuff(':', model.CommandBuffer)
m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(&ui.App{}, true, config.NewStyles())
v.SetModel(model)
model.AddListener(v)
v.SetModel(m)
m.AddListener(v)
for _, f := range []bool{false, true} {
model.SetActive(f)
m.SetActive(f)
assert.Equal(t, f, v.InCmdMode())
}
}
func TestPrompt_Deactivate(t *testing.T) {
model := model.NewFishBuff(':', model.CommandBuffer)
m := model.NewFishBuff(':', model.CommandBuffer)
v := ui.NewPrompt(&ui.App{}, true, config.NewStyles())
v.SetModel(model)
model.AddListener(v)
v.SetModel(m)
m.AddListener(v)
model.SetActive(true)
m.SetActive(true)
if assert.True(t, v.InCmdMode()) {
v.Deactivate()
assert.False(t, v.InCmdMode())
@ -92,13 +92,13 @@ func TestPromptColor(t *testing.T) {
}
for _, testCase := range testCases {
model := model.NewFishBuff(':', testCase.kind)
m := model.NewFishBuff(':', testCase.kind)
prompt := ui.NewPrompt(&app, true, styles)
prompt.SetModel(model)
model.AddListener(prompt)
prompt.SetModel(m)
m.AddListener(prompt)
model.SetActive(true)
m.SetActive(true)
assert.Equal(t, testCase.expectedColor, prompt.GetBorderColor())
}
}
@ -135,17 +135,17 @@ func TestPromptStyleChanged(t *testing.T) {
}
for _, testCase := range testCases {
model := model.NewFishBuff(':', testCase.kind)
m := model.NewFishBuff(':', testCase.kind)
prompt := ui.NewPrompt(&app, true, styles)
model.SetActive(true)
m.SetActive(true)
prompt.SetModel(model)
model.AddListener(prompt)
prompt.SetModel(m)
m.AddListener(prompt)
prompt.StylesChanged(newStyles)
model.SetActive(true)
m.SetActive(true)
assert.Equal(t, testCase.expectedColor, prompt.GetBorderColor())
}
}

View File

@ -44,13 +44,13 @@ func (b *Benchmark) benchContext(ctx context.Context) context.Context {
}
func (b *Benchmark) viewBench(app *App, _ ui.Tabular, _ *client.GVR, path string) {
data, err := readBenchFile(app.Config, b.benchFile())
mdata, err := readBenchFile(app.Config, b.benchFile())
if err != nil {
app.Flash().Errf("Unable to load bench file %s", err)
return
}
details := NewDetails(b.App(), "Results", fileToSubject(path), contentYAML, false).Update(data)
details := NewDetails(b.App(), "Results", fileToSubject(path), contentYAML, false).Update(mdata)
if err := app.inject(details, false); err != nil {
app.Flash().Err(err)
}
@ -85,9 +85,10 @@ func benchDir(cfg *config.Config) string {
}
func readBenchFile(cfg *config.Config, n string) (string, error) {
data, err := os.ReadFile(filepath.Join(benchDir(cfg), n))
bb, err := os.ReadFile(filepath.Join(benchDir(cfg), n))
if err != nil {
return "", err
}
return string(data), nil
return string(bb), nil
}

View File

@ -63,8 +63,8 @@ func (b *Browser) getUpdating() bool {
}
// SetCommand sets the current command.
func (b *Browser) SetCommand(cmd *cmd.Interpreter) {
b.GetTable().SetCommand(cmd)
func (b *Browser) SetCommand(i *cmd.Interpreter) {
b.GetTable().SetCommand(i)
}
// Init watches all running pods in given namespace.
@ -76,7 +76,7 @@ func (b *Browser) Init(ctx context.Context) error {
return err
}
colorerFn := model1.DefaultColorer
if r, ok := model.Registry[b.GVR().String()]; ok && r.Renderer != nil {
if r, ok := model.Registry[b.GVR()]; ok && r.Renderer != nil {
colorerFn = r.Renderer.ColorerFunc()
}
b.GetTable().SetColorerFn(colorerFn)
@ -222,15 +222,15 @@ func (b *Browser) BufferActive(state bool, _ model.BufferKind) {
slogs.Error, err,
)
}
data := b.GetModel().Peek()
cdata := b.Update(data, b.App().Conn().HasMetrics())
mdata := b.GetModel().Peek()
cdata := b.Update(mdata, b.App().Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
}
b.setUpdating(true)
defer b.setUpdating(false)
b.UpdateUI(cdata, data)
b.UpdateUI(cdata, mdata)
if b.GetRowCount() > 1 {
b.App().filterHistory.Push(b.CmdBuff().GetText())
}
@ -282,7 +282,7 @@ func (b *Browser) Aliases() sets.Set[string] {
// Model Protocol...
// TableNoData notifies view no data is available.
func (b *Browser) TableNoData(data *model1.TableData) {
func (b *Browser) TableNoData(mdata *model1.TableData) {
var cancel context.CancelFunc
b.mx.RLock()
cancel = b.cancelFn
@ -296,7 +296,7 @@ func (b *Browser) TableNoData(data *model1.TableData) {
return
}
cdata := b.Update(data, b.app.Conn().HasMetrics())
cdata := b.Update(mdata, b.app.Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
@ -307,12 +307,12 @@ func (b *Browser) TableNoData(data *model1.TableData) {
b.app.Flash().Warnf("No resources found for %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
}
b.refreshActions()
b.UpdateUI(cdata, data)
b.UpdateUI(cdata, mdata)
})
}
// TableDataChanged notifies view new data is available.
func (b *Browser) TableDataChanged(data *model1.TableData) {
func (b *Browser) TableDataChanged(mdata *model1.TableData) {
var cancel context.CancelFunc
b.mx.RLock()
cancel = b.cancelFn
@ -322,7 +322,7 @@ func (b *Browser) TableDataChanged(data *model1.TableData) {
return
}
cdata := b.Update(data, b.app.Conn().HasMetrics())
cdata := b.Update(mdata, b.app.Conn().HasMetrics())
b.app.QueueUpdateDraw(func() {
if b.getUpdating() {
return
@ -333,7 +333,7 @@ func (b *Browser) TableDataChanged(data *model1.TableData) {
b.app.Flash().Infof("Viewing %s in namespace %s", b.GVR(), client.PrintNamespace(b.GetNamespace()))
}
b.refreshActions()
b.UpdateUI(cdata, data)
b.UpdateUI(cdata, mdata)
})
}
@ -358,6 +358,7 @@ func (b *Browser) viewCmd(evt *tcell.EventKey) *tcell.EventKey {
if err := v.app.inject(v, false); err != nil {
v.app.Flash().Err(err)
}
return nil
}

View File

@ -345,8 +345,8 @@ func (c *Command) exec(p *cmd.Interpreter, gvr *client.GVR, comp model.Component
comp.SetCommand(p)
if clearStack {
cmd := contextRX.ReplaceAllString(p.GetLine(), "")
c.app.Config.SetActiveView(cmd)
v := contextRX.ReplaceAllString(p.GetLine(), "")
c.app.Config.SetActiveView(v)
}
if err := c.app.inject(comp, clearStack); err != nil {
return err

View File

@ -34,10 +34,10 @@ type Dir struct {
}
// NewDir returns a new instance.
func NewDir(path string) ResourceViewer {
func NewDir(s string) ResourceViewer {
d := Dir{
ResourceViewer: NewBrowser(client.DirGVR),
path: path,
path: s,
}
d.GetTable().SetBorderFocusColor(tcell.ColorAliceBlue)
d.GetTable().SetSelectedStyle(tcell.StyleDefault.Foreground(tcell.ColorWhite).Background(tcell.ColorAliceBlue).Attributes(tcell.AttrNone))

View File

@ -344,18 +344,18 @@ func launchPodShell(v model.Igniter, a *App) {
func sshIn(a *App, fqn, co string) error {
cfg := a.Config.K9s.ShellPod
os, err := getPodOS(a.factory, fqn)
platform, err := getPodOS(a.factory, fqn)
if err != nil {
return fmt.Errorf("os detect failed: %w", err)
}
args := buildShellArgs("exec", fqn, co, a.Conn().Config().Flags().KubeConfig)
args := buildShellArgs("exec", fqn, co, a.Conn().Config().Flags())
args = append(args, "--")
if len(cfg.Command) > 0 {
args = append(args, cfg.Command...)
args = append(args, cfg.Args...)
} else {
if os == windowsOS {
if platform == windowsOS {
args = append(args, "--", powerShell)
}
args = append(args, "sh", "-c", shellCheck)
@ -380,7 +380,7 @@ func nukeK9sShell(a *App) error {
if err != nil {
return err
}
if !ct.FeatureGates.NodeShell {
if !ct.FeatureGates.NodeShell || a.Config.K9s.ShellPod == nil {
return nil
}

View File

@ -72,7 +72,7 @@ func (n *Node) bindDangerousKeys(aa *ui.KeyActions) {
slog.Error("No active context located", slogs.Error, err)
return
}
if ct.FeatureGates.NodeShell {
if ct.FeatureGates.NodeShell && n.App().Config.K9s.ShellPod != nil {
aa.Add(ui.KeyS, ui.NewKeyAction("Shell", n.sshCmd, true))
}
}

View File

@ -52,9 +52,9 @@ func ShowPortForwards(v ResourceViewer, path string, ports port.ContainerPortSpe
coField.SetPlaceholder("Enter a container name::port")
}
coField.SetChangedFunc(func(s string) {
port := extractPort(s)
loField.SetText(port)
p2 = port
p := extractPort(s)
loField.SetText(p)
p2 = p
})
if loField.GetText() == "" {
loField.SetPlaceholder("Enter a local port")
@ -121,16 +121,16 @@ func DismissPortForwards(v ResourceViewer, p *ui.Pages) {
// ----------------------------------------------------------------------------
// Helpers...
func extractPort(port string) string {
tokens := strings.Split(port, "::")
func extractPort(p string) string {
tokens := strings.Split(p, "::")
if len(tokens) < 2 {
ports := strings.Split(port, ",")
ports := strings.Split(p, ",")
for _, t := range ports {
if _, err := strconv.Atoi(strings.TrimSpace(t)); err != nil {
return ""
}
}
return port
return p
}
return tokens[1]

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
const (
@ -399,11 +400,11 @@ func resumeShellIn(a *App, c model.Component, path, co string) {
}
func shellIn(a *App, fqn, co string) {
os, err := getPodOS(a.factory, fqn)
platform, err := getPodOS(a.factory, fqn)
if err != nil {
slog.Warn("OS detect failed", slogs.Error, err)
}
args := computeShellArgs(fqn, co, a.Conn().Config().Flags().KubeConfig, os)
args := computeShellArgs(fqn, co, a.Conn().Config().Flags(), platform)
c := color.New(color.BgGreen).Add(color.FgBlack).Add(color.Bold)
err = runK(a, &shellOpts{
@ -451,31 +452,49 @@ func resumeAttachIn(a *App, c model.Component, path, co string) {
}
func attachIn(a *App, path, co string) {
args := buildShellArgs("attach", path, co, a.Conn().Config().Flags().KubeConfig)
args := buildShellArgs("attach", path, co, a.Conn().Config().Flags())
c := color.New(color.BgGreen).Add(color.FgBlack).Add(color.Bold)
if err := runK(a, &shellOpts{clear: true, banner: c.Sprintf(bannerFmt, path, co), args: args}); err != nil {
a.Flash().Errf("Attach exec failed: %s", err)
}
}
func computeShellArgs(path, co string, kcfg *string, os string) []string {
args := buildShellArgs("exec", path, co, kcfg)
if os == windowsOS {
func computeShellArgs(path, co string, flags *genericclioptions.ConfigFlags, platform string) []string {
args := buildShellArgs("exec", path, co, flags)
if platform == windowsOS {
return append(args, "--", powerShell)
}
return append(args, "--", "sh", "-c", shellCheck)
}
func buildShellArgs(cmd, path, co string, kcfg *string) []string {
// isFlagSet reports whether the given optional CLI flag carries a value.
// It returns the dereferenced value and true only when the pointer is
// non-nil and the string is non-empty; otherwise it returns ("", false).
func isFlagSet(flag *string) (string, bool) {
	if flag != nil && *flag != "" {
		return *flag, true
	}
	return "", false
}
func buildShellArgs(cmd, path, co string, flags *genericclioptions.ConfigFlags) []string {
args := make([]string, 0, 15)
args = append(args, cmd, "-it")
ns, po := client.Namespaced(path)
if ns != client.BlankNamespace {
args = append(args, "-n", ns)
}
args = append(args, po)
if kcfg != nil && *kcfg != "" {
args = append(args, "--kubeconfig", *kcfg)
if flags != nil {
if v, ok := isFlagSet(flags.KubeConfig); ok {
args = append(args, "--kubeconfig", v)
}
if v, ok := isFlagSet(flags.Context); ok {
args = append(args, "--context", v)
}
if v, ok := isFlagSet(flags.BearerToken); ok {
args = append(args, "--token", v)
}
}
if co != "" {
args = append(args, "-c", co)
@ -560,12 +579,12 @@ func getPodOS(f dao.Factory, fqn string) (string, error) {
}
func osFromSelector(s map[string]string) (string, bool) {
if os, ok := s[osBetaSelector]; ok {
return os, ok
if platform, ok := s[osBetaSelector]; ok {
return platform, ok
}
platform, ok := s[osSelector]
os, ok := s[osSelector]
return os, ok
return platform, ok
}
func resourceSorters(t *Table) *ui.KeyActions {

View File

@ -8,50 +8,67 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
// newStr returns a pointer to a copy of s. It is a test convenience for
// populating *string fields (e.g. genericclioptions.ConfigFlags) inline.
func newStr(s string) *string {
	v := s

	return &v
}
func TestComputeShellArgs(t *testing.T) {
config, empty := "coolConfig", ""
_ = config
uu := map[string]struct {
fqn, co, os string
cfg *string
cfg *genericclioptions.ConfigFlags
e string
}{
"config": {
"fred/blee",
"c1",
"darwin",
&config,
"exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
fqn: "fred/blee",
co: "c1",
os: "darwin",
cfg: &genericclioptions.ConfigFlags{
KubeConfig: newStr("coolConfig"),
},
e: "exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
},
"no-config": {
"fred/blee",
"c1",
"linux",
nil,
"exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
fqn: "fred/blee",
co: "c1",
os: "linux",
e: "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
},
"empty-config": {
"fred/blee",
"",
"",
&empty,
"exec -it -n fred blee -- sh -c " + shellCheck,
fqn: "fred/blee",
cfg: new(genericclioptions.ConfigFlags),
e: "exec -it -n fred blee -- sh -c " + shellCheck,
},
"single-container": {
"fred/blee",
"",
"linux",
&empty,
"exec -it -n fred blee -- sh -c " + shellCheck,
fqn: "fred/blee",
os: "linux",
cfg: new(genericclioptions.ConfigFlags),
e: "exec -it -n fred blee -- sh -c " + shellCheck,
},
"windows": {
"fred/blee",
"c1",
windowsOS,
&empty,
"exec -it -n fred blee -c c1 -- powershell",
fqn: "fred/blee",
co: "c1",
os: windowsOS,
cfg: new(genericclioptions.ConfigFlags),
e: "exec -it -n fred blee -c c1 -- powershell",
},
"full": {
fqn: "fred/blee",
co: "c1",
os: windowsOS,
cfg: &genericclioptions.ConfigFlags{
KubeConfig: newStr("coolConfig"),
Context: newStr("coolContext"),
BearerToken: newStr("coolToken"),
},
e: "exec -it -n fred blee --kubeconfig coolConfig --context coolContext --token coolToken -c c1 -- powershell",
},
}
@ -63,46 +80,3 @@ func TestComputeShellArgs(t *testing.T) {
})
}
}
// func TestComputeShellArgs(t *testing.T) {
// config, empty := "coolConfig", ""
// uu := map[string]struct {
// path, co string
// cfg *string
// e string
// }{
// "config": {
// "fred/blee",
// "c1",
// &config,
// "exec -it -n fred blee --kubeconfig coolConfig -c c1 -- sh -c " + shellCheck,
// },
// "noconfig": {
// "fred/blee",
// "c1",
// nil,
// "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
// },
// "emptyConfig": {
// "fred/blee",
// "c1",
// &empty,
// "exec -it -n fred blee -c c1 -- sh -c " + shellCheck,
// },
// "singleContainer": {
// "fred/blee",
// "",
// &empty,
// "exec -it -n fred blee -- sh -c " + shellCheck,
// },
// }
// for k := range uu {
// u := uu[k]
// t.Run(k, func(t *testing.T) {
// args := computeShellArgs(u.path, u.co, u.cfg)
// assert.Equal(t, u.e, strings.Join(args, " "))
// })
// }
// }

View File

@ -67,8 +67,8 @@ func (t *Table) Init(ctx context.Context) (err error) {
}
// SetCommand sets the current command.
func (t *Table) SetCommand(cmd *cmd.Interpreter) {
t.command = cmd
func (t *Table) SetCommand(i *cmd.Interpreter) {
t.command = i
}
// HeaderIndex returns index of a given column or false if not found.

View File

@ -42,8 +42,8 @@ func computeFilename(dumpPath, ns, title, path string) (string, error) {
return strings.ToLower(filepath.Join(dir, fName)), nil
}
func saveTable(dir, title, path string, data *model1.TableData) (string, error) {
ns := data.GetNamespace()
func saveTable(dir, title, path string, mdata *model1.TableData) (string, error) {
ns := mdata.GetNamespace()
if client.IsClusterWide(ns) {
ns = client.NamespaceAll
}
@ -69,9 +69,9 @@ func saveTable(dir, title, path string, data *model1.TableData) (string, error)
}()
w := csv.NewWriter(out)
_ = w.Write(data.ColumnNames(true))
_ = w.Write(mdata.ColumnNames(true))
data.RowsRange(func(_ int, re model1.RowEvent) bool {
mdata.RowsRange(func(_ int, re model1.RowEvent) bool {
_ = w.Write(re.Row.Fields)
return true
})

View File

@ -35,9 +35,9 @@ type Factory struct {
}
// NewFactory returns a new informers factory.
func NewFactory(client client.Connection) *Factory {
func NewFactory(clt client.Connection) *Factory {
return &Factory{
client: client,
client: clt,
factories: make(map[string]di.DynamicSharedInformerFactory),
forwarders: NewForwarders(),
}
@ -72,7 +72,7 @@ func (f *Factory) Terminate() {
}
// List returns a resource collection.
func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Selector) ([]runtime.Object, error) {
func (f *Factory) List(gvr *client.GVR, ns string, wait bool, lbls labels.Selector) ([]runtime.Object, error) {
if client.IsAllNamespace(ns) {
ns = client.BlankNamespace
}
@ -83,9 +83,9 @@ func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Sele
var oo []runtime.Object
if client.IsClusterScoped(ns) {
oo, err = inf.Lister().List(labels)
oo, err = inf.Lister().List(lbls)
} else {
oo, err = inf.Lister().ByNamespace(ns).List(labels)
oo, err = inf.Lister().ByNamespace(ns).List(lbls)
}
if !wait || (wait && inf.Informer().HasSynced()) {
return oo, err
@ -93,9 +93,9 @@ func (f *Factory) List(gvr *client.GVR, ns string, wait bool, labels labels.Sele
f.waitForCacheSync(ns)
if client.IsClusterScoped(ns) {
return inf.Lister().List(labels)
return inf.Lister().List(lbls)
}
return inf.Lister().ByNamespace(ns).List(labels)
return inf.Lister().ByNamespace(ns).List(lbls)
}
// HasSynced checks if given informer is up to date.

View File

@ -1,6 +1,6 @@
name: k9s
base: core22
version: 'v0.50.2'
version: 'v0.50.3'
summary: K9s is a CLI to view and manage your Kubernetes clusters.
description: |
K9s is a CLI to view and manage your Kubernetes clusters. By leveraging a terminal UI, you can easily traverse Kubernetes resources and view the state of your clusters in a single powerful session.