K9s/release v0.30.3 (#2381)

* fix #2377

* fix #2379

* cleaning up

* Release docs
mine
Fernand Galiana 2023-12-25 12:06:23 -07:00 committed by GitHub
parent 463be69a9c
commit 26d1585699
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 346 additions and 218 deletions

View File

@ -11,7 +11,7 @@ DATE ?= $(shell TZ=UTC date -j -f "%s" ${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:
else
DATE ?= $(shell date -u -d @${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:%M:%SZ")
endif
VERSION ?= v0.30.2
VERSION ?= v0.30.3
IMG_NAME := derailed/k9s
IMAGE := ${IMG_NAME}:${VERSION}

View File

@ -0,0 +1,45 @@
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s-xmas.png" align="center" width="800" height="auto"/>
# Release v0.30.3
## Notes
Thank you to all that contributed with flushing out issues and enhancements for K9s!
I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev
and see if we're happier with some of the fixes!
If you've filed an issue please help me verify and close.
Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated!
Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!
As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey,
please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/enQtOTA5MDEyNzI5MTU0LWQ1ZGI3MzliYzZhZWEyNzYxYzA3NjE0YTk1YmFmNzViZjIyNzhkZGI0MmJjYzhlNjdlMGJhYzE2ZGU1NjkyNTM)
## 🎄 Maintenance Release! 🎄
🎵 `On The twelfth day of Christmas my true love gave to me... More Bugs!!` 🎵
Thank you all for pitching in and helping flush out issues!!
---
## Videos Are In The Can!
Please dial [K9s Channel](https://www.youtube.com/channel/UC897uwPygni4QIjkPCpgjmw) for upcoming content...
* [K9s v0.30.0 Sneak peek](https://youtu.be/mVBc1XneRJ4)
* [Vulnerability Scans](https://youtu.be/ULkl0MsaidU)
---
## Resolved Issues
* [#2379](https://github.com/derailed/k9s/issues/2379) Filtering with equal sign (=) does not work in 0.30.X
* [#2378](https://github.com/derailed/k9s/issues/2378) Logs directory not created in the k9s config/home dir 0.30.1
* [#2377](https://github.com/derailed/k9s/issues/2377) Opening AWS EKS contexts create two directories per cluster 0.30.1
---
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2023 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -34,9 +34,14 @@ func (d Dir) Load(n string, ct *api.Context) (*Config, error) {
}
var (
path = filepath.Join(d.root, ct.Cluster, n, MainConfigFile)
cfg *Config
err error
path = filepath.Join(
d.root,
SanitizeFileName(ct.Cluster),
SanitizeFileName(n),
MainConfigFile,
)
cfg *Config
err error
)
if f, e := os.Stat(path); os.IsNotExist(e) || f.Size() == 0 {
log.Debug().Msgf("Context config not found! Generating... %q", path)

View File

@ -50,6 +50,12 @@ func TestDirLoad(t *testing.T) {
flags: makeFlags("cl-test", "ct-test-1"),
cfg: mustLoadConfig("testdata/configs/def_ct.yaml"),
},
"non-sanitized-path": {
dir: "/tmp/data/k9s",
flags: makeFlags("arn:aws:eks:eu-central-1:xxx:cluster/fred-blee", "fred-blee"),
cfg: mustLoadConfig("testdata/configs/aws_ct.yaml"),
},
}
for k := range uu {

View File

@ -6,8 +6,21 @@ package data
import (
"os"
"path/filepath"
"regexp"
)
// invalidPathCharsRX matches runs of characters that are hostile in a file
// path segment (colons and slashes, e.g. from AWS EKS ARN cluster names).
var invalidPathCharsRX = regexp.MustCompile(`[:/]+`)

// SanitizeContextSubpath ensures the cluster/context pair produces a valid
// relative path by sanitizing each segment before joining them.
func SanitizeContextSubpath(cluster, context string) string {
	cl, ct := SanitizeFileName(cluster), SanitizeFileName(context)

	return filepath.Join(cl, ct)
}

// SanitizeFileName ensures the given name is a valid file name by collapsing
// each run of path separators or colons into a single dash.
func SanitizeFileName(name string) string {
	return invalidPathCharsRX.ReplaceAllString(name, "-")
}
// InList check if string is in a collection of strings.
func InList(ll []string, n string) bool {
for _, l := range ll {

View File

@ -0,0 +1,12 @@
k9s:
cluster: arn:aws:eks:eu-central-1:xxx:cluster/fred-blee
namespace:
active: default
lockFavorites: false
favorites:
- default
view:
active: po
featureGates:
nodeShell: false
portForwardAddress: localhost

View File

@ -8,7 +8,6 @@ import (
"os"
"os/user"
"path/filepath"
"regexp"
"github.com/derailed/k9s/internal/config/data"
@ -81,19 +80,13 @@ var (
// InitLogsLoc initializes K9s logs location.
func InitLogLoc() error {
if hasK9sConfigEnv() {
tmpDir, err := userTmpDir()
if err != nil {
return err
}
AppLogFile = filepath.Join(tmpDir, K9sLogsFile)
return nil
tmpDir, err := userTmpDir()
if err != nil {
return err
}
AppLogFile = filepath.Join(tmpDir, K9sLogsFile)
var err error
AppLogFile, err = xdg.StateFile(filepath.Join(AppName, K9sLogsFile))
return err
return nil
}
// InitLocs initializes k9s artifacts locations.
@ -182,31 +175,24 @@ func initXDGLocs() error {
return nil
}
var invalidPathCharsRX = regexp.MustCompile(`[:/]+`)
// SanitizeFileName ensure file spec is valid.
func SanitizeFileName(name string) string {
return invalidPathCharsRX.ReplaceAllString(name, "-")
}
// AppContextDir generates a valid context config dir.
func AppContextDir(cluster, context string) string {
return filepath.Join(AppContextsDir, sanContextSubpath(cluster, context))
return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context))
}
// AppContextAliasesFile generates a valid context specific aliases file path.
func AppContextAliasesFile(cluster, context string) string {
return filepath.Join(AppContextsDir, sanContextSubpath(cluster, context), "aliases.yaml")
return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "aliases.yaml")
}
// AppContextPluginsFile generates a valid context specific plugins file path.
func AppContextPluginsFile(cluster, context string) string {
return filepath.Join(AppContextsDir, sanContextSubpath(cluster, context), "plugins.yaml")
return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "plugins.yaml")
}
// AppContextHotkeysFile generates a valid context specific hotkeys file path.
func AppContextHotkeysFile(cluster, context string) string {
return filepath.Join(AppContextsDir, sanContextSubpath(cluster, context), "hotkeys.yaml")
return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "hotkeys.yaml")
}
// AppContextConfig generates a valid context config file path.
@ -216,14 +202,14 @@ func AppContextConfig(cluster, context string) string {
// DumpsDir generates a valid context dump directory.
func DumpsDir(cluster, context string) (string, error) {
dir := filepath.Join(AppDumpsDir, sanContextSubpath(cluster, context))
dir := filepath.Join(AppDumpsDir, data.SanitizeContextSubpath(cluster, context))
return dir, data.EnsureDirPath(dir, data.DefaultDirMod)
}
// EnsureBenchmarksDir generates a valid benchmark results directory.
func EnsureBenchmarksDir(cluster, context string) (string, error) {
dir := filepath.Join(AppBenchmarksDir, sanContextSubpath(cluster, context))
dir := filepath.Join(AppBenchmarksDir, data.SanitizeContextSubpath(cluster, context))
return dir, data.EnsureDirPath(dir, data.DefaultDirMod)
}
@ -274,10 +260,6 @@ func SkinFileFromName(n string) string {
// Helpers...
func sanContextSubpath(cluster, context string) string {
return filepath.Join(SanitizeFileName(cluster), SanitizeFileName(context))
}
func hasK9sConfigEnv() bool {
return os.Getenv(K9sConfigDir) != ""
}

View File

@ -11,11 +11,6 @@ import (
v1 "k8s.io/api/core/v1"
)
// SanitizeFilename sanitizes the dump filename.
func SanitizeFilename(name string) string {
return invalidPathCharsRX.ReplaceAllString(name, "-")
}
// InNSList check if ns is in an ns collection.
func InNSList(nn []interface{}, ns string) bool {
ss := make([]string, len(nn))

View File

@ -56,6 +56,7 @@ func NewK9s(conn client.Connection, ks data.KubeSettings) *K9s {
}
}
// Save saves the k9s config to disk.
func (k *K9s) Save() error {
if k.activeConfig != nil {
path := filepath.Join(
@ -70,6 +71,7 @@ func (k *K9s) Save() error {
return nil
}
// Refine merges k9s configs.
func (k *K9s) Refine(k1 *K9s) {
k.LiveViewAutoRefresh = k1.LiveViewAutoRefresh
k.ScreenDumpDir = k1.ScreenDumpDir
@ -86,6 +88,7 @@ func (k *K9s) Refine(k1 *K9s) {
k.Thresholds = k1.Thresholds
}
// Override overrides k9s config from cli args.
func (k *K9s) Override(k9sFlags *Flags) {
if *k9sFlags.RefreshRate != DefaultRefreshRate {
k.OverrideRefreshRate(*k9sFlags.RefreshRate)
@ -105,6 +108,7 @@ func (k *K9s) OverrideScreenDumpDir(dir string) {
k.manualScreenDumpDir = &dir
}
// GetScreenDumpDir fetch screen dumps dir.
func (k *K9s) GetScreenDumpDir() string {
screenDumpDir := k.ScreenDumpDir
if k.manualScreenDumpDir != nil && *k.manualScreenDumpDir != "" {
@ -117,21 +121,29 @@ func (k *K9s) GetScreenDumpDir() string {
return screenDumpDir
}
// Reset resets configuration and context.
func (k *K9s) Reset() {
k.activeConfig, k.activeContextName = nil, ""
}
// ActiveScreenDumpsDir fetch context specific screen dumps dir.
func (k *K9s) ActiveScreenDumpsDir() string {
return filepath.Join(k.GetScreenDumpDir(), k.ActiveContextDir())
}
// ActiveContextDir fetch current cluster/context path.
func (k *K9s) ActiveContextDir() string {
if k.activeConfig == nil {
return "na"
}
return filepath.Join(
SanitizeFileName(k.activeConfig.Context.ClusterName),
SanitizeFileName(k.ActiveContextName()),
return data.SanitizeContextSubpath(
k.activeConfig.Context.ClusterName,
k.ActiveContextName(),
)
}
// ActiveContextNamespace fetch the context active ns.
func (k *K9s) ActiveContextNamespace() (string, error) {
if k.activeConfig != nil {
return k.activeConfig.Context.Namespace.Active, nil
@ -140,6 +152,7 @@ func (k *K9s) ActiveContextNamespace() (string, error) {
return "", errors.New("context config is not set")
}
// ActiveContextName returns the active context name.
func (k *K9s) ActiveContextName() string {
return k.activeContextName
}

View File

@ -69,6 +69,10 @@ func NewMockKubeSettings(f *genericclioptions.ConfigFlags) mockKubeSettings {
Cluster: *f.ClusterName,
Namespace: client.DefaultNamespace,
},
"fred-blee": {
Cluster: "arn:aws:eks:eu-central-1:xxx:cluster/fred-blee",
Namespace: client.DefaultNamespace,
},
},
}
}

View File

@ -142,7 +142,11 @@ func (p Pod) Render(o interface{}, ns string, row *Row) error {
cs := po.Status.ContainerStatuses
cr, _, rc := p.Statuses(cs)
c, r := p.gatherPodMX(&po, pwm.MX)
var ccmx []mv1beta1.ContainerMetrics
if pwm.MX != nil {
ccmx = pwm.MX.Containers
}
c, r := gatherCoMX(po.Spec.Containers, ccmx)
phase := p.Phase(&po)
row.ID = client.MetaFQN(po.ObjectMeta)
row.Fields = Fields{
@ -232,22 +236,20 @@ func (p *PodWithMetrics) DeepCopyObject() runtime.Object {
return p
}
func (*Pod) gatherPodMX(pod *v1.Pod, mx *mv1beta1.PodMetrics) (c, r metric) {
rcpu, rmem := podRequests(pod.Spec.Containers)
func gatherCoMX(cc []v1.Container, ccmx []mv1beta1.ContainerMetrics) (c, r metric) {
rcpu, rmem := cosRequests(cc)
r.cpu, r.mem = rcpu.MilliValue(), rmem.Value()
lcpu, lmem := podLimits(pod.Spec.Containers)
lcpu, lmem := cosLimits(cc)
r.lcpu, r.lmem = lcpu.MilliValue(), lmem.Value()
if mx != nil {
ccpu, cmem := currentRes(mx)
c.cpu, c.mem = ccpu.MilliValue(), cmem.Value()
}
ccpu, cmem := currentRes(ccmx)
c.cpu, c.mem = ccpu.MilliValue(), cmem.Value()
return
}
func podLimits(cc []v1.Container) (resource.Quantity, resource.Quantity) {
func cosLimits(cc []v1.Container) (resource.Quantity, resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
for _, c := range cc {
limits := c.Resources.Limits
@ -264,7 +266,7 @@ func podLimits(cc []v1.Container) (resource.Quantity, resource.Quantity) {
return *cpu, *mem
}
func podRequests(cc []v1.Container) (resource.Quantity, resource.Quantity) {
func cosRequests(cc []v1.Container) (resource.Quantity, resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
for _, c := range cc {
co := c
@ -280,12 +282,12 @@ func podRequests(cc []v1.Container) (resource.Quantity, resource.Quantity) {
return *cpu, *mem
}
func currentRes(mx *mv1beta1.PodMetrics) (resource.Quantity, resource.Quantity) {
func currentRes(ccmx []mv1beta1.ContainerMetrics) (resource.Quantity, resource.Quantity) {
cpu, mem := new(resource.Quantity), new(resource.Quantity)
if mx == nil {
if ccmx == nil {
return *cpu, *mem
}
for _, co := range mx.Containers {
for _, co := range ccmx {
c, m := co.Usage.Cpu(), co.Usage.Memory()
cpu.Add(*c)
mem.Add(*m)

View File

@ -0,0 +1,177 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of K9s
package render
import (
"testing"
"github.com/derailed/k9s/internal/client"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
res "k8s.io/apimachinery/pkg/api/resource"
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)
// Test_gatherPodMx exercises gatherCoMX: requests/limits (r) are gathered
// from the container specs while current usage (c) comes from the container
// metrics; perc is the expected current-vs-requested cpu percentage.
func Test_gatherPodMx(t *testing.T) {
	uu := map[string]struct {
		cc   []v1.Container
		mx   []mv1beta1.ContainerMetrics
		c, r metric
		perc string
	}{
		// Single container: usage and requests/limits come straight through.
		"single": {
			cc: []v1.Container{
				makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
			},
			mx: []mv1beta1.ContainerMetrics{
				makeCoMX("c1", "1m", "22Mi"),
			},
			c: metric{
				cpu: 1,
				mem: 22 * client.MegaByte,
			},
			r: metric{
				cpu:  10,
				mem:  1 * client.MegaByte,
				lcpu: 20,
				lmem: 2 * client.MegaByte,
			},
			perc: "10",
		},
		// Multiple containers: values must be summed across all containers.
		"multi": {
			cc: []v1.Container{
				makeContainer("c1", false, "11m", "22Mi", "111m", "44Mi"),
				makeContainer("c2", false, "93m", "1402Mi", "0m", "2804Mi"),
				makeContainer("c3", false, "11m", "34Mi", "0m", "69Mi"),
			},
			r: metric{
				cpu:  11 + 93 + 11,
				mem:  (22 + 1402 + 34) * client.MegaByte,
				lcpu: 111 + 0 + 0,
				lmem: (44 + 2804 + 69) * client.MegaByte,
			},
			mx: []mv1beta1.ContainerMetrics{
				makeCoMX("c1", "1m", "22Mi"),
				makeCoMX("c2", "51m", "1275Mi"),
				makeCoMX("c3", "1m", "27Mi"),
			},
			c: metric{
				cpu: 1 + 51 + 1,
				mem: (22 + 1275 + 27) * client.MegaByte,
			},
			perc: "46",
		},
	}
	for k := range uu {
		u := uu[k]
		t.Run(k, func(t *testing.T) {
			c, r := gatherCoMX(u.cc, u.mx)
			assert.Equal(t, u.c.cpu, c.cpu)
			assert.Equal(t, u.c.mem, c.mem)
			assert.Equal(t, u.c.lcpu, c.lcpu)
			assert.Equal(t, u.c.lmem, c.lmem)
			assert.Equal(t, u.r.cpu, r.cpu)
			assert.Equal(t, u.r.mem, r.mem)
			assert.Equal(t, u.r.lcpu, r.lcpu)
			assert.Equal(t, u.r.lmem, r.lmem)
			assert.Equal(t, u.perc, client.ToPercentageStr(c.cpu, r.cpu))
		})
	}
}
// Test_podLimits verifies that cosLimits sums cpu/memory limits across
// all given containers.
func Test_podLimits(t *testing.T) {
	uu := map[string]struct {
		cc []v1.Container
		l  v1.ResourceList
	}{
		"plain": {
			cc: []v1.Container{
				makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
			},
			l: makeRes("20m", "2Mi"),
		},
		// Two containers: expected limits are the sums (20m+40m, 2Mi+4Mi).
		"multi-co": {
			cc: []v1.Container{
				makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
				makeContainer("c2", false, "10m", "1Mi", "40m", "4Mi"),
			},
			l: makeRes("60m", "6Mi"),
		},
	}
	for k := range uu {
		u := uu[k]
		t.Run(k, func(t *testing.T) {
			c, m := cosLimits(u.cc)
			assert.True(t, c.Equal(*u.l.Cpu()))
			assert.True(t, m.Equal(*u.l.Memory()))
		})
	}
}
// Test_podRequests verifies that cosRequests sums cpu/memory requests across
// all given containers.
func Test_podRequests(t *testing.T) {
	uu := map[string]struct {
		cc []v1.Container
		l  v1.ResourceList
	}{
		"plain": {
			cc: []v1.Container{
				makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
			},
			l: makeRes("10m", "1Mi"),
		},
		// Two containers: expected requests are the sums (10m+10m, 1Mi+1Mi).
		"multi-co": {
			cc: []v1.Container{
				makeContainer("c1", false, "10m", "1Mi", "20m", "2Mi"),
				makeContainer("c2", false, "10m", "1Mi", "40m", "4Mi"),
			},
			l: makeRes("20m", "2Mi"),
		},
	}
	for k := range uu {
		u := uu[k]
		t.Run(k, func(t *testing.T) {
			c, m := cosRequests(u.cc)
			assert.True(t, c.Equal(*u.l.Cpu()))
			assert.True(t, m.Equal(*u.l.Memory()))
		})
	}
}
// Helpers...
// makeContainer builds a test container named n. When init is true the
// container carries no resource requirements; otherwise requests (rc, rm)
// and limits (lc, lm) are attached.
func makeContainer(n string, init bool, rc, rm, lc, lm string) v1.Container {
	co := v1.Container{Name: n}
	if !init {
		co.Resources = v1.ResourceRequirements{
			Requests: makeRes(rc, rm),
			Limits:   makeRes(lc, lm),
		}
	}

	return co
}
// makeRes builds a cpu/memory ResourceList from quantity specs.
// Parse errors are deliberately ignored: an invalid spec yields a
// zero quantity, which is fine for test fixtures.
func makeRes(c, m string) v1.ResourceList {
	cpu, _ := res.ParseQuantity(c)
	mem, _ := res.ParseQuantity(m)
	rl := make(v1.ResourceList, 2)
	rl[v1.ResourceCPU] = cpu
	rl[v1.ResourceMemory] = mem

	return rl
}
// makeCoMX builds container metrics for container n with the given
// cpu/mem usage specs.
func makeCoMX(n string, c, m string) mv1beta1.ContainerMetrics {
	mx := mv1beta1.ContainerMetrics{Name: n}
	mx.Usage = makeRes(c, m)

	return mx
}

View File

@ -281,123 +281,3 @@ func makeRes(c, m string) v1.ResourceList {
v1.ResourceMemory: mem,
}
}
// apiVersion: v1
// kind: Pod
// metadata:
// creationTimestamp: "2023-11-11T17:01:40Z"
// finalizers:
// - batch.kubernetes.io/job-tracking
// generateName: hello-28328646-
// labels:
// batch.kubernetes.io/controller-uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
// batch.kubernetes.io/job-name: hello-28328646
// controller-uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
// job-name: hello-28328646
// name: hello-28328646-h9fnh
// namespace: fred
// ownerReferences:
// - apiVersion: batch/v1
// blockOwnerDeletion: true
// controller: true
// kind: Job
// name: hello-28328646
// uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
// resourceVersion: "381637"
// uid: ea77c360-6375-459b-8b30-2ac0c59404cd
// spec:
// containers:
// - args:
// - /bin/bash
// - -c
// - for i in {1..5}; do echo "hello";sleep 1; done
// image: blang/busybox-bash
// imagePullPolicy: Always
// name: c1
// resources: {}
// terminationMessagePath: /dev/termination-log
// terminationMessagePolicy: File
// volumeMounts:
// - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
// name: kube-api-access-7sztm
// readOnly: true
// dnsPolicy: ClusterFirst
// enableServiceLinks: true
// nodeName: kind-worker
// preemptionPolicy: PreemptLowerPriority
// priority: 0
// restartPolicy: OnFailure
// schedulerName: default-scheduler
// securityContext: {}
// serviceAccount: default
// serviceAccountName: default
// terminationGracePeriodSeconds: 30
// tolerations:
// - effect: NoExecute
// key: node.kubernetes.io/not-ready
// operator: Exists
// tolerationSeconds: 300
// - effect: NoExecute
// key: node.kubernetes.io/unreachable
// operator: Exists
// tolerationSeconds: 300
// volumes:
// - name: kube-api-access-7sztm
// projected:
// defaultMode: 420
// sources:
// - serviceAccountToken:
// expirationSeconds: 3607
// path: token
// - configMap:
// items:
// - key: ca.crt
// path: ca.crt
// name: kube-root-ca.crt
// - downwardAPI:
// items:
// - fieldRef:
// apiVersion: v1
// fieldPath: metadata.namespace
// path: namespace
// status:
// conditions:
// - lastProbeTime: null
// lastTransitionTime: "2023-11-11T17:01:40Z"
// status: "True"
// type: Initialized
// - lastProbeTime: null
// lastTransitionTime: "2023-11-11T17:01:40Z"
// message: 'containers with unready status: [c1[]'
// reason: ContainersNotReady
// status: "False"
// type: Ready
// - lastProbeTime: null
// lastTransitionTime: "2023-11-11T17:01:40Z"
// message: 'containers with unready status: [c1[]'
// reason: ContainersNotReady
// status: "False"
// type: ContainersReady
// - lastProbeTime: null
// lastTransitionTime: "2023-11-11T17:01:40Z"
// status: "True"
// type: PodScheduled
// containerStatuses:
// - image: blang/busybox-bash
// imageID: ""
// lastState: {}
// name: c1
// ready: false
// restartCount: 0
// started: false
// state:
// waiting:
// message: Back-off pulling image "blang/busybox-bash"
// reason: ImagePullBackOff
// hostIP: 172.18.0.3
// phase: Pending
// podIP: 10.244.1.59
// podIPs:
// - ip: 10.244.1.59
// qosClass: BestEffort
// startTime: "2023-11-11T17:01:40Z"

View File

@ -12,6 +12,7 @@ import (
"github.com/derailed/k9s/internal"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/render"
"github.com/derailed/k9s/internal/view/cmd"
"github.com/rs/zerolog/log"
"github.com/sahilm/fuzzy"
)
@ -41,11 +42,9 @@ const (
var (
// LabelRx identifies a label query.
LabelRx = regexp.MustCompile(`\A\-l`)
LabelRx = regexp.MustCompile(`\A\-l`)
inverseRx = regexp.MustCompile(`\A\!`)
fuzzyRx = regexp.MustCompile(`\A\-f`)
fuzzyRx = regexp.MustCompile(`\A\-f`)
)
func mustExtractStyles(ctx context.Context) *config.Styles {
@ -71,12 +70,11 @@ func IsLabelSelector(s string) bool {
if s == "" {
return false
}
if LabelRx.MatchString(s) {
return true
}
return !strings.Contains(s, " ") && strings.Contains(s, "=")
return !strings.Contains(s, " ") && cmd.ToLabels(s) != nil
}
// IsFuzzySelector checks if query is fuzzy.

View File

@ -37,11 +37,13 @@ func TestIsLabelSelector(t *testing.T) {
s string
ok bool
}{
"empty": {s: ""},
"cool": {s: "-l app=fred,env=blee", ok: true},
"no-flag": {s: "app=fred,env=blee", ok: true},
"no-space": {s: "-lapp=fred,env=blee", ok: true},
"wrong-flag": {s: "-f app=fred,env=blee"},
"empty": {s: ""},
"cool": {s: "-l app=fred,env=blee", ok: true},
"no-flag": {s: "app=fred,env=blee", ok: true},
"no-space": {s: "-lapp=fred,env=blee", ok: true},
"wrong-flag": {s: "-f app=fred,env=blee"},
"missing-key": {s: "=fred"},
"missing-val": {s: "fred="},
}
for k := range uu {

View File

@ -12,6 +12,7 @@ import (
"github.com/derailed/k9s/internal"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/config/data"
"github.com/derailed/k9s/internal/ui"
"github.com/derailed/tcell/v2"
"github.com/rs/zerolog/log"
@ -74,8 +75,8 @@ func benchDir(cfg *config.Config) string {
}
return filepath.Join(
config.AppBenchmarksDir,
config.SanitizeFileName(ct.ClusterName),
config.SanitizeFilename(cfg.K9s.ActiveContextName()),
data.SanitizeFileName(ct.ClusterName),
data.SanitizeFileName(cfg.K9s.ActiveContextName()),
)
}

View File

@ -8,7 +8,6 @@ import (
"strings"
"github.com/derailed/k9s/internal/client"
"github.com/rs/zerolog/log"
)
func ToLabels(s string) map[string]string {
@ -74,7 +73,6 @@ func SuggestSubCommand(command string, namespaces client.NamespaceNames, context
if n, ok := p.HasContext(); ok {
suggests = completeCtx(n, contexts)
}
log.Debug().Msgf("!!SUGG CTX!! %#v", suggests)
if len(suggests) > 0 {
break
}
@ -84,7 +82,6 @@ func SuggestSubCommand(command string, namespaces client.NamespaceNames, context
return nil
}
suggests = completeNS(ns, namespaces)
log.Debug().Msgf("!!SUGG NS!! %#v", suggests)
default:
if n, ok := p.HasContext(); ok {

View File

@ -297,7 +297,7 @@ func (d *Details) resetCmd(evt *tcell.EventKey) *tcell.EventKey {
}
func (d *Details) saveCmd(evt *tcell.EventKey) *tcell.EventKey {
if path, err := saveYAML(d.app.Config.K9s.GetScreenDumpDir(), d.app.Config.K9s.ActiveContextDir(), d.title, d.text.GetText(true)); err != nil {
if path, err := saveYAML(d.app.Config.K9s.ActiveScreenDumpsDir(), d.title, d.text.GetText(true)); err != nil {
d.app.Flash().Err(err)
} else {
d.app.Flash().Infof("Log %s saved successfully!", path)

View File

@ -357,7 +357,7 @@ func (v *LiveView) resetCmd(evt *tcell.EventKey) *tcell.EventKey {
func (v *LiveView) saveCmd(evt *tcell.EventKey) *tcell.EventKey {
name := fmt.Sprintf("%s--%s", strings.Replace(v.model.GetPath(), "/", "-", 1), strings.ToLower(v.title))
if _, err := saveYAML(v.app.Config.K9s.GetScreenDumpDir(), v.app.Config.K9s.ActiveContextDir(), name, sanitizeEsc(v.text.GetText(true))); err != nil {
if _, err := saveYAML(v.app.Config.K9s.ActiveScreenDumpsDir(), name, sanitizeEsc(v.text.GetText(true))); err != nil {
v.app.Flash().Err(err)
} else {
v.app.Flash().Infof("File %q saved successfully!", name)

View File

@ -406,7 +406,7 @@ func (l *Log) filterCmd(evt *tcell.EventKey) *tcell.EventKey {
// SaveCmd dumps the logs to file.
func (l *Log) SaveCmd(*tcell.EventKey) *tcell.EventKey {
path, err := saveData(l.app.Config.K9s.GetScreenDumpDir(), l.app.Config.K9s.ActiveContextDir(), l.model.GetPath(), l.logs.GetText(true))
path, err := saveData(l.app.Config.K9s.ActiveScreenDumpsDir(), l.model.GetPath(), l.logs.GetText(true))
if err != nil {
l.app.Flash().Err(err)
return nil
@ -420,8 +420,7 @@ func ensureDir(dir string) error {
return os.MkdirAll(dir, 0744)
}
func saveData(screenDumpDir, context, fqn, data string) (string, error) {
dir := filepath.Join(screenDumpDir, context)
func saveData(dir, fqn, data string) (string, error) {
if err := ensureDir(dir); err != nil {
return "", err
}

View File

@ -7,7 +7,6 @@ import (
"bytes"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/derailed/k9s/internal/client"
@ -113,7 +112,7 @@ func TestLogViewSave(t *testing.T) {
dd := "/tmp/test-dumps/na"
assert.NoError(t, ensureDumpDir(dd))
app.Config.K9s.ScreenDumpDir = "/tmp/test-dumps"
dir := filepath.Join(app.Config.K9s.GetScreenDumpDir(), app.Config.K9s.ActiveContextDir())
dir := app.Config.K9s.ActiveScreenDumpsDir()
c1, err := os.ReadDir(dir)
assert.NoError(t, err, fmt.Sprintf("Dir: %q", dir))
v.SaveCmd(nil)

View File

@ -154,7 +154,7 @@ func (l *Logger) resetCmd(evt *tcell.EventKey) *tcell.EventKey {
}
func (l *Logger) saveCmd(evt *tcell.EventKey) *tcell.EventKey {
if path, err := saveYAML(l.app.Config.K9s.GetScreenDumpDir(), l.app.Config.K9s.ActiveContextDir(), l.title, l.GetText(true)); err != nil {
if path, err := saveYAML(l.app.Config.K9s.ActiveScreenDumpsDir(), l.title, l.GetText(true)); err != nil {
l.app.Flash().Err(err)
} else {
l.app.Flash().Infof("Log %s saved successfully!", path)

View File

@ -5,7 +5,6 @@ package view
import (
"context"
"path/filepath"
"github.com/derailed/k9s/internal"
"github.com/derailed/k9s/internal/client"
@ -36,7 +35,7 @@ func NewScreenDump(gvr client.GVR) ResourceViewer {
}
func (s *ScreenDump) dirContext(ctx context.Context) context.Context {
dir := filepath.Join(s.App().Config.K9s.GetScreenDumpDir(), s.App().Config.K9s.ActiveContextDir())
dir := s.App().Config.K9s.ActiveScreenDumpsDir()
if err := data.EnsureFullPath(dir, data.DefaultDirMod); err != nil {
s.App().Flash().Err(err)
return ctx

View File

@ -170,7 +170,7 @@ func (t *Table) BufferActive(state bool, k model.BufferKind) {
}
func (t *Table) saveCmd(evt *tcell.EventKey) *tcell.EventKey {
if path, err := saveTable(t.app.Config.K9s.GetScreenDumpDir(), t.app.Config.K9s.ActiveContextDir(), t.GVR().R(), t.Path, t.GetFilteredData()); err != nil {
if path, err := saveTable(t.app.Config.K9s.ActiveScreenDumpsDir(), t.GVR().R(), t.Path, t.GetFilteredData()); err != nil {
t.app.Flash().Err(err)
} else {
t.app.Flash().Infof("File %s saved successfully!", path)

View File

@ -12,21 +12,21 @@ import (
"time"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/config/data"
"github.com/derailed/k9s/internal/render"
"github.com/derailed/k9s/internal/ui"
"github.com/rs/zerolog/log"
)
func computeFilename(screenDumpDir, context, ns, title, path string) (string, error) {
func computeFilename(dumpPath, ns, title, path string) (string, error) {
now := time.Now().UnixNano()
dir := filepath.Join(screenDumpDir, context)
dir := filepath.Join(dumpPath)
if err := ensureDir(dir); err != nil {
return "", err
}
name := title + "-" + config.SanitizeFilename(path)
name := title + "-" + data.SanitizeFileName(path)
if path == "" {
name = title
}
@ -41,13 +41,13 @@ func computeFilename(screenDumpDir, context, ns, title, path string) (string, er
return strings.ToLower(filepath.Join(dir, fName)), nil
}
func saveTable(screenDumpDir, context, title, path string, data *render.TableData) (string, error) {
func saveTable(dir, title, path string, data *render.TableData) (string, error) {
ns := data.Namespace
if client.IsClusterWide(ns) {
ns = client.NamespaceAll
}
fPath, err := computeFilename(screenDumpDir, context, ns, title, path)
fPath, err := computeFilename(dir, ns, title, path)
if err != nil {
return "", err
}

View File

@ -6,7 +6,6 @@ package view
import (
"context"
"os"
"path/filepath"
"testing"
"time"
@ -30,7 +29,7 @@ func TestTableSave(t *testing.T) {
v.SetTitle("k9s-test")
assert.NoError(t, ensureDumpDir("/tmp/test-dumps"))
dir := filepath.Join(v.app.Config.K9s.GetScreenDumpDir(), v.app.Config.K9s.ActiveContextDir())
dir := v.app.Config.K9s.ActiveScreenDumpsDir()
c1, _ := os.ReadDir(dir)
v.saveCmd(nil)

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/config/data"
"github.com/derailed/tview"
"github.com/rs/zerolog/log"
)
@ -62,18 +63,17 @@ func enableRegion(str string) string {
return strings.ReplaceAll(strings.ReplaceAll(str, "<<<", "["), ">>>", "]")
}
func saveYAML(screenDumpDir, context, name, data string) (string, error) {
dir := filepath.Join(screenDumpDir, config.SanitizeFilename(context))
func saveYAML(dir, name, raw string) (string, error) {
if err := ensureDir(dir); err != nil {
return "", err
}
fName := fmt.Sprintf("%s--%d.yaml", config.SanitizeFilename(name), time.Now().Unix())
path := filepath.Join(dir, fName)
fName := fmt.Sprintf("%s--%d.yaml", data.SanitizeFileName(name), time.Now().Unix())
fpath := filepath.Join(dir, fName)
mod := os.O_CREATE | os.O_WRONLY
file, err := os.OpenFile(path, mod, 0600)
file, err := os.OpenFile(fpath, mod, 0600)
if err != nil {
log.Error().Err(err).Msgf("YAML create %s", path)
log.Error().Err(err).Msgf("YAML create %s", fpath)
return "", nil
}
defer func() {
@ -81,9 +81,9 @@ func saveYAML(screenDumpDir, context, name, data string) (string, error) {
log.Error().Err(err).Msg("Closing yaml file")
}
}()
if _, err := file.Write([]byte(data)); err != nil {
if _, err := file.Write([]byte(raw)); err != nil {
return "", err
}
return path, nil
return fpath, nil
}

View File

@ -1,6 +1,6 @@
name: k9s
base: core20
version: 'v0.30.2'
version: 'v0.30.3'
summary: K9s is a CLI to view and manage your Kubernetes clusters.
description: |
K9s is a CLI to view and manage your Kubernetes clusters. By leveraging a terminal UI, you can easily traverse Kubernetes resources and view the state of your clusters in a single powerful session.