parent 5445ff4da1
commit a543f47319

Makefile (2 lines changed)

@@ -11,7 +11,7 @@ DATE ?= $(shell TZ=UTC date -j -f "%s" ${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:
 else
 DATE ?= $(shell date -u -d @${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:%M:%SZ")
 endif
-VERSION ?= v0.31.5
+VERSION ?= v0.31.6
 IMG_NAME := derailed/k9s
 IMAGE := ${IMG_NAME}:${VERSION}
@@ -0,0 +1,75 @@
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s.png" align="center" width="800" height="auto"/>

# Release v0.31.6

## Notes

Thank you to all that contributed with flushing out issues and enhancements for K9s!
I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev
and see if we're happier with some of the fixes!
If you've filed an issue, please help me verify and close.

Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated!
Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!

As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey,
please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)

On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/enQtOTA5MDEyNzI5MTU0LWQ1ZGI3MzliYzZhZWEyNzYxYzA3NjE0YTk1YmFmNzViZjIyNzhkZGI0MmJjYzhlNjdlMGJhYzE2ZGU1NjkyNTM)

## Maintenance Release!

😱 More aftermath... 😱

Thank you all for pitching in and helping flesh out issues!!

Please make sure to add the gory details to issues, i.e. relevant configs, debug logs, etc...

Comments like `same here!` or `me too!` don't really cut it for us to zero in ;(
Everyone has slightly different settings/platforms, so every little bit of info helps with the resolution, even if seemingly irrelevant.

---

## NOTE

In this drop, we've made k9s a bit more resilient (hopefully!) to configuration issues; in most cases k9s will come up but may exhibit `limp mode` behaviors.
Please double check your k9s logs if things don't work as expected and file an issue with the `gory` details!

☢️ This drop may cause `some disturbance in the farce!` ☢️

Please proceed with caution with this one, as we did our best to address potential context config file corruption by eliminating race conditions.
It's late and I am operating on minimal sleep, so I may have hosed some behaviors 🫣
If you experience k9s locking up or misbehaving, as per the above 👆 you know what to do now, and as customary
we will do our best to address issues quickly to get you back up and running!

Thank you for your support, kindness and patience!

---

## Videos Are In The Can!

Please dial [K9s Channel](https://www.youtube.com/channel/UC897uwPygni4QIjkPCpgjmw) for upcoming content...

* [K9s v0.31.0 Configs+Sneak peek](https://youtu.be/X3444KfjguE)
* [K9s v0.30.0 Sneak peek](https://youtu.be/mVBc1XneRJ4)
* [Vulnerability Scans](https://youtu.be/ULkl0MsaidU)

---

## Resolved Issues

* [#2476](https://github.com/derailed/k9s/issues/2476) Pods are not displayed for the selected namespace. Hopefully!
* [#2471](https://github.com/derailed/k9s/issues/2471) Shell autocomplete functions do not work correctly

---

## Contributed PRs

Please be sure to give `Big Thanks!` and `ATTA Girls/Boys!` to all the fine contributors for making K9s better for all of us!!

* [#2480](https://github.com/derailed/k9s/pull/2480) Adding system arch to nodes view
* [#2477](https://github.com/derailed/k9s/pull/2477) Shell autocomplete for k8s flags

---

<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2024 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)

cmd/root.go (76 lines changed)

@@ -47,9 +47,9 @@ var (
 	out = colorable.NewColorableStdout()
 )

-type FlagError struct{ err error }
+type flagError struct{ err error }

-func (e *FlagError) Error() string { return e.err.Error() }
+func (e flagError) Error() string { return e.err.Error() }

 func init() {
 	if err := config.InitLogLoc(); err != nil {

@@ -57,7 +57,7 @@ func init() {
 	}

 	rootCmd.SetFlagErrorFunc(func(command *cobra.Command, err error) error {
-		return &FlagError{err: err}
+		return flagError{err: err}
 	})

 	rootCmd.AddCommand(versionCmd(), infoCmd())

@@ -68,8 +68,7 @@ func init() {
 // Execute root command.
 func Execute() {
 	if err := rootCmd.Execute(); err != nil {
-		var flagError *FlagError
-		if !errors.As(err, &flagError) {
+		if !errors.As(err, &flagError{}) {
 			panic(err)
 		}
 	}
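
The hunk above swaps the exported `*FlagError` pointer type for an unexported value type and matches it inline with `errors.As(err, &flagError{})`, dropping the throwaway `var flagError *FlagError` declaration. A minimal standalone sketch of the same pattern; the `usageError` name and messages are illustrative, not from the k9s codebase:

```go
package main

import (
	"errors"
	"fmt"
)

// usageError mirrors the unexported, value-receiver error type above.
type usageError struct{ err error }

func (e usageError) Error() string { return e.err.Error() }

func run() error {
	// Wrap a low-level error in the dedicated type so callers can detect it.
	return fmt.Errorf("parsing flags: %w", usageError{err: errors.New("unknown flag: --bogus")})
}

func main() {
	if err := run(); err != nil {
		// errors.As takes a pointer to any error-implementing type;
		// &usageError{} works because Error has a value receiver.
		if !errors.As(err, &usageError{}) {
			panic(err) // not a usage problem: fail loudly
		}
		fmt.Println("usage error:", err) // usage problem: report gracefully
	}
}
```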

@@ -128,35 +127,36 @@ func loadConfiguration() (*config.Config, error) {

 	k8sCfg := client.NewConfig(k8sFlags)
 	k9sCfg := config.NewConfig(k8sCfg)
+	var errs error
 	conn, err := client.InitConnection(k8sCfg)
 	k9sCfg.SetConnection(conn)
 	if err != nil {
-		return k9sCfg, err
+		errs = errors.Join(errs, err)
 	}

 	if err := k9sCfg.Load(config.AppConfigFile); err != nil {
-		return k9sCfg, err
+		errs = errors.Join(errs, err)
 	}
 	k9sCfg.K9s.Override(k9sFlags)
 	if err := k9sCfg.Refine(k8sFlags, k9sFlags, k8sCfg); err != nil {
 		log.Error().Err(err).Msgf("config refine failed")
-		return k9sCfg, err
+		errs = errors.Join(errs, err)
 	}
 	// Try to access server version if that fail. Connectivity issue?
 	if !conn.CheckConnectivity() {
-		return k9sCfg, fmt.Errorf("cannot connect to context: %s", k9sCfg.K9s.ActiveContextName())
+		errs = errors.Join(errs, fmt.Errorf("cannot connect to context: %s", k9sCfg.K9s.ActiveContextName()))
 	}
 	if !conn.ConnectionOK() {
-		return k9sCfg, fmt.Errorf("k8s connection failed for context: %s", k9sCfg.K9s.ActiveContextName())
+		errs = errors.Join(errs, fmt.Errorf("k8s connection failed for context: %s", k9sCfg.K9s.ActiveContextName()))
 	}

 	log.Info().Msg("✅ Kubernetes connectivity")
 	if err := k9sCfg.Save(); err != nil {
 		log.Error().Err(err).Msg("Config save")
-		return k9sCfg, err
+		errs = errors.Join(errs, err)
 	}

-	return k9sCfg, nil
+	return k9sCfg, errs
 }

 func parseLevel(level string) zerolog.Level {
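
The rewrite above is the heart of the release's `limp mode` behavior: instead of bailing on the first configuration or connectivity failure, `loadConfiguration` now accumulates everything with `errors.Join` and still hands back a usable config. A self-contained sketch of the accumulate-and-continue pattern (the step names are hypothetical stand-ins for the k9s steps):

```go
package main

import (
	"errors"
	"fmt"
)

// loadAll runs every step even when earlier ones fail, joining each
// failure into errs so the caller sees all problems at once.
func loadAll() error {
	var errs error
	steps := []struct {
		name string
		err  error
	}{
		{"connect", errors.New("cannot connect to context: dev")},
		{"load", nil},
		{"validate", errors.New("schema validation failed")},
	}
	for _, s := range steps {
		if s.err != nil {
			errs = errors.Join(errs, fmt.Errorf("%s: %w", s.name, s.err))
		}
	}
	return errs // nil only if every step succeeded
}

func main() {
	if err := loadAll(); err != nil {
		fmt.Println(err) // each joined error prints on its own line
	}
}
```

`errors.Join` discards nil arguments, so the `errs = errors.Join(errs, ...)` idiom needs no special casing for the first failure.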

@@ -351,50 +351,58 @@ func initCertFlags() {
 	)
 }

+type (
+	k8sPickerFn[T any] func(cfg *api.Config) map[string]T
+	completeFn         func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective)
+)
+
 func initK8sFlagCompletion() {
-	_ = rootCmd.RegisterFlagCompletionFunc("context", k8sFlagCompletionFunc(func(cfg *api.Config) map[string]*api.Context {
+	conn := client.NewConfig(k8sFlags)
+	cfg, err := conn.RawConfig()
+	if err != nil {
+		log.Error().Err(err).Msgf("k8s config getter failed")
+	}
+
+	_ = rootCmd.RegisterFlagCompletionFunc("context", k8sFlagCompletion(&cfg, func(cfg *api.Config) map[string]*api.Context {
 		return cfg.Contexts
 	}))

-	_ = rootCmd.RegisterFlagCompletionFunc("cluster", k8sFlagCompletionFunc(func(cfg *api.Config) map[string]*api.Cluster {
+	_ = rootCmd.RegisterFlagCompletionFunc("cluster", k8sFlagCompletion(&cfg, func(cfg *api.Config) map[string]*api.Cluster {
 		return cfg.Clusters
 	}))

-	_ = rootCmd.RegisterFlagCompletionFunc("user", k8sFlagCompletionFunc(func(cfg *api.Config) map[string]*api.AuthInfo {
+	_ = rootCmd.RegisterFlagCompletionFunc("user", k8sFlagCompletion(&cfg, func(cfg *api.Config) map[string]*api.AuthInfo {
 		return cfg.AuthInfos
 	}))

-	_ = rootCmd.RegisterFlagCompletionFunc("namespace", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-		conn, err := client.InitConnection(client.NewConfig(k8sFlags))
-		if err != nil {
-			return nil, cobra.ShellCompDirectiveError
-		}
-
-		nss, err := conn.ValidNamespaceNames()
-		if err != nil {
-			return nil, cobra.ShellCompDirectiveError
-		}
-
-		return filterFlagCompletions(nss, toComplete)
+	_ = rootCmd.RegisterFlagCompletionFunc("namespace", func(cmd *cobra.Command, args []string, s string) ([]string, cobra.ShellCompDirective) {
+		if c, err := client.InitConnection(conn); err == nil {
+			if nss, err := c.ValidNamespaceNames(); err == nil {
+				return filterFlagCompletions(nss, s)
+			}
+		}
+
+		return nil, cobra.ShellCompDirectiveError
 	})
 }

-func k8sFlagCompletionFunc[T any](picker func(cfg *api.Config) map[string]T) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+func k8sFlagCompletion[T any](cfg *api.Config, picker k8sPickerFn[T]) completeFn {
 	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-		k8sCfg, err := client.NewConfig(k8sFlags).RawConfig()
-		if err != nil {
+		if cfg == nil {
 			return nil, cobra.ShellCompDirectiveError
 		}
-		return filterFlagCompletions(picker(&k8sCfg), toComplete)
+
+		return filterFlagCompletions(picker(cfg), toComplete)
 	}
 }

-func filterFlagCompletions[T any](m map[string]T, toComplete string) ([]string, cobra.ShellCompDirective) {
-	var completions []string
+func filterFlagCompletions[T any](m map[string]T, s string) ([]string, cobra.ShellCompDirective) {
+	cc := make([]string, 0, len(m))
 	for name := range m {
-		if strings.HasPrefix(name, toComplete) {
-			completions = append(completions, name)
+		if strings.HasPrefix(name, s) {
+			cc = append(cc, name)
 		}
 	}
-	return completions, cobra.ShellCompDirectiveNoFileComp
+
+	return cc, cobra.ShellCompDirectiveNoFileComp
 }
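
The completion rework above fetches the kubeconfig once and threads it through a generic `k8sFlagCompletion[T]` helper, so contexts, clusters, and users share one prefix filter. A trimmed, runnable sketch of the generic filter; the real helper also returns a `cobra.ShellCompDirective`, which this sketch omits:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// filterByPrefix works over any map keyed by name, mirroring the
// filterFlagCompletions helper introduced in the hunk above.
func filterByPrefix[T any](m map[string]T, prefix string) []string {
	cc := make([]string, 0, len(m))
	for name := range m {
		if strings.HasPrefix(name, prefix) {
			cc = append(cc, name)
		}
	}
	sort.Strings(cc) // stable output for the shell
	return cc
}

func main() {
	contexts := map[string]struct{}{"dev": {}, "demo": {}, "prod": {}}
	fmt.Println(filterByPrefix(contexts, "de")) // [demo dev]
}
```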

@@ -46,7 +46,7 @@ type APIClient struct {
 	mxsClient    *versioned.Clientset
 	cachedClient *disk.CachedDiscoveryClient
 	config       *Config
-	mx           sync.Mutex
+	mx           sync.RWMutex
 	cache        *cache.LRUExpireCache
 	connOK       bool
 }

@@ -143,10 +143,7 @@ func (a *APIClient) clearCache() {

 // CanI checks if user has access to a certain resource.
 func (a *APIClient) CanI(ns, gvr string, verbs []string) (auth bool, err error) {
-	a.mx.Lock()
-	defer a.mx.Unlock()
-
-	if !a.connOK {
+	if !a.getConnOK() {
 		return false, errors.New("ACCESS -- No API server connection")
 	}
 	if IsClusterWide(ns) {

@@ -305,14 +302,11 @@ func (a *APIClient) ValidNamespaceNames() (NamespaceNames, error) {

 // CheckConnectivity return true if api server is cool or false otherwise.
 func (a *APIClient) CheckConnectivity() bool {
-	a.mx.Lock()
-	defer a.mx.Unlock()
-
 	defer func() {
 		if err := recover(); err != nil {
-			a.connOK = false
+			a.setConnOK(false)
 		}
-		if !a.connOK {
+		if !a.getConnOK() {
 			a.clearCache()
 		}
 	}()

@@ -328,21 +322,21 @@ func (a *APIClient) CheckConnectivity() bool {
 	client, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
 		log.Error().Err(err).Msgf("Unable to connect to api server")
-		a.connOK = false
-		return a.connOK
+		a.setConnOK(false)
+		return a.getConnOK()
 	}

 	// Check connection
 	if _, err := client.ServerVersion(); err == nil {
-		if !a.connOK {
+		if !a.getConnOK() {
 			a.reset()
 		}
 	} else {
 		log.Error().Err(err).Msgf("can't connect to cluster")
-		a.connOK = false
+		a.setConnOK(false)
 	}

-	return a.connOK
+	return a.getConnOK()
 }

 // Config return a kubernetes configuration.

@@ -355,13 +349,97 @@ func (a *APIClient) HasMetrics() bool {
 	return a.supportsMetricsResources() == nil
 }

+func (a *APIClient) getMxsClient() *versioned.Clientset {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.mxsClient
+}
+
+func (a *APIClient) setMxsClient(c *versioned.Clientset) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.mxsClient = c
+}
+
+func (a *APIClient) getCachedClient() *disk.CachedDiscoveryClient {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.cachedClient
+}
+
+func (a *APIClient) setCachedClient(c *disk.CachedDiscoveryClient) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.cachedClient = c
+}
+
+func (a *APIClient) getDClient() dynamic.Interface {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.dClient
+}
+
+func (a *APIClient) setDClient(c dynamic.Interface) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.dClient = c
+}
+
+func (a *APIClient) getConnOK() bool {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.connOK
+}
+
+func (a *APIClient) setConnOK(b bool) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.connOK = b
+}
+
+func (a *APIClient) setLogClient(k kubernetes.Interface) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.logClient = k
+}
+
+func (a *APIClient) getLogClient() kubernetes.Interface {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.logClient
+}
+
+func (a *APIClient) setClient(k kubernetes.Interface) {
+	a.mx.Lock()
+	defer a.mx.Unlock()
+
+	a.client = k
+}
+
+func (a *APIClient) getClient() kubernetes.Interface {
+	a.mx.RLock()
+	defer a.mx.RUnlock()
+
+	return a.client
+}
+
 // DialLogs returns a handle to api server for logs.
 func (a *APIClient) DialLogs() (kubernetes.Interface, error) {
-	if !a.connOK {
-		return nil, errors.New("no connection to dial")
+	if !a.getConnOK() {
+		return nil, errors.New("dialLogs - no connection to dial")
 	}
-	if a.logClient != nil {
-		return a.logClient, nil
+	if clt := a.getLogClient(); clt != nil {
+		return clt, nil
 	}

 	cfg, err := a.RestConfig()
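
Everything above follows one mechanical rule: the struct's `sync.Mutex` becomes a `sync.RWMutex`, and every read or write of a shared field moves behind a tiny accessor. A minimal sketch of that accessor pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

// connState mirrors the shape of the change: reads take RLock so many
// readers can proceed concurrently; writes take Lock and exclude everyone.
type connState struct {
	mx     sync.RWMutex
	connOK bool
}

func (c *connState) getConnOK() bool {
	c.mx.RLock()
	defer c.mx.RUnlock()
	return c.connOK
}

func (c *connState) setConnOK(b bool) {
	c.mx.Lock()
	defer c.mx.Unlock()
	c.connOK = b
}

func main() {
	var c connState
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); c.setConnOK(true) }()
	go func() { defer wg.Done(); _ = c.getConnOK() }()
	wg.Wait()
	fmt.Println("connOK:", c.getConnOK())
}
```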

@@ -369,31 +447,35 @@ func (a *APIClient) DialLogs() (kubernetes.Interface, error) {
 		return nil, err
 	}
 	cfg.Timeout = 0
-	if a.logClient, err = kubernetes.NewForConfig(cfg); err != nil {
+	c, err := kubernetes.NewForConfig(cfg)
+	if err != nil {
 		return nil, err
 	}
+	a.setLogClient(c)

-	return a.logClient, nil
+	return a.getLogClient(), nil
 }

 // Dial returns a handle to api server or die.
 func (a *APIClient) Dial() (kubernetes.Interface, error) {
-	if !a.connOK {
+	if !a.getConnOK() {
 		return nil, errors.New("no connection to dial")
 	}
-	if a.client != nil {
-		return a.client, nil
+	if c := a.getClient(); c != nil {
+		return c, nil
 	}

 	cfg, err := a.RestConfig()
 	if err != nil {
 		return nil, err
 	}
-	if a.client, err = kubernetes.NewForConfig(cfg); err != nil {
+	if c, err := kubernetes.NewForConfig(cfg); err != nil {
 		return nil, err
+	} else {
+		a.setClient(c)
 	}

-	return a.client, nil
+	return a.getClient(), nil
 }

 // RestConfig returns a rest api client.

@@ -403,15 +485,12 @@ func (a *APIClient) RestConfig() (*restclient.Config, error) {

 // CachedDiscovery returns a cached discovery client.
 func (a *APIClient) CachedDiscovery() (*disk.CachedDiscoveryClient, error) {
-	a.mx.Lock()
-	defer a.mx.Unlock()
-
-	if !a.connOK {
+	if !a.getConnOK() {
 		return nil, errors.New("no connection to cached dial")
 	}

-	if a.cachedClient != nil {
-		return a.cachedClient, nil
+	if c := a.getCachedClient(); c != nil {
+		return c, nil
 	}

 	cfg, err := a.RestConfig()

@@ -422,37 +501,38 @@ func (a *APIClient) CachedDiscovery() (*disk.CachedDiscoveryClient, error) {
 	httpCacheDir := filepath.Join(mustHomeDir(), ".kube", "http-cache")
 	discCacheDir := filepath.Join(mustHomeDir(), ".kube", "cache", "discovery", toHostDir(cfg.Host))

-	a.cachedClient, err = disk.NewCachedDiscoveryClientForConfig(cfg, discCacheDir, httpCacheDir, cacheExpiry)
+	c, err := disk.NewCachedDiscoveryClientForConfig(cfg, discCacheDir, httpCacheDir, cacheExpiry)
 	if err != nil {
 		return nil, err
 	}
-	return a.cachedClient, nil
+	a.setCachedClient(c)
+
+	return a.getCachedClient(), nil
 }

 // DynDial returns a handle to a dynamic interface.
 func (a *APIClient) DynDial() (dynamic.Interface, error) {
-	if a.dClient != nil {
-		return a.dClient, nil
+	if c := a.getDClient(); c != nil {
+		return c, nil
 	}

 	cfg, err := a.RestConfig()
 	if err != nil {
 		return nil, err
 	}
-	if a.dClient, err = dynamic.NewForConfig(cfg); err != nil {
-		log.Panic().Err(err)
+	c, err := dynamic.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
 	}
+	a.setDClient(c)

-	return a.dClient, nil
+	return a.getDClient(), nil
 }

 // MXDial returns a handle to the metrics server.
 func (a *APIClient) MXDial() (*versioned.Clientset, error) {
-	a.mx.Lock()
-	defer a.mx.Unlock()
-
-	if a.mxsClient != nil {
-		return a.mxsClient, nil
+	if c := a.getMxsClient(); c != nil {
+		return c, nil
 	}

 	cfg, err := a.RestConfig()

@@ -460,11 +540,13 @@ func (a *APIClient) MXDial() (*versioned.Clientset, error) {
 		return nil, err
 	}

-	if a.mxsClient, err = versioned.NewForConfig(cfg); err != nil {
-		log.Error().Err(err)
+	c, err := versioned.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
 	}
+	a.setMxsClient(c)

-	return a.mxsClient, err
+	return a.getMxsClient(), err
 }

 // SwitchContext handles kubeconfig context switches.

@@ -473,12 +555,8 @@ func (a *APIClient) SwitchContext(name string) error {
 	if err := a.config.SwitchContext(name); err != nil {
 		return err
 	}
-	a.mx.Lock()
-	{
-		a.reset()
-		ResetMetrics()
-	}
-	a.mx.Unlock()
+	a.reset()
+	ResetMetrics()

 	if !a.CheckConnectivity() {
 		return fmt.Errorf("unable to connect to context %q", name)

@@ -490,9 +568,14 @@ func (a *APIClient) SwitchContext(name string) error {
 func (a *APIClient) reset() {
 	a.config.reset()
 	a.cache = cache.NewLRUExpireCache(cacheSize)
-	a.client, a.dClient, a.nsClient, a.mxsClient = nil, nil, nil, nil
-	a.cachedClient, a.logClient = nil, nil
-	a.connOK = true
+	a.nsClient = nil
+
+	a.setDClient(nil)
+	a.setMxsClient(nil)
+	a.setCachedClient(nil)
+	a.setClient(nil)
+	a.setLogClient(nil)
+	a.setConnOK(true)
 }

 func (a *APIClient) checkCacheBool(key string) (state bool, ok bool) {
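
A design note on the lazy dials above: `getClient`/`setClient` make each individual read and write safe, but two goroutines can still race past the nil check and both construct a client (the last `set` wins, which is wasteful but harmless here). `sync.OnceValues` would close that window, at the cost of the resettability that `SwitchContext`/`reset()` relies on. A hedged sketch of that alternative, not what k9s does:

```go
package main

import (
	"fmt"
	"sync"
)

type apiClient struct{ name string }

// newLazyDial returns a dial function whose constructor runs exactly once,
// even under concurrent callers. Unlike the accessor pattern above, the
// cached value cannot be reset for a context switch.
func newLazyDial() func() (*apiClient, error) {
	return sync.OnceValues(func() (*apiClient, error) {
		// Imagine the expensive kubernetes.NewForConfig(cfg) call here.
		return &apiClient{name: "api"}, nil
	})
}

func main() {
	dial := newLazyDial()
	c1, _ := dial()
	c2, _ := dial()
	fmt.Println(c1 == c2) // true: every caller shares one instance
}
```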

@@ -4,6 +4,7 @@
 package config

 import (
+	"errors"
 	"fmt"
 	"os"

@@ -58,7 +59,7 @@ func (c *Config) ContextPluginsPath() string {
 		return ""
 	}

-	return AppContextPluginsFile(ct.ClusterName, c.K9s.activeContextName)
+	return AppContextPluginsFile(ct.GetClusterName(), c.K9s.activeContextName)
 }

 // Refine the configuration based on cli args.

@@ -218,17 +219,18 @@ func (c *Config) Load(path string) error {
 	if err != nil {
 		return err
 	}
+	var errs error
 	if err := data.JSONValidator.Validate(json.K9sSchema, bb); err != nil {
-		return fmt.Errorf("k9s config file %q load failed:\n%w", path, err)
+		errs = errors.Join(errs, fmt.Errorf("k9s config file %q load failed:\n%w", path, err))
 	}

 	var cfg Config
 	if err := yaml.Unmarshal(bb, &cfg); err != nil {
-		return fmt.Errorf("main config yaml load failed: %w", err)
+		errs = errors.Join(errs, fmt.Errorf("main config.yaml load failed: %w", err))
 	}
 	c.Merge(&cfg)

-	return nil
+	return errs
 }

 // Save configuration to disk.

@@ -583,90 +583,3 @@ func TestSetup(t *testing.T) {
 		fmt.Println("Boom!", m, i)
 	})
 }
-
-// ----------------------------------------------------------------------------
-// Test Data...
-
-// var expectedConfig = `k9s:
-//   liveViewAutoRefresh: true
-//   screenDumpDir: /tmp/screen-dumps
-//   refreshRate: 100
-//   maxConnRetry: 5
-//   readOnly: true
-//   noExitOnCtrlC: false
-//   ui:
-//     enableMouse: false
-//     headless: false
-//     logoless: false
-//     crumbsless: false
-//     noIcons: false
-//   skipLatestRevCheck: false
-//   disablePodCounting: false
-//   shellPod:
-//     image: busybox:1.35.0
-//     namespace: default
-//     limits:
-//       cpu: 100m
-//       memory: 100Mi
-//   imageScans:
-//     enable: false
-//     exclusions:
-//       namespaces: []
-//       labels: {}
-//   logger:
-//     tail: 500
-//     buffer: 800
-//     sinceSeconds: -1
-//     fullScreen: false
-//     textWrap: false
-//     showTime: false
-//   thresholds:
-//     cpu:
-//       critical: 90
-//       warn: 70
-//     memory:
-//       critical: 90
-//       warn: 70
-// `
-
-// var resetConfig = `k9s:
-//   liveViewAutoRefresh: true
-//   screenDumpDir: /tmp/screen-dumps
-//   refreshRate: 2
-//   maxConnRetry: 5
-//   readOnly: false
-//   noExitOnCtrlC: false
-//   ui:
-//     enableMouse: false
-//     headless: false
-//     logoless: false
-//     crumbsless: false
-//     noIcons: false
-//   skipLatestRevCheck: false
-//   disablePodCounting: false
-//   shellPod:
-//     image: busybox:1.35.0
-//     namespace: default
-//     limits:
-//       cpu: 100m
-//       memory: 100Mi
-//   imageScans:
-//     enable: false
-//     exclusions:
-//       namespaces: []
-//       labels: {}
-//   logger:
-//     tail: 200
-//     buffer: 2000
-//     sinceSeconds: -1
-//     fullScreen: false
-//     textWrap: false
-//     showTime: false
-//   thresholds:
-//     cpu:
-//       critical: 90
-//       warn: 70
-//     memory:
-//       critical: 90
-//       warn: 70
-// `

@@ -29,8 +29,6 @@ func NewConfig(ct *api.Context) *Config {

 // Validate ensures config is in norms.
 func (c *Config) Validate(conn client.Connection, ks KubeSettings) {
-	c.mx.Lock()
-	defer c.mx.Unlock()

 	if c.Context == nil {
 		c.Context = NewContext()

@@ -4,6 +4,8 @@
 package data

 import (
+	"sync"
+
 	"github.com/derailed/k9s/internal/client"
 	"github.com/rs/zerolog/log"
 )

@@ -18,6 +20,7 @@ type Namespace struct {
 	Active        string   `yaml:"active"`
 	LockFavorites bool     `yaml:"lockFavorites"`
 	Favorites     []string `yaml:"favorites"`
+	mx            sync.RWMutex
 }

 // NewNamespace create a new namespace configuration.

@@ -37,6 +40,9 @@ func NewActiveNamespace(n string) *Namespace {

 // Validate validates a namespace is setup correctly.
 func (n *Namespace) Validate(c client.Connection) {
+	n.mx.RLock()
+	defer n.mx.RUnlock()
+
 	if c == nil || !c.IsValidNamespace(n.Active) {
 		return
 	}

@@ -50,14 +56,18 @@ func (n *Namespace) Validate(c client.Connection) {

 // SetActive set the active namespace.
 func (n *Namespace) SetActive(ns string, ks KubeSettings) error {
+	if n == nil {
+		n = NewActiveNamespace(ns)
+	}
+
+	n.mx.Lock()
+	defer n.mx.Unlock()
+
 	if ns == client.BlankNamespace {
 		ns = client.NamespaceAll
 	}
-	if n == nil {
-		n = NewActiveNamespace(ns)
-	} else {
-		n.Active = ns
-	}
+	n.Active = ns

 	if ns != "" && !n.LockFavorites {
 		n.addFavNS(ns)
 	}
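
One subtlety in the `SetActive` hunk above: the nil-receiver guard now runs before `n.mx.Lock()`, which matters because locking a mutex through a nil pointer panics. A tiny sketch of the shape (type and field names here are illustrative), including the Go caveat that rebinding the receiver only affects the local copy:

```go
package main

import (
	"fmt"
	"sync"
)

type ns struct {
	mx     sync.RWMutex
	Active string
}

func (n *ns) SetActive(name string) {
	// The nil check must precede any n.mx access, or a nil receiver panics.
	if n == nil {
		n = &ns{} // guards the method body; the caller's pointer stays nil
	}
	n.mx.Lock()
	defer n.mx.Unlock()
	n.Active = name
}

func main() {
	var n *ns
	n.SetActive("kube-system") // safe: no panic on a nil receiver
	fmt.Println(n == nil)      // true: the rebinding was local to the method
}
```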

@@ -68,17 +68,17 @@ func (k *K9s) resetConnection(conn client.Connection) {

 // Save saves the k9s config to disk.
 func (k *K9s) Save() error {
-	if k.activeConfig == nil {
+	if k.getActiveConfig() == nil {
 		log.Warn().Msgf("Save failed. no active config detected")
 		return nil
 	}
 	path := filepath.Join(
 		AppContextsDir,
-		data.SanitizeContextSubpath(k.activeConfig.Context.ClusterName, k.activeContextName),
+		data.SanitizeContextSubpath(k.activeConfig.Context.GetClusterName(), k.getActiveContextName()),
 		data.MainConfigFile,
 	)

-	return k.activeConfig.Save(path)
+	return k.getActiveConfig().Save(path)
 }

 // Merge merges k9s configs.

@@ -124,19 +124,20 @@ func (k *K9s) ContextScreenDumpDir() string {
 }

 func (k *K9s) contextPath() string {
-	if k.activeConfig == nil {
+	if k.getActiveConfig() == nil {
 		return "na"
 	}

 	return data.SanitizeContextSubpath(
-		k.activeConfig.Context.ClusterName,
+		k.getActiveConfig().Context.GetClusterName(),
 		k.ActiveContextName(),
 	)
 }

 // Reset resets configuration and context.
 func (k *K9s) Reset() {
-	k.activeConfig, k.activeContextName = nil, ""
+	k.setActiveConfig(nil)
+	k.setActiveContextName("")
 }

 // ActiveContextNamespace fetch the context active ns.

@@ -151,23 +152,16 @@ func (k *K9s) ActiveContextNamespace() (string, error) {

 // ActiveContextName returns the active context name.
 func (k *K9s) ActiveContextName() string {
-	k.mx.RLock()
-	defer k.mx.RUnlock()
-
-	return k.activeContextName
+	return k.getActiveContextName()
 }

 // ActiveContext returns the currently active context.
 func (k *K9s) ActiveContext() (*data.Context, error) {
-	var ac *data.Config
-	k.mx.RLock()
-	ac = k.activeConfig
-	k.mx.RUnlock()
-
-	if ac != nil && ac.Context != nil {
-		return ac.Context, nil
+	if cfg := k.getActiveConfig(); cfg != nil && cfg.Context != nil {
+		return cfg.Context, nil
 	}
-	ct, err := k.ActivateContext(k.activeContextName)
+	ct, err := k.ActivateContext(k.getActiveContextName())
 	if err != nil {
 		return nil, err
 	}

@@ -175,46 +169,75 @@ func (k *K9s) ActiveContext() (*data.Context, error) {
 	return ct, nil
 }

+func (k *K9s) setActiveConfig(c *data.Config) {
+	k.mx.Lock()
+	defer k.mx.Unlock()
+
+	k.activeConfig = c
+}
+
+func (k *K9s) getActiveConfig() *data.Config {
+	k.mx.RLock()
+	defer k.mx.RUnlock()
+
+	return k.activeConfig
+}
+
+func (k *K9s) setActiveContextName(n string) {
+	k.mx.Lock()
+	defer k.mx.Unlock()
+
+	k.activeContextName = n
+}
+
+func (k *K9s) getActiveContextName() string {
+	k.mx.RLock()
+	defer k.mx.RUnlock()
+
+	return k.activeContextName
+}
+
 // ActivateContext initializes the active context if not present.
 func (k *K9s) ActivateContext(n string) (*data.Context, error) {
-	k.activeContextName = n
+	k.setActiveContextName(n)
 	ct, err := k.ks.GetContext(n)
 	if err != nil {
 		return nil, err
 	}
-	k.activeConfig, err = k.dir.Load(n, ct)
+
+	cfg, err := k.dir.Load(n, ct)
 	if err != nil {
 		return nil, err
 	}
+	k.setActiveConfig(cfg)

 	k.Validate(k.conn, k.ks)
 	// If the context specifies a namespace, use it!
 	if ns := ct.Namespace; ns != client.BlankNamespace {
-		k.activeConfig.Context.Namespace.Active = ns
+		k.getActiveConfig().Context.Namespace.Active = ns
 	} else if k.activeConfig.Context.Namespace.Active == "" {
-		k.activeConfig.Context.Namespace.Active = client.DefaultNamespace
+		k.getActiveConfig().Context.Namespace.Active = client.DefaultNamespace
 	}
-	if k.activeConfig.Context == nil {
+	if k.getActiveConfig().Context == nil {
 		return nil, fmt.Errorf("context activation failed for: %s", n)
 	}

-	return k.activeConfig.Context, nil
+	return k.getActiveConfig().Context, nil
 }

 // Reload reloads the context config from disk.
 func (k *K9s) Reload() error {
-	k.mx.Lock()
-	defer k.mx.Unlock()
-
-	ct, err := k.ks.GetContext(k.activeContextName)
+	ct, err := k.ks.GetContext(k.getActiveContextName())
 	if err != nil {
 		return err
 	}
-	k.activeConfig, err = k.dir.Load(k.activeContextName, ct)
+
+	cfg, err := k.dir.Load(k.getActiveContextName(), ct)
 	if err != nil {
 		return err
 	}
-	k.activeConfig.Validate(k.conn, k.ks)
+	k.setActiveConfig(cfg)
+	k.getActiveConfig().Validate(k.conn, k.ks)

 	return nil
 }

@@ -277,12 +300,9 @@ func (k *K9s) GetRefreshRate() int {

 // IsReadOnly returns the readonly setting.
 func (k *K9s) IsReadOnly() bool {
-	k.mx.RLock()
-	defer k.mx.RUnlock()
-
 	ro := k.ReadOnly
-	if k.activeConfig != nil && k.activeConfig.Context.ReadOnly != nil {
-		ro = *k.activeConfig.Context.ReadOnly
+	if cfg := k.getActiveConfig(); cfg != nil && cfg.Context.ReadOnly != nil {
+		ro = *cfg.Context.ReadOnly
 	}
 	if k.manualReadOnly != nil {
 		ro = true

@@ -300,7 +320,7 @@ func (k *K9s) Validate(c client.Connection, ks data.KubeSettings) {
 		k.MaxConnRetry = defaultMaxConnRetry
 	}

-	if k.activeConfig == nil {
+	if k.getActiveConfig() == nil {
 		if n, err := ks.CurrentContextName(); err == nil {
 			_, _ = k.ActivateContext(n)
 		}

@@ -309,7 +329,7 @@ func (k *K9s) Validate(c client.Connection, ks data.KubeSettings) {
 	k.Logger = k.Logger.Validate()
 	k.Thresholds = k.Thresholds.Validate()

-	if k.activeConfig != nil {
-		k.activeConfig.Validate(c, ks)
+	if cfg := k.getActiveConfig(); cfg != nil {
+		cfg.Validate(c, ks)
 	}
 }

@@ -94,7 +94,7 @@ func getContainerStatus(co string, status v1.PodStatus) *v1.ContainerStatus {
 }

 func (c *Container) fetchPod(fqn string) (*v1.Pod, error) {
-	o, err := c.GetFactory().Get("v1/pods", fqn, true, labels.Everything())
+	o, err := c.getFactory().Get("v1/pods", fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -23,7 +23,7 @@ type Context struct {
 }

 func (c *Context) config() *client.Config {
-	return c.GetFactory().Client().Config()
+	return c.getFactory().Client().Config()
 }

 // Get a Context.

@@ -60,5 +60,5 @@ func (c *Context) MustCurrentContextName() string {

 // Switch to another context.
 func (c *Context) Switch(ctx string) error {
-	return c.GetFactory().Client().SwitchContext(ctx)
+	return c.getFactory().Client().SwitchContext(ctx)
 }

@@ -44,5 +44,5 @@ func (c *CustomResourceDefinition) List(ctx context.Context, _ string) ([]runtim
 	}

 	const gvr = "apiextensions.k8s.io/v1/customresourcedefinitions"
-	return c.GetFactory().List(gvr, "-", false, labelSel)
+	return c.getFactory().List(gvr, "-", false, labelSel)
 }

@@ -56,7 +56,7 @@ func (c *CronJob) Run(path string) error {
 		return fmt.Errorf("user is not authorized to run jobs")
 	}

-	o, err := c.GetFactory().Get(c.GVR(), path, true, labels.Everything())
+	o, err := c.getFactory().Get(c.GVR(), path, true, labels.Everything())
 	if err != nil {
 		return err
 	}

@@ -102,7 +102,7 @@ func (c *CronJob) Run(path string) error {
 // ScanSA scans for serviceaccount refs.
 func (c *CronJob) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := c.GetFactory().List(c.GVR(), ns, wait, labels.Everything())
+	oo, err := c.getFactory().List(c.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -127,7 +127,7 @@ func (c *CronJob) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, erro

 // GetInstance fetch a matching cronjob.
 func (c *CronJob) GetInstance(fqn string) (*batchv1.CronJob, error) {
-	o, err := c.GetFactory().Get(c.GVR(), fqn, true, labels.Everything())
+	o, err := c.getFactory().Get(c.GVR(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -175,7 +175,7 @@ func (c *CronJob) ToggleSuspend(ctx context.Context, path string) error {
 // Scan scans for cluster resource refs.
 func (c *CronJob) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := c.GetFactory().List(c.GVR(), ns, wait, labels.Everything())
+	oo, err := c.getFactory().List(c.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -81,7 +81,7 @@ func (d *Deployment) Scale(ctx context.Context, path string, replicas int32) err

 // Restart a Deployment rollout.
 func (d *Deployment) Restart(ctx context.Context, path string) error {
-	o, err := d.GetFactory().Get("apps/v1/deployments", path, true, labels.Everything())
+	o, err := d.getFactory().Get("apps/v1/deployments", path, true, labels.Everything())
 	if err != nil {
 		return err
 	}

@@ -170,7 +170,7 @@ func (d *Deployment) GetInstance(fqn string) (*appsv1.Deployment, error) {
 // ScanSA scans for serviceaccount refs.
 func (d *Deployment) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := d.GetFactory().List(d.GVR(), ns, wait, labels.Everything())
+	oo, err := d.getFactory().List(d.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -196,7 +196,7 @@ func (d *Deployment) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, e
 // Scan scans for resource references.
 func (d *Deployment) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := d.GetFactory().List(d.GVR(), ns, wait, labels.Everything())
+	oo, err := d.getFactory().List(d.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -58,7 +58,7 @@ func (d *DaemonSet) IsHappy(ds appsv1.DaemonSet) bool {

 // Restart a DaemonSet rollout.
 func (d *DaemonSet) Restart(ctx context.Context, path string) error {
-	o, err := d.GetFactory().Get("apps/v1/daemonsets", path, true, labels.Everything())
+	o, err := d.getFactory().Get("apps/v1/daemonsets", path, true, labels.Everything())
 	if err != nil {
 		return err
 	}

@@ -173,7 +173,7 @@ func (d *DaemonSet) Pod(fqn string) (string, error) {

 // GetInstance returns a daemonset instance.
 func (d *DaemonSet) GetInstance(fqn string) (*appsv1.DaemonSet, error) {
-	o, err := d.GetFactory().Get(d.gvr.String(), fqn, true, labels.Everything())
+	o, err := d.getFactory().Get(d.gvrStr(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -190,7 +190,7 @@ func (d *DaemonSet) GetInstance(fqn string) (*appsv1.DaemonSet, error) {
 // ScanSA scans for serviceaccount refs.
 func (d *DaemonSet) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := d.GetFactory().List(d.GVR(), ns, wait, labels.Everything())
+	oo, err := d.getFactory().List(d.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -216,7 +216,7 @@ func (d *DaemonSet) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, er
 // Scan scans for cluster refs.
 func (d *DaemonSet) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := d.GetFactory().List(d.GVR(), ns, wait, labels.Everything())
+	oo, err := d.getFactory().List(d.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -106,7 +106,7 @@ func (g *Generic) ToYAML(path string, showManaged bool) (string, error) {
 // Delete deletes a resource.
 func (g *Generic) Delete(ctx context.Context, path string, propagation *metav1.DeletionPropagation, grace Grace) error {
 	ns, n := client.Namespaced(path)
-	auth, err := g.Client().CanI(ns, g.gvr.String(), []string{client.DeleteVerb})
+	auth, err := g.Client().CanI(ns, g.gvrStr(), []string{client.DeleteVerb})
 	if err != nil {
 		return err
 	}

@@ -73,7 +73,7 @@ func (j *Job) List(ctx context.Context, ns string) ([]runtime.Object, error) {

 // TailLogs tail logs for all pods represented by this Job.
 func (j *Job) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan, error) {
-	o, err := j.GetFactory().Get(j.gvr.String(), opts.Path, true, labels.Everything())
+	o, err := j.getFactory().Get(j.gvrStr(), opts.Path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -92,7 +92,7 @@ func (j *Job) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan, error)
 }

 func (j *Job) GetInstance(fqn string) (*batchv1.Job, error) {
-	o, err := j.GetFactory().Get(j.gvr.String(), fqn, true, labels.Everything())
+	o, err := j.getFactory().Get(j.gvrStr(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -109,7 +109,7 @@ func (j *Job) GetInstance(fqn string) (*batchv1.Job, error) {
 // ScanSA scans for serviceaccount refs.
 func (j *Job) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := j.GetFactory().List(j.GVR(), ns, wait, labels.Everything())
+	oo, err := j.getFactory().List(j.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -135,7 +135,7 @@ func (j *Job) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 // Scan scans for resource references.
 func (j *Job) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := j.GetFactory().List(j.GVR(), ns, wait, labels.Everything())
+	oo, err := j.getFactory().List(j.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -56,7 +56,7 @@ func (n *Node) ToggleCordon(path string, cordon bool) error {
 		}
 		return fmt.Errorf("node is already uncordoned")
 	}
-	dial, err := n.GetFactory().Client().Dial()
+	dial, err := n.getFactory().Client().Dial()
 	if err != nil {
 		return err
 	}

@@ -98,7 +98,7 @@ func (n *Node) Drain(path string, opts DrainOptions, w io.Writer) error {
 		}
 	}

-	dial, err := n.GetFactory().Client().Dial()
+	dial, err := n.getFactory().Client().Dial()
 	if err != nil {
 		return err
 	}

@@ -189,7 +189,7 @@ func (n *Node) List(ctx context.Context, ns string) ([]runtime.Object, error) {
 // CountPods counts the pods scheduled on a given node.
 func (n *Node) CountPods(nodeName string) (int, error) {
 	var count int
-	oo, err := n.GetFactory().List("v1/pods", client.BlankNamespace, false, labels.Everything())
+	oo, err := n.getFactory().List("v1/pods", client.BlankNamespace, false, labels.Everything())
 	if err != nil {
 		return 0, err
 	}

@@ -213,7 +213,7 @@ func (n *Node) CountPods(nodeName string) (int, error) {

 // GetPods returns all pods running on given node.
 func (n *Node) GetPods(nodeName string) ([]*v1.Pod, error) {
-	oo, err := n.GetFactory().List("v1/pods", client.BlankNamespace, false, labels.Everything())
+	oo, err := n.getFactory().List("v1/pods", client.BlankNamespace, false, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -29,7 +29,14 @@ func (n *NonResource) Init(f Factory, gvr client.GVR) {
 	n.mx.Unlock()
 }

-func (n *NonResource) GetFactory() Factory {
+func (n *NonResource) gvrStr() string {
+	n.mx.RLock()
+	defer n.mx.RUnlock()
+
+	return n.gvr.String()
+}
+
+func (n *NonResource) getFactory() Factory {
 	n.mx.RLock()
 	defer n.mx.RUnlock()

@@ -41,7 +48,7 @@ func (n *NonResource) GVR() string {
 	n.mx.RLock()
 	defer n.mx.RUnlock()

-	return n.gvr.String()
+	return n.gvrStr()
 }

 // Get returns the given resource.

@@ -177,7 +177,7 @@ func (p *Pod) Pod(fqn string) (string, error) {

 // GetInstance returns a pod instance.
 func (p *Pod) GetInstance(fqn string) (*v1.Pod, error) {
-	o, err := p.GetFactory().Get(p.gvr.String(), fqn, true, labels.Everything())
+	o, err := p.getFactory().Get(p.gvrStr(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -197,7 +197,7 @@ func (p *Pod) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan, error)
 	if !ok {
 		return nil, errors.New("no factory in context")
 	}
-	o, err := fac.Get(p.gvr.String(), opts.Path, true, labels.Everything())
+	o, err := fac.Get(p.gvrStr(), opts.Path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -240,7 +240,7 @@ func (p *Pod) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan, error)
 // ScanSA scans for ServiceAccount refs.
 func (p *Pod) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := p.GetFactory().List(p.GVR(), ns, wait, labels.Everything())
+	oo, err := p.getFactory().List(p.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -270,7 +270,7 @@ func (p *Pod) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 // Scan scans for cluster resource refs.
 func (p *Pod) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := p.GetFactory().List(p.GVR(), ns, wait, labels.Everything())
+	oo, err := p.getFactory().List(p.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -69,7 +69,7 @@ func (p *Popeye) List(ctx context.Context, ns string) ([]runtime.Object, error)
 		flags.ActiveNamespace = &ns
 	}
 	spinach := filepath.Join(cfg.AppConfigDir, "spinach.yaml")
-	if c, err := p.GetFactory().Client().Config().CurrentContextName(); err == nil {
+	if c, err := p.getFactory().Client().Config().CurrentContextName(); err == nil {
 		spinach = filepath.Join(cfg.AppConfigDir, fmt.Sprintf("%s_spinach.yaml", c))
 	}
 	if _, err := os.Stat(spinach); err == nil {

@@ -30,7 +30,7 @@ type PortForward struct {

 // Delete deletes a portforward.
 func (p *PortForward) Delete(_ context.Context, path string, _ *metav1.DeletionPropagation, _ Grace) error {
-	p.GetFactory().DeleteForwarder(path)
+	p.getFactory().DeleteForwarder(path)

 	return nil
 }

@@ -48,7 +48,7 @@ func (p *PortForward) List(ctx context.Context, _ string) ([]runtime.Object, err
 		log.Debug().Msgf("No custom benchmark config file found: %q", benchFile)
 	}

-	ff, cc := p.GetFactory().Forwarders(), config.Benchmarks.Containers
+	ff, cc := p.getFactory().Forwarders(), config.Benchmarks.Containers
 	oo := make([]runtime.Object, 0, len(ff))
 	for k, f := range ff {
 		if !strings.HasPrefix(k, path) {

@@ -60,7 +60,7 @@ func (r *Rbac) List(ctx context.Context, ns string) ([]runtime.Object, error) {
 }

 func (r *Rbac) loadClusterRoleBinding(path string) ([]runtime.Object, error) {
-	o, err := r.GetFactory().Get(crbGVR, path, true, labels.Everything())
+	o, err := r.getFactory().Get(crbGVR, path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -71,7 +71,7 @@ func (r *Rbac) loadClusterRoleBinding(path string) ([]runtime.Object, error) {
 		return nil, err
 	}

-	crbo, err := r.GetFactory().Get(crGVR, client.FQN("-", crb.RoleRef.Name), true, labels.Everything())
+	crbo, err := r.getFactory().Get(crGVR, client.FQN("-", crb.RoleRef.Name), true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -85,7 +85,7 @@ func (r *Rbac) loadClusterRoleBinding(path string) ([]runtime.Object, error) {
 }

 func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {
-	o, err := r.GetFactory().Get(rbGVR, path, true, labels.Everything())
+	o, err := r.getFactory().Get(rbGVR, path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -96,7 +96,7 @@ func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {
 	}

 	if rb.RoleRef.Kind == "ClusterRole" {
-		o, e := r.GetFactory().Get(crGVR, client.FQN("-", rb.RoleRef.Name), true, labels.Everything())
+		o, e := r.getFactory().Get(crGVR, client.FQN("-", rb.RoleRef.Name), true, labels.Everything())
 		if e != nil {
 			return nil, e
 		}

@@ -108,7 +108,7 @@ func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {
 		return asRuntimeObjects(parseRules(client.ClusterScope, "-", cr.Rules)), nil
 	}

-	ro, err := r.GetFactory().Get(rGVR, client.FQN(rb.Namespace, rb.RoleRef.Name), true, labels.Everything())
+	ro, err := r.getFactory().Get(rGVR, client.FQN(rb.Namespace, rb.RoleRef.Name), true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -123,7 +123,7 @@ func (r *Rbac) loadRoleBinding(path string) ([]runtime.Object, error) {

 func (r *Rbac) loadClusterRole(path string) ([]runtime.Object, error) {
 	log.Debug().Msgf("LOAD-CR %q", path)
-	o, err := r.GetFactory().Get(crGVR, path, true, labels.Everything())
+	o, err := r.getFactory().Get(crGVR, path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -138,7 +138,7 @@ func (r *Rbac) loadClusterRole(path string) ([]runtime.Object, error) {
 }

 func (r *Rbac) loadRole(path string) ([]runtime.Object, error) {
-	o, err := r.GetFactory().Get(rGVR, path, true, labels.Everything())
+	o, err := r.getFactory().Get(rGVR, path, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -195,7 +195,7 @@ func isSameSubject(kind, ns, name string, subject *rbacv1.Subject) bool {
 func (p *Policy) fetchClusterRoles() ([]rbacv1.ClusterRole, error) {
 	const gvr = "rbac.authorization.k8s.io/v1/clusterroles"

-	oo, err := p.GetFactory().List(gvr, client.ClusterScope, false, labels.Everything())
+	oo, err := p.getFactory().List(gvr, client.ClusterScope, false, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -215,7 +215,7 @@ func (p *Policy) fetchClusterRoles() ([]rbacv1.ClusterRole, error) {
 func (p *Policy) fetchRoles() ([]rbacv1.Role, error) {
 	const gvr = "rbac.authorization.k8s.io/v1/roles"

-	oo, err := p.GetFactory().List(gvr, client.BlankNamespace, false, labels.Everything())
+	oo, err := p.getFactory().List(gvr, client.BlankNamespace, false, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -33,12 +33,12 @@ func (r *Resource) List(ctx context.Context, ns string) ([]runtime.Object, error
 		}
 	}

-	return r.GetFactory().List(r.gvr.String(), ns, false, lsel)
+	return r.getFactory().List(r.gvrStr(), ns, false, lsel)
 }

 // Get returns a resource instance if found, else an error.
 func (r *Resource) Get(_ context.Context, path string) (runtime.Object, error) {
-	return r.GetFactory().Get(r.gvr.String(), path, true, labels.Everything())
+	return r.getFactory().Get(r.gvrStr(), path, true, labels.Everything())
 }

 // ToYAML returns a resource yaml.

@@ -174,7 +174,7 @@ func (s *StatefulSet) Pod(fqn string) (string, error) {
 }

 func (s *StatefulSet) getStatefulSet(fqn string) (*appsv1.StatefulSet, error) {
-	o, err := s.GetFactory().Get(s.gvr.String(), fqn, true, labels.Everything())
+	o, err := s.getFactory().Get(s.gvrStr(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -191,7 +191,7 @@ func (s *StatefulSet) getStatefulSet(fqn string) (*appsv1.StatefulSet, error) {
 // ScanSA scans for serviceaccount refs.
 func (s *StatefulSet) ScanSA(ctx context.Context, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := s.GetFactory().List(s.GVR(), ns, wait, labels.Everything())
+	oo, err := s.getFactory().List(s.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -217,7 +217,7 @@ func (s *StatefulSet) ScanSA(ctx context.Context, fqn string, wait bool) (Refs,
 // Scan scans for cluster resource refs.
 func (s *StatefulSet) Scan(ctx context.Context, gvr client.GVR, fqn string, wait bool) (Refs, error) {
 	ns, n := client.Namespaced(fqn)
-	oo, err := s.GetFactory().List(s.GVR(), ns, wait, labels.Everything())
+	oo, err := s.getFactory().List(s.GVR(), ns, wait, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -51,7 +51,7 @@ func (s *Service) Pod(fqn string) (string, error) {

 // GetInstance returns a service instance.
 func (s *Service) GetInstance(fqn string) (*v1.Service, error) {
-	o, err := s.GetFactory().Get(s.gvr.String(), fqn, true, labels.Everything())
+	o, err := s.getFactory().Get(s.gvrStr(), fqn, true, labels.Everything())
 	if err != nil {
 		return nil, err
 	}

@@ -166,6 +166,8 @@ func (s *Stack) Top() Component {
 		return nil
 	}

+	s.mx.RLock()
+	defer s.mx.RUnlock()
 	return s.components[len(s.components)-1]
 }

@@ -76,6 +76,9 @@ func (t *Table) SetInstance(path string) {

 // AddListener adds a new model listener.
 func (t *Table) AddListener(l TableListener) {
+	t.mx.Lock()
+	defer t.mx.Unlock()
+
 	t.listeners = append(t.listeners, l)
 }

@@ -91,8 +94,8 @@ func (t *Table) RemoveListener(l TableListener) {

 	if victim >= 0 {
 		t.mx.Lock()
-		defer t.mx.Unlock()
 		t.listeners = append(t.listeners[:victim], t.listeners[victim+1:]...)
+		t.mx.Unlock()
 	}
 }

@@ -289,16 +292,23 @@ func (t *Table) reconcile(ctx context.Context) error {
 }

 func (t *Table) fireTableChanged(data *render.TableData) {
+	var ll []TableListener
 	t.mx.RLock()
-	defer t.mx.RUnlock()
+	ll = t.listeners
+	t.mx.RUnlock()

-	for _, l := range t.listeners {
+	for _, l := range ll {
 		l.TableDataChanged(data)
 	}
 }

 func (t *Table) fireTableLoadFailed(err error) {
-	for _, l := range t.listeners {
+	var ll []TableListener
+	t.mx.RLock()
+	ll = t.listeners
+	t.mx.RUnlock()
+
+	for _, l := range ll {
 		l.TableLoadFailed(err)
 	}
 }
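
The `fireTableChanged`/`fireTableLoadFailed` changes above replace `defer RUnlock` with an explicit snapshot-then-unlock, so listener callbacks run outside the lock. A compact sketch of that snapshot pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// notifier copies the listener slice reference while holding the read
// lock, releases it, then invokes callbacks lock-free so a slow or
// re-entrant listener cannot stall or deadlock other lock holders.
type notifier struct {
	mx        sync.RWMutex
	listeners []func(string)
}

func (n *notifier) add(l func(string)) {
	n.mx.Lock()
	defer n.mx.Unlock()
	n.listeners = append(n.listeners, l)
}

func (n *notifier) fire(msg string) {
	n.mx.RLock()
	ll := n.listeners // snapshot under the read lock
	n.mx.RUnlock()

	for _, l := range ll { // callbacks run outside the lock
		l(msg)
	}
}

func main() {
	var n notifier
	n.add(func(m string) { fmt.Println("got:", m) })
	n.fire("table updated")
}
```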

@@ -20,6 +20,7 @@ import (
 // Synchronizer manages ui event queue.
 type synchronizer interface {
 	Flash() *model.Flash
+	Logo() *Logo
 	UpdateClusterInfo()
 	QueueUpdateDraw(func())
 	QueueUpdate(func())

@@ -101,7 +102,7 @@ func (c *Configurator) SkinsDirWatcher(ctx context.Context, s synchronizer) erro
 		for {
 			select {
 			case evt := <-w.Events:
-				if evt.Name == c.skinFile && evt.Op != fsnotify.Chmod {
+				if evt.Op != fsnotify.Chmod && filepath.Base(evt.Name) == filepath.Base(c.skinFile) {
 					log.Debug().Msgf("Skin changed: %s", c.skinFile)
 					s.QueueUpdateDraw(func() {
 						c.RefreshStyles(s)

@@ -141,11 +142,13 @@ func (c *Configurator) ConfigWatcher(ctx context.Context, s synchronizer) error
 				if err := c.Config.Load(evt.Name); err != nil {
 					log.Error().Err(err).Msgf("k9s config reload failed")
 					s.Flash().Warn("k9s config reload failed. Check k9s logs!")
+					s.Logo().Warn("K9s config reload failed!")
 				}
 			} else {
 				if err := c.Config.K9s.Reload(); err != nil {
 					log.Error().Err(err).Msgf("k9s context config reload failed")
 					s.Flash().Warn("Context config reload failed. Check k9s logs!")
+					s.Logo().Warn("Context config reload failed!")
 				}
 			}
 			s.QueueUpdateDraw(func() {

@@ -252,10 +255,11 @@ func (c *Configurator) loadSkinFile(s synchronizer) {
 	if err := c.Styles.Load(skinFile); err != nil {
 		if errors.Is(err, os.ErrNotExist) {
 			s.Flash().Warnf("Skin file %q not found in skins dir: %s", filepath.Base(skinFile), config.AppSkinsDir)
-			c.updateStyles("")
 		} else {
 			s.Flash().Errf("Failed to parse skin file -- %s: %s.", filepath.Base(skinFile), err)
-			c.updateStyles(skinFile)
 		}
+		c.updateStyles("")
 	} else {
 		s.Flash().Infof("Skin file loaded: %q", skinFile)
 		c.updateStyles(skinFile)
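
The `SkinsDirWatcher` hunk above relaxes the event match from full-path equality to basename equality, which keeps skin reloads firing when the fsnotify event path differs from the configured path (rename-based editor saves or symlinked config dirs are the usual culprits). A minimal sketch against the real fsnotify API; the watch directory and skin file path here are placeholders:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	skinFile := "/home/user/.config/k9s/skins/dracula.yaml" // placeholder path
	if err := w.Add(filepath.Dir(skinFile)); err != nil {
		log.Fatal(err)
	}

	for evt := range w.Events {
		// Match on basename, not the full event path, mirroring the diff.
		if evt.Op != fsnotify.Chmod && filepath.Base(evt.Name) == filepath.Base(skinFile) {
			log.Printf("skin changed: %s", evt.Name) // trigger the style reload here
		}
	}
}
```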

@@ -72,6 +72,7 @@ func newMockSynchronizer() synchronizer {
 func (s synchronizer) Flash() *model.Flash {
 	return model.NewFlash(100 * time.Millisecond)
 }
+func (s synchronizer) Logo() *ui.Logo { return nil }
 func (s synchronizer) UpdateClusterInfo() {}
 func (s synchronizer) QueueUpdateDraw(func()) {}
 func (s synchronizer) QueueUpdate(func()) {}

@@ -56,8 +56,8 @@ func (s *StatusIndicator) ClusterInfoUpdated(data model.ClusterMeta) {
 	s.SetPermanent(fmt.Sprintf(
 		statusIndicatorFmt,
 		data.K9sVer,
 		data.Context,
 		data.Cluster,
 		data.User,
 		data.K8sVer,
 		render.PrintPerc(data.Cpu),
 		render.PrintPerc(data.Mem),

@@ -52,7 +52,10 @@ func (l *Logo) Status() *tview.TextView {
 // StylesChanged notifies the skin changed.
 func (l *Logo) StylesChanged(s *config.Styles) {
 	l.styles = s
-	l.Reset()
+	l.SetBackgroundColor(l.styles.BgColor())
+	l.status.SetBackgroundColor(l.styles.BgColor())
+	l.logo.SetBackgroundColor(l.styles.BgColor())
+	l.refreshLogo(l.styles.Body().LogoColor)
 }

 // IsBenchmarking checks if benchmarking is active or not.

@@ -64,10 +67,7 @@ func (l *Logo) IsBenchmarking() bool {
 // Reset clears out the logo view and resets colors.
 func (l *Logo) Reset() {
 	l.status.Clear()
-	l.SetBackgroundColor(l.styles.BgColor())
-	l.status.SetBackgroundColor(l.styles.BgColor())
-	l.logo.SetBackgroundColor(l.styles.BgColor())
-	l.refreshLogo(l.styles.Body().LogoColor)
+	l.StylesChanged(l.styles)
 }

 // Err displays a log error state.

@@ -5,6 +5,7 @@ package ui

 import (
 	"fmt"
+	"sync"

 	"github.com/derailed/k9s/internal/config"
 	"github.com/derailed/k9s/internal/model"

@@ -83,6 +84,7 @@ type Prompt struct {
 	styles  *config.Styles
 	model   PromptModel
 	spacer  int
+	mx      sync.RWMutex
 }

 // NewPrompt returns a new command view.

@@ -206,17 +208,29 @@ func (p *Prompt) activate() {
 	p.model.Notify(false)
 }

+func (p *Prompt) Clear() {
+	p.mx.Lock()
+	defer p.mx.Unlock()
+
+	p.TextView.Clear()
+}
+
+func (p *Prompt) Draw(sc tcell.Screen) {
+	p.mx.RLock()
+	defer p.mx.RUnlock()
+
+	p.TextView.Draw(sc)
+}
+
 func (p *Prompt) update(text, suggestion string) {
 	p.Clear()
 	p.write(text, suggestion)
 }

-func (p *Prompt) suggest(text, suggestion string) {
-	p.Clear()
-	p.write(text, suggestion)
-}
-
 func (p *Prompt) write(text, suggest string) {
+	p.mx.Lock()
+	defer p.mx.Unlock()
+
 	p.SetCursorIndex(p.spacer + len(text))
 	txt := text
 	if suggest != "" {

@@ -240,7 +254,7 @@ func (p *Prompt) BufferChanged(text, suggestion string) {

 // SuggestionChanged notifies the suggestion changed.
 func (p *Prompt) SuggestionChanged(text, suggestion string) {
-	p.suggest(text, suggestion)
+	p.update(text, suggestion)
 }

 // BufferActive indicates the buff activity changed.

@@ -58,6 +58,9 @@ func (n *Namespace) useNsCmd(evt *tcell.EventKey) *tcell.EventKey {

 func (n *Namespace) useNamespace(fqn string) {
 	_, ns := client.Namespaced(fqn)
+	if client.CleanseNamespace(n.App().Config.ActiveNamespace()) == ns {
+		return
+	}
 	if err := n.App().switchNS(ns); err != nil {
 		n.App().Flash().Err(err)
 		return

@@ -1,6 +1,6 @@
 name: k9s
 base: core20
-version: 'v0.31.5'
+version: 'v0.31.6'
 summary: K9s is a CLI to view and manage your Kubernetes clusters.
 description: |
   K9s is a CLI to view and manage your Kubernetes clusters. By leveraging a terminal UI, you can easily traverse Kubernetes resources and view the state of your clusters in a single powerful session.