Add sanitize command (#2286)
- Sanitize provides for clearing out pods in either completed/failed state #2277mine
parent
19952cd282
commit
5540b5a825
2
Makefile
2
Makefile
|
|
@ -11,7 +11,7 @@ DATE ?= $(shell TZ=UTC date -j -f "%s" ${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:
|
|||
else
|
||||
DATE ?= $(shell date -u -d @${SOURCE_DATE_EPOCH} +"%Y-%m-%dT%H:%M:%SZ")
|
||||
endif
|
||||
VERSION ?= v0.28.0
|
||||
VERSION ?= v0.28.1
|
||||
IMG_NAME := derailed/k9s
|
||||
IMAGE := ${IMG_NAME}:${VERSION}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,63 @@
|
|||
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/k9s.png" align="center" width="800" height="auto"/>
|
||||
|
||||
# Release v0.28.1
|
||||
|
||||
## Notes
|
||||
|
||||
Thank you to all that contributed with flushing out issues and enhancements for K9s! I'll try to mark some of these issues as fixed. But if you don't mind grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue please help me verify and close. Your support, kindness and awesome suggestions to make K9s better are, as ever, very much noted and appreciated! Also big thanks to all that have allocated their own time to help others on both slack and on this repo!!
|
||||
|
||||
As you may know, K9s is not pimped out by corps with deep pockets, thus if you feel K9s is helping your Kubernetes journey, please consider joining our [sponsorship program](https://github.com/sponsors/derailed) and/or make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
|
||||
|
||||
On Slack? Please join us [K9slackers](https://join.slack.com/t/k9sers/shared_invite/enQtOTA5MDEyNzI5MTU0LWQ1ZGI3MzliYzZhZWEyNzYxYzA3NjE0YTk1YmFmNzViZjIyNzhkZGI0MmJjYzhlNjdlMGJhYzE2ZGU1NjkyNTM)
|
||||
|
||||
---
|
||||
|
||||
## ♫ Sounds Behind The Release ♭
|
||||
|
||||
* [If Trouble Was Money - Albert Collins](https://www.youtube.com/watch?v=cz6LbWWqX-g)
|
||||
* [Old Love - Eric Clapton](https://www.youtube.com/watch?v=EklciRHZnUQ)
|
||||
* [Touch And GO - The Cars](https://www.youtube.com/watch?v=L7Gpr_Auz8Y)
|
||||
|
||||
---
|
||||
|
||||
## A Word From Our Sponsors...
|
||||
|
||||
To all the good folks below that opted to `pay it forward` and join our sponsorship program, I salute you!!
|
||||
|
||||
* [Bradley Heilbrun](https://github.com/bheilbrun)
|
||||
|
||||
> Sponsorship cancellations since the last release: `2` ;(
|
||||
|
||||
---
|
||||
|
||||
## Feature Release
|
||||
|
||||
### Sanitize Me!
|
||||
|
||||
Over time, you might end up with a lot of pod cruft on your cluster. Pods that might be completed, erroring out, etc... Once you've completed your pod analysis it could be useful to clear out these pods from your cluster.
|
||||
|
||||
In this drop, we introduce a new command `sanitize` aka `z` available on pod views otherwise known as `The Axe!`. This command performs a clean up of all pods that are in either in completed, crashloopBackoff or failed state. This could be especially handy if you run workflows jobs or commands on your cluster that might leave lots of `turd` pods. Tho this has a `phat` fail safe dialog please be careful with this one as it is a blunt tool!
|
||||
|
||||
---
|
||||
|
||||
## Resolved Issues
|
||||
|
||||
* [Issue #2281](https://github.com/derailed/k9s/issues/2281) Can't run Node shell
|
||||
* [Issue #2277](https://github.com/derailed/k9s/issues/2277) bulk actions applied to power filters
|
||||
* [Issue #2273](https://github.com/derailed/k9s/issues/2273) Error when draining node that is cordoned bug
|
||||
* [Issue #2233](https://github.com/derailed/k9s/issues/2233) Invalid port-forwarding status displayed over the k9s UI
|
||||
|
||||
---
|
||||
|
||||
## Contributed PRs
|
||||
|
||||
Please be sure to give `Big Thanks!` and `ATTA Girls/Boys!` to all the fine contributors for making K9s better for all of us!!
|
||||
|
||||
* [PR #2280](https://github.com/derailed/k9s/pull/2280) chore: replace github.com/ghodss/yaml with sigs.k8s.
|
||||
* [PR #2278](https://github.com/derailed/k9s/pull/2278) README.md: fix typo in netshoot URL
|
||||
* [PR #2275](https://github.com/derailed/k9s/pull/2275) check if the Node already cordoned when executing Drain
|
||||
* [PR #2247](https://github.com/derailed/k9s/pull/2247) Delete port forwards when pods get deleted
|
||||
|
||||
---
|
||||
|
||||
<img src="https://raw.githubusercontent.com/derailed/k9s/master/assets/imhotep_logo.png" width="32" height="auto"/> © 2023 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
1
go.mod
1
go.mod
|
|
@ -30,6 +30,7 @@ require (
|
|||
k8s.io/client-go v0.28.3
|
||||
k8s.io/klog/v2 v2.100.1
|
||||
k8s.io/kubectl v0.28.3
|
||||
k8s.io/kubernetes v1.28.3
|
||||
k8s.io/metrics v0.28.3
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
|
|
|||
6
go.sum
6
go.sum
|
|
@ -245,8 +245,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
|
|||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
|
||||
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
|
||||
github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
|
||||
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
|
||||
|
|
@ -628,6 +628,8 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5Ohx
|
|||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||
k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k=
|
||||
k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE=
|
||||
k8s.io/kubernetes v1.28.3 h1:XTci6gzk+JR51UZuZQCFJ4CsyUkfivSjLI4O1P9z6LY=
|
||||
k8s.io/kubernetes v1.28.3/go.mod h1:NhAysZWvHtNcJFFHic87ofxQN7loylCQwg3ZvXVDbag=
|
||||
k8s.io/metrics v0.28.3 h1:w2s3kVi7HulXqCVDFkF4hN/OsL1tXTTb4Biif995h/g=
|
||||
k8s.io/metrics v0.28.3/go.mod h1:OZZ23AHFojPzU6r3xoHGRUcV3I9pauLua+07sAUbwLc=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||
|
|
|
|||
|
|
@ -115,7 +115,7 @@ func (d *Deployment) Restart(ctx context.Context, path string) error {
|
|||
|
||||
// TailLogs tail logs for all pods represented by this Deployment.
|
||||
func (d *Deployment) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan, error) {
|
||||
dp, err := d.Load(d.Factory, opts.Path)
|
||||
dp, err := d.GetInstance(d.Factory, opts.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -128,7 +128,7 @@ func (d *Deployment) TailLogs(ctx context.Context, opts *LogOptions) ([]LogChan,
|
|||
|
||||
// Pod returns a pod victim by name.
|
||||
func (d *Deployment) Pod(fqn string) (string, error) {
|
||||
dp, err := d.Load(d.Factory, fqn)
|
||||
dp, err := d.GetInstance(d.Factory, fqn)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
@ -136,8 +136,8 @@ func (d *Deployment) Pod(fqn string) (string, error) {
|
|||
return podFromSelector(d.Factory, dp.Namespace, dp.Spec.Selector.MatchLabels)
|
||||
}
|
||||
|
||||
// Load returns a deployment instance.
|
||||
func (*Deployment) Load(f Factory, fqn string) (*appsv1.Deployment, error) {
|
||||
// GetInstance fetch a matching deployment.
|
||||
func (*Deployment) GetInstance(f Factory, fqn string) (*appsv1.Deployment, error) {
|
||||
o, err := f.Get("apps/v1/deployments", fqn, true, labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -240,7 +240,7 @@ func (d *Deployment) Scan(ctx context.Context, gvr, fqn string, wait bool) (Refs
|
|||
|
||||
// GetPodSpec returns a pod spec given a resource.
|
||||
func (d *Deployment) GetPodSpec(path string) (*v1.PodSpec, error) {
|
||||
dp, err := d.Load(d.Factory, path)
|
||||
dp, err := d.GetInstance(d.Factory, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -500,3 +500,37 @@ func GetDefaultContainer(m metav1.ObjectMeta, spec v1.PodSpec) (string, bool) {
|
|||
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (p *Pod) Sanitize(ctx context.Context, ns string) (int, error) {
|
||||
oo, err := p.Resource.List(ctx, ns)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var count int
|
||||
for _, o := range oo {
|
||||
u, ok := o.(*unstructured.Unstructured)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
var pod v1.Pod
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &pod)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
log.Debug().Msgf("Pod status: %q", render.PodStatus(&pod))
|
||||
switch render.PodStatus(&pod) {
|
||||
case render.PhaseCompleted, render.PhaseCrashLoop, render.PhaseError, render.PhaseImagePullBackOff, render.PhaseOOMKilled:
|
||||
log.Debug().Msgf("Sanitizing %s:%s", pod.Namespace, pod.Name)
|
||||
fqn := client.FQN(pod.Namespace, pod.Name)
|
||||
if err := p.Resource.Delete(ctx, fqn, nil, NowGrace); err != nil {
|
||||
log.Warn().Err(err).Msgf("Pod %s deletion failed", fqn)
|
||||
continue
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
|
||||
log.Debug().Msgf("Sanitizer deleted %d pods", count)
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,6 +43,11 @@ func NewPortForwarder(f Factory) *PortForwarder {
|
|||
}
|
||||
}
|
||||
|
||||
// String dumps as string.
|
||||
func (p *PortForwarder) String() string {
|
||||
return fmt.Sprintf("%s|%s", p.path, p.tunnel)
|
||||
}
|
||||
|
||||
// Age returns the port forward age.
|
||||
func (p *PortForwarder) Age() string {
|
||||
return time.Since(p.age).String()
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ func (r *ReplicaSet) Rollback(fqn string) error {
|
|||
}
|
||||
|
||||
var ddp Deployment
|
||||
dp, err := ddp.Load(r.Factory, client.FQN(rs.Namespace, name))
|
||||
dp, err := ddp.GetInstance(r.Factory, client.FQN(rs.Namespace, name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -67,15 +67,19 @@ func (s *StatefulSet) Scale(ctx context.Context, path string, replicas int32) er
|
|||
|
||||
// Restart a StatefulSet rollout.
|
||||
func (s *StatefulSet) Restart(ctx context.Context, path string) error {
|
||||
o, err := s.GetFactory().Get("apps/v1/statefulsets", path, true, labels.Everything())
|
||||
sts, err := s.GetInstance(s.Factory, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var sts appsv1.StatefulSet
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(o.(*unstructured.Unstructured).Object, &sts)
|
||||
|
||||
ns, _ := client.Namespaced(path)
|
||||
pp, err := podsFromSelector(s.Factory, ns, sts.Spec.Selector.MatchLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range pp {
|
||||
s.Forwarders().Kill(client.FQN(p.Namespace, p.Name))
|
||||
}
|
||||
|
||||
auth, err := s.Client().CanI(sts.Namespace, "apps/v1/statefulsets", []string{client.PatchVerb})
|
||||
if err != nil {
|
||||
|
|
@ -90,12 +94,12 @@ func (s *StatefulSet) Restart(ctx context.Context, path string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
before, err := runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), &sts)
|
||||
before, err := runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), sts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
after, err := polymorphichelpers.ObjectRestarterFn(&sts)
|
||||
after, err := polymorphichelpers.ObjectRestarterFn(sts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -115,8 +119,8 @@ func (s *StatefulSet) Restart(ctx context.Context, path string) error {
|
|||
|
||||
}
|
||||
|
||||
// Load returns a statefulset instance.
|
||||
func (*StatefulSet) Load(f Factory, fqn string) (*appsv1.StatefulSet, error) {
|
||||
// GetInstance returns a statefulset instance.
|
||||
func (*StatefulSet) GetInstance(f Factory, fqn string) (*appsv1.StatefulSet, error) {
|
||||
o, err := f.Get("apps/v1/statefulsets", fqn, true, labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -301,3 +305,26 @@ func (s *StatefulSet) SetImages(ctx context.Context, path string, imageSpecs Ima
|
|||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func podsFromSelector(f Factory, ns string, sel map[string]string) ([]*v1.Pod, error) {
|
||||
oo, err := f.List("v1/pods", ns, true, labels.Set(sel).AsSelector())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(oo) == 0 {
|
||||
return nil, fmt.Errorf("no matching pods for %v", sel)
|
||||
}
|
||||
|
||||
pp := make([]*v1.Pod, 0, len(oo))
|
||||
for _, o := range oo {
|
||||
pod := new(v1.Pod)
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(o.(*unstructured.Unstructured).Object, pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pp = append(pp, pod)
|
||||
}
|
||||
|
||||
return pp, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -155,3 +155,9 @@ type ContainsPodSpec interface {
|
|||
// Set Images for a resource
|
||||
SetImages(ctx context.Context, path string, imageSpecs ImageSpecs) error
|
||||
}
|
||||
|
||||
// Sanitizer represents a resource sanitizer.
|
||||
type Sanitizer interface {
|
||||
// Sanitize nukes all resources in unhappy state.
|
||||
Sanitize(context.Context, string) (int, error)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,6 +32,11 @@ func NewPortTunnel(a, co, lp, cp string) PortTunnel {
|
|||
}
|
||||
}
|
||||
|
||||
// String dumps as string.
|
||||
func (t PortTunnel) String() string {
|
||||
return fmt.Sprintf("%s|%s|%s:%s", t.Address, t.Container, t.LocalPort, t.ContainerPort)
|
||||
}
|
||||
|
||||
// PortMap returns a port mapping.
|
||||
func (t PortTunnel) PortMap() string {
|
||||
if t.LocalPort == "" {
|
||||
|
|
|
|||
|
|
@ -12,11 +12,27 @@ import (
|
|||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kubernetes/pkg/util/node"
|
||||
mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
|
||||
"github.com/derailed/k9s/internal/client"
|
||||
)
|
||||
|
||||
const (
|
||||
PhaseTerminating = "Terminating"
|
||||
PhaseInitialized = "Initialized"
|
||||
PhaseRunning = "Running"
|
||||
PhaseNotReady = "NoReady"
|
||||
PhaseCompleted = "Completed"
|
||||
PhaseContainerCreating = "ContainerCreating"
|
||||
PhasePodInitializing = "PodInitializing"
|
||||
PhaseUnknown = "Unknown"
|
||||
PhaseCrashLoop = "CrashLoopBackOff"
|
||||
PhaseError = "Error"
|
||||
PhaseImagePullBackOff = "ImagePullBackOff"
|
||||
PhaseOOMKilled = "OOMKilled"
|
||||
)
|
||||
|
||||
// Pod renders a K8s Pod to screen.
|
||||
type Pod struct {
|
||||
Base
|
||||
|
|
@ -89,7 +105,7 @@ func (Pod) Header(ns string) Header {
|
|||
func (p Pod) Render(o interface{}, ns string, row *Row) error {
|
||||
pwm, ok := o.(*PodWithMetrics)
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected PodWithMetrics, but got %T", o)
|
||||
return fmt.Errorf("expected PodWithMetrics, but got %T", o)
|
||||
}
|
||||
|
||||
var po v1.Pod
|
||||
|
|
@ -369,3 +385,89 @@ func checkContainerStatus(cs v1.ContainerStatus, i, initCount int) string {
|
|||
return "Init:" + strconv.Itoa(i) + "/" + strconv.Itoa(initCount)
|
||||
}
|
||||
}
|
||||
|
||||
// PosStatus computes pod status.
|
||||
func PodStatus(pod *v1.Pod) string {
|
||||
reason := string(pod.Status.Phase)
|
||||
if pod.Status.Reason != "" {
|
||||
reason = pod.Status.Reason
|
||||
}
|
||||
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
|
||||
reason = v1.PodReasonSchedulingGated
|
||||
}
|
||||
}
|
||||
|
||||
var initializing bool
|
||||
for i := range pod.Status.InitContainerStatuses {
|
||||
container := pod.Status.InitContainerStatuses[i]
|
||||
switch {
|
||||
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
|
||||
continue
|
||||
case container.State.Terminated != nil:
|
||||
if len(container.State.Terminated.Reason) == 0 {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else {
|
||||
reason = "Init:" + container.State.Terminated.Reason
|
||||
}
|
||||
initializing = true
|
||||
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
|
||||
reason = "Init:" + container.State.Waiting.Reason
|
||||
initializing = true
|
||||
default:
|
||||
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
|
||||
initializing = true
|
||||
}
|
||||
break
|
||||
}
|
||||
if !initializing {
|
||||
var hasRunning bool
|
||||
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
|
||||
container := pod.Status.ContainerStatuses[i]
|
||||
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
|
||||
reason = container.State.Waiting.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
|
||||
reason = container.State.Terminated.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else if container.Ready && container.State.Running != nil {
|
||||
hasRunning = true
|
||||
}
|
||||
}
|
||||
|
||||
if reason == PhaseCompleted && hasRunning {
|
||||
if hasPodReadyCondition(pod.Status.Conditions) {
|
||||
reason = PhaseRunning
|
||||
} else {
|
||||
reason = PhaseNotReady
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pod.DeletionTimestamp != nil && pod.Status.Reason == node.NodeUnreachablePodReason {
|
||||
reason = PhaseUnknown
|
||||
} else if pod.DeletionTimestamp != nil {
|
||||
reason = PhaseTerminating
|
||||
}
|
||||
|
||||
return reason
|
||||
}
|
||||
|
||||
func hasPodReadyCondition(conditions []v1.PodCondition) bool {
|
||||
for _, condition := range conditions {
|
||||
if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -194,6 +194,66 @@ func TestPodInitRender(t *testing.T) {
|
|||
assert.Equal(t, e, r.Fields[:17])
|
||||
}
|
||||
|
||||
func TestCheckPodStatus(t *testing.T) {
|
||||
uu := map[string]struct {
|
||||
pod v1.Pod
|
||||
e string
|
||||
}{
|
||||
"unknown": {
|
||||
pod: v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
Phase: render.PhaseUnknown,
|
||||
},
|
||||
},
|
||||
e: render.PhaseUnknown,
|
||||
},
|
||||
"running": {
|
||||
pod: v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
InitContainerStatuses: []v1.ContainerStatus{},
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
State: v1.ContainerState{
|
||||
Running: &v1.ContainerStateRunning{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
e: render.PhaseRunning,
|
||||
},
|
||||
"backoff": {
|
||||
pod: v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
InitContainerStatuses: []v1.ContainerStatus{},
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{
|
||||
Reason: render.PhaseImagePullBackOff,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
e: render.PhaseImagePullBackOff,
|
||||
},
|
||||
}
|
||||
|
||||
for k := range uu {
|
||||
u := uu[k]
|
||||
t.Run(k, func(t *testing.T) {
|
||||
assert.Equal(t, u.e, render.PodStatus(&u.pod))
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Helpers...
|
||||
|
||||
|
|
@ -218,3 +278,123 @@ func makeRes(c, m string) v1.ResourceList {
|
|||
v1.ResourceMemory: mem,
|
||||
}
|
||||
}
|
||||
|
||||
// apiVersion: v1
|
||||
// kind: Pod
|
||||
// metadata:
|
||||
// creationTimestamp: "2023-11-11T17:01:40Z"
|
||||
// finalizers:
|
||||
// - batch.kubernetes.io/job-tracking
|
||||
// generateName: hello-28328646-
|
||||
// labels:
|
||||
// batch.kubernetes.io/controller-uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
|
||||
// batch.kubernetes.io/job-name: hello-28328646
|
||||
// controller-uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
|
||||
// job-name: hello-28328646
|
||||
// name: hello-28328646-h9fnh
|
||||
// namespace: fred
|
||||
// ownerReferences:
|
||||
// - apiVersion: batch/v1
|
||||
// blockOwnerDeletion: true
|
||||
// controller: true
|
||||
// kind: Job
|
||||
// name: hello-28328646
|
||||
// uid: 35cf5552-7180-48c1-b7b2-8b6e630a7860
|
||||
// resourceVersion: "381637"
|
||||
// uid: ea77c360-6375-459b-8b30-2ac0c59404cd
|
||||
// spec:
|
||||
// containers:
|
||||
// - args:
|
||||
// - /bin/bash
|
||||
// - -c
|
||||
// - for i in {1..5}; do echo "hello";sleep 1; done
|
||||
// image: blang/busybox-bash
|
||||
// imagePullPolicy: Always
|
||||
// name: c1
|
||||
// resources: {}
|
||||
// terminationMessagePath: /dev/termination-log
|
||||
// terminationMessagePolicy: File
|
||||
// volumeMounts:
|
||||
// - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
|
||||
// name: kube-api-access-7sztm
|
||||
// readOnly: true
|
||||
// dnsPolicy: ClusterFirst
|
||||
// enableServiceLinks: true
|
||||
// nodeName: kind-worker
|
||||
// preemptionPolicy: PreemptLowerPriority
|
||||
// priority: 0
|
||||
// restartPolicy: OnFailure
|
||||
// schedulerName: default-scheduler
|
||||
// securityContext: {}
|
||||
// serviceAccount: default
|
||||
// serviceAccountName: default
|
||||
// terminationGracePeriodSeconds: 30
|
||||
// tolerations:
|
||||
// - effect: NoExecute
|
||||
// key: node.kubernetes.io/not-ready
|
||||
// operator: Exists
|
||||
// tolerationSeconds: 300
|
||||
// - effect: NoExecute
|
||||
// key: node.kubernetes.io/unreachable
|
||||
// operator: Exists
|
||||
// tolerationSeconds: 300
|
||||
// volumes:
|
||||
// - name: kube-api-access-7sztm
|
||||
// projected:
|
||||
// defaultMode: 420
|
||||
// sources:
|
||||
// - serviceAccountToken:
|
||||
// expirationSeconds: 3607
|
||||
// path: token
|
||||
// - configMap:
|
||||
// items:
|
||||
// - key: ca.crt
|
||||
// path: ca.crt
|
||||
// name: kube-root-ca.crt
|
||||
// - downwardAPI:
|
||||
// items:
|
||||
// - fieldRef:
|
||||
// apiVersion: v1
|
||||
// fieldPath: metadata.namespace
|
||||
// path: namespace
|
||||
// status:
|
||||
// conditions:
|
||||
// - lastProbeTime: null
|
||||
// lastTransitionTime: "2023-11-11T17:01:40Z"
|
||||
// status: "True"
|
||||
// type: Initialized
|
||||
// - lastProbeTime: null
|
||||
// lastTransitionTime: "2023-11-11T17:01:40Z"
|
||||
// message: 'containers with unready status: [c1[]'
|
||||
// reason: ContainersNotReady
|
||||
// status: "False"
|
||||
// type: Ready
|
||||
// - lastProbeTime: null
|
||||
// lastTransitionTime: "2023-11-11T17:01:40Z"
|
||||
// message: 'containers with unready status: [c1[]'
|
||||
// reason: ContainersNotReady
|
||||
// status: "False"
|
||||
// type: ContainersReady
|
||||
// - lastProbeTime: null
|
||||
// lastTransitionTime: "2023-11-11T17:01:40Z"
|
||||
// status: "True"
|
||||
// type: PodScheduled
|
||||
// containerStatuses:
|
||||
// - image: blang/busybox-bash
|
||||
// imageID: ""
|
||||
// lastState: {}
|
||||
// name: c1
|
||||
// ready: false
|
||||
// restartCount: 0
|
||||
// started: false
|
||||
// state:
|
||||
// waiting:
|
||||
// message: Back-off pulling image "blang/busybox-bash"
|
||||
// reason: ImagePullBackOff
|
||||
// hostIP: 172.18.0.3
|
||||
// phase: Pending
|
||||
// podIP: 10.244.1.59
|
||||
// podIPs:
|
||||
// - ip: 10.244.1.59
|
||||
// qosClass: BestEffort
|
||||
// startTime: "2023-11-11T17:01:40Z"
|
||||
|
|
|
|||
|
|
@ -10,6 +10,59 @@ const dialogKey = "dialog"
|
|||
|
||||
type confirmFunc func()
|
||||
|
||||
func ShowConfirmAck(app *ui.App, pages *ui.Pages, acceptStr string, override bool, title, msg string, ack confirmFunc, cancel cancelFunc) {
|
||||
styles := app.Styles.Dialog()
|
||||
|
||||
f := tview.NewForm()
|
||||
f.SetItemPadding(0)
|
||||
f.SetButtonsAlign(tview.AlignCenter).
|
||||
SetButtonBackgroundColor(styles.ButtonBgColor.Color()).
|
||||
SetButtonTextColor(styles.ButtonFgColor.Color()).
|
||||
SetLabelColor(styles.LabelFgColor.Color()).
|
||||
SetFieldTextColor(styles.FieldFgColor.Color())
|
||||
f.AddButton("Cancel", func() {
|
||||
dismissConfirm(pages)
|
||||
cancel()
|
||||
})
|
||||
|
||||
var accept bool
|
||||
if override {
|
||||
changedFn := func(t string) {
|
||||
accept = (t == acceptStr)
|
||||
}
|
||||
f.AddInputField("Confirm:", "", 30, nil, changedFn)
|
||||
} else {
|
||||
accept = true
|
||||
}
|
||||
|
||||
f.AddButton("OK", func() {
|
||||
if !accept {
|
||||
return
|
||||
}
|
||||
ack()
|
||||
dismissConfirm(pages)
|
||||
cancel()
|
||||
})
|
||||
for i := 0; i < 2; i++ {
|
||||
b := f.GetButton(i)
|
||||
if b == nil {
|
||||
continue
|
||||
}
|
||||
b.SetBackgroundColorActivated(styles.ButtonFocusBgColor.Color())
|
||||
b.SetLabelColorActivated(styles.ButtonFocusFgColor.Color())
|
||||
}
|
||||
f.SetFocus(0)
|
||||
modal := tview.NewModalForm("<"+title+">", f)
|
||||
modal.SetText(msg)
|
||||
modal.SetTextColor(styles.FgColor.Color())
|
||||
modal.SetDoneFunc(func(int, string) {
|
||||
dismissConfirm(pages)
|
||||
cancel()
|
||||
})
|
||||
pages.AddPage(confirmKey, modal, false, false)
|
||||
pages.ShowPage(confirmKey)
|
||||
}
|
||||
|
||||
// ShowConfirm pops a confirmation dialog.
|
||||
func ShowConfirm(styles config.Dialog, pages *ui.Pages, title, msg string, ack confirmFunc, cancel cancelFunc) {
|
||||
f := tview.NewForm()
|
||||
|
|
|
|||
|
|
@ -479,7 +479,7 @@ func (b *Browser) refreshActions() {
|
|||
aa[ui.KeyE] = ui.NewKeyAction("Edit", b.editCmd, true)
|
||||
}
|
||||
if client.Can(b.meta.Verbs, "delete") {
|
||||
aa[tcell.KeyCtrlD] = ui.NewKeyAction("Delete", b.deleteCmd, true)
|
||||
aa[ui.KeyZ] = ui.NewKeyAction("Delete", b.deleteCmd, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -88,7 +88,7 @@ func (d *Deploy) logOptions(prev bool) (*dao.LogOptions, error) {
|
|||
|
||||
func (d *Deploy) showPods(app *App, model ui.Tabular, gvr, path string) {
|
||||
var ddp dao.Deployment
|
||||
dp, err := ddp.Load(app.factory, path)
|
||||
dp, err := ddp.GetInstance(app.factory, path)
|
||||
if err != nil {
|
||||
app.Flash().Err(err)
|
||||
return
|
||||
|
|
@ -99,7 +99,7 @@ func (d *Deploy) showPods(app *App, model ui.Tabular, gvr, path string) {
|
|||
|
||||
func (d *Deploy) dp(path string) (*appsv1.Deployment, error) {
|
||||
var dp dao.Deployment
|
||||
return dp.Load(d.App().factory, path)
|
||||
return dp.GetInstance(d.App().factory, path)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -323,7 +323,7 @@ func launchShellPod(a *App, node string) error {
|
|||
return err
|
||||
}
|
||||
conn := dial.CoreV1().Pods(ns)
|
||||
if _, err := conn.Create(ctx, &spec, metav1.CreateOptions{}); err != nil {
|
||||
if _, err := conn.Create(ctx, spec, metav1.CreateOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -351,7 +351,7 @@ func k9sShellPodName() string {
|
|||
return fmt.Sprintf("%s-%d", k9sShell, os.Getpid())
|
||||
}
|
||||
|
||||
func k9sShellPod(node string, cfg *config.ShellPod) v1.Pod {
|
||||
func k9sShellPod(node string, cfg *config.ShellPod) *v1.Pod {
|
||||
var grace int64
|
||||
var priv bool = true
|
||||
|
||||
|
|
@ -379,7 +379,7 @@ func k9sShellPod(node string, cfg *config.ShellPod) v1.Pod {
|
|||
c.Args = cfg.Args
|
||||
}
|
||||
|
||||
return v1.Pod{
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: k9sShellPodName(),
|
||||
Namespace: cfg.Namespace,
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ func TestHelp(t *testing.T) {
|
|||
v := view.NewHelp(app)
|
||||
|
||||
assert.Nil(t, v.Init(ctx))
|
||||
assert.Equal(t, 27, v.GetRowCount())
|
||||
assert.Equal(t, 28, v.GetRowCount())
|
||||
assert.Equal(t, 6, v.GetColumnCount())
|
||||
assert.Equal(t, "<a>", strings.TrimSpace(v.GetCell(1, 0).Text))
|
||||
assert.Equal(t, "Attach", strings.TrimSpace(v.GetCell(1, 1).Text))
|
||||
|
|
|
|||
|
|
@ -27,8 +27,8 @@ import (
|
|||
const (
|
||||
windowsOS = "windows"
|
||||
powerShell = "powershell"
|
||||
osBetaSelector = "beta.kubernetes.io/os"
|
||||
osSelector = "kubernetes.io/os"
|
||||
osBetaSelector = "beta." + osSelector
|
||||
trUpload = "Upload"
|
||||
trDownload = "Download"
|
||||
)
|
||||
|
|
@ -71,6 +71,7 @@ func (p *Pod) bindDangerousKeys(aa ui.KeyActions) {
|
|||
ui.KeyS: ui.NewKeyAction("Shell", p.shellCmd, true),
|
||||
ui.KeyA: ui.NewKeyAction("Attach", p.attachCmd, true),
|
||||
ui.KeyT: ui.NewKeyAction("Transfer", p.transferCmd, true),
|
||||
ui.KeyZ: ui.NewKeyAction("Sanitize", p.sanitizeCmd, true),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -255,6 +256,35 @@ func (p *Pod) attachCmd(evt *tcell.EventKey) *tcell.EventKey {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Pod) sanitizeCmd(evt *tcell.EventKey) *tcell.EventKey {
|
||||
res, err := dao.AccessorFor(p.App().factory, p.GVR())
|
||||
if err != nil {
|
||||
p.App().Flash().Err(err)
|
||||
return nil
|
||||
}
|
||||
s, ok := res.(dao.Sanitizer)
|
||||
if !ok {
|
||||
p.App().Flash().Err(fmt.Errorf("expecting a sanitizer for %q", p.GVR()))
|
||||
return nil
|
||||
}
|
||||
|
||||
ack := "sanitize me pods!"
|
||||
msg := fmt.Sprintf("Sanitize deletes all pods in completed/error state\nPlease enter [orange::b]%s[-::-] to proceed.", ack)
|
||||
dialog.ShowConfirmAck(p.App().App, p.App().Content.Pages, ack, true, "Sanitize", msg, func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*p.App().Conn().Config().CallTimeout())
|
||||
defer cancel()
|
||||
total, err := s.Sanitize(ctx, p.GetTable().GetModel().GetNamespace())
|
||||
if err != nil {
|
||||
p.App().Flash().Err(err)
|
||||
return
|
||||
}
|
||||
p.App().Flash().Infof("Sanitized %d %s", total, p.GVR())
|
||||
p.Refresh()
|
||||
}, func() {})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pod) transferCmd(evt *tcell.EventKey) *tcell.EventKey {
|
||||
path := p.GetTable().GetSelectedItem()
|
||||
if path == "" {
|
||||
|
|
@ -492,15 +522,27 @@ func getPodOS(f dao.Factory, fqn string) (string, error) {
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if os, ok := po.Spec.NodeSelector[osBetaSelector]; ok {
|
||||
if os, ok := osFromSelector(po.Spec.NodeSelector); ok {
|
||||
return os, nil
|
||||
}
|
||||
os, ok := po.Spec.NodeSelector[osSelector]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("no os information available")
|
||||
}
|
||||
|
||||
no, err := dao.FetchNode(context.Background(), f, po.Spec.Hostname)
|
||||
if err == nil {
|
||||
if os, ok := osFromSelector(no.Labels); ok {
|
||||
return os, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no os information available")
|
||||
}
|
||||
|
||||
func osFromSelector(s map[string]string) (string, bool) {
|
||||
if os, ok := s[osBetaSelector]; ok {
|
||||
return os, ok
|
||||
}
|
||||
os, ok := s[osSelector]
|
||||
|
||||
return os, ok
|
||||
}
|
||||
|
||||
func resourceSorters(t *Table) ui.KeyActions {
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ func TestPodNew(t *testing.T) {
|
|||
|
||||
assert.Nil(t, po.Init(makeCtx()))
|
||||
assert.Equal(t, "Pods", po.Name())
|
||||
assert.Equal(t, 26, len(po.Hints()))
|
||||
assert.Equal(t, 27, len(po.Hints()))
|
||||
}
|
||||
|
||||
// Helpers...
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ func (s *StatefulSet) logOptions(prev bool) (*dao.LogOptions, error) {
|
|||
return nil, errors.New("you must provide a selection")
|
||||
}
|
||||
|
||||
sts, err := s.sts(path)
|
||||
sts, err := s.getInstance(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -82,16 +82,16 @@ func (s *StatefulSet) bindKeys(aa ui.KeyActions) {
|
|||
}
|
||||
|
||||
func (s *StatefulSet) showPods(app *App, _ ui.Tabular, _, path string) {
|
||||
sts, err := s.sts(path)
|
||||
i, err := s.getInstance(path)
|
||||
if err != nil {
|
||||
app.Flash().Err(err)
|
||||
return
|
||||
}
|
||||
|
||||
showPodsFromSelector(app, path, sts.Spec.Selector)
|
||||
showPodsFromSelector(app, path, i.Spec.Selector)
|
||||
}
|
||||
|
||||
func (s *StatefulSet) sts(path string) (*appsv1.StatefulSet, error) {
|
||||
func (s *StatefulSet) getInstance(path string) (*appsv1.StatefulSet, error) {
|
||||
var sts dao.StatefulSet
|
||||
return sts.Load(s.App().factory, path)
|
||||
return sts.GetInstance(s.App().factory, path)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/derailed/k9s/internal/port"
|
||||
|
|
@ -23,7 +22,7 @@ type Forwarder interface {
|
|||
// Container returns a container name.
|
||||
Container() string
|
||||
|
||||
// Ports returns the port mapping.
|
||||
// Port returns the port mapping.
|
||||
Port() string
|
||||
|
||||
// FQN returns the full port-forward name.
|
||||
|
|
@ -50,9 +49,9 @@ func NewForwarders() Forwarders {
|
|||
return make(map[string]Forwarder)
|
||||
}
|
||||
|
||||
// BOZO!! Review!!!
|
||||
// IsPodForwarded checks if pod has a forward.
|
||||
func (ff Forwarders) IsPodForwarded(fqn string) bool {
|
||||
fqn += "|"
|
||||
for k := range ff {
|
||||
if strings.HasPrefix(k, fqn) {
|
||||
return true
|
||||
|
|
@ -64,9 +63,9 @@ func (ff Forwarders) IsPodForwarded(fqn string) bool {
|
|||
|
||||
// IsContainerForwarded checks if pod has a forward.
|
||||
func (ff Forwarders) IsContainerForwarded(fqn, co string) bool {
|
||||
prefix := fqn + "|" + co
|
||||
fqn += "|" + co
|
||||
for k := range ff {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
if strings.HasPrefix(k, fqn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
@ -91,8 +90,7 @@ func (ff Forwarders) Kill(path string) int {
|
|||
// The '|' is added to make sure we do not delete port forwards from other pods that have the same prefix
|
||||
// Without the `|` port forwards for pods, default/web-0 and default/web-0-bla would be both deleted
|
||||
// even if we want only port forwards for default/web-0 to be deleted
|
||||
prefix := fmt.Sprintf("%s|", path)
|
||||
|
||||
prefix := path + "|"
|
||||
for k, f := range ff {
|
||||
if k == path || strings.HasPrefix(k, prefix) {
|
||||
stats++
|
||||
|
|
@ -109,6 +107,6 @@ func (ff Forwarders) Kill(path string) int {
|
|||
func (ff Forwarders) Dump() {
|
||||
log.Debug().Msgf("----------- PORT-FORWARDS --------------")
|
||||
for k, f := range ff {
|
||||
log.Debug().Msgf(" %s -- %#v", k, f)
|
||||
log.Debug().Msgf(" %s -- %s", k, f)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,183 @@
|
|||
package watch_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/derailed/k9s/internal/port"
|
||||
"github.com/derailed/k9s/internal/watch"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/client-go/tools/portforward"
|
||||
)
|
||||
|
||||
// init silences zerolog output during tests so test runs stay quiet.
func init() {
	zerolog.SetGlobalLevel(zerolog.FatalLevel)
}
|
||||
|
||||
func TestIsPodForwarded(t *testing.T) {
|
||||
uu := map[string]struct {
|
||||
ff watch.Forwarders
|
||||
fqn string
|
||||
e bool
|
||||
}{
|
||||
"happy": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1||8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/p1",
|
||||
e: true,
|
||||
},
|
||||
"dud": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1||8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/p2",
|
||||
},
|
||||
"sub": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/freddy||8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/fred",
|
||||
},
|
||||
}
|
||||
|
||||
for k := range uu {
|
||||
u := uu[k]
|
||||
t.Run(k, func(t *testing.T) {
|
||||
assert.Equal(t, u.e, u.ff.IsPodForwarded(u.fqn))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsContainerForwarded(t *testing.T) {
|
||||
uu := map[string]struct {
|
||||
ff watch.Forwarders
|
||||
fqn, co string
|
||||
e bool
|
||||
}{
|
||||
"happy": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/p1",
|
||||
co: "c1",
|
||||
e: true,
|
||||
},
|
||||
"dud": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/p1",
|
||||
co: "c2",
|
||||
},
|
||||
"sub": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/freddy|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
fqn: "ns1/fred",
|
||||
co: "c1",
|
||||
},
|
||||
}
|
||||
|
||||
for k := range uu {
|
||||
u := uu[k]
|
||||
t.Run(k, func(t *testing.T) {
|
||||
assert.Equal(t, u.e, u.ff.IsContainerForwarded(u.fqn, u.co))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKill(t *testing.T) {
|
||||
uu := map[string]struct {
|
||||
ff watch.Forwarders
|
||||
path string
|
||||
kills int
|
||||
}{
|
||||
"partial_match": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1",
|
||||
kills: 1,
|
||||
},
|
||||
"partial_no_match": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p",
|
||||
},
|
||||
"path_sub": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1",
|
||||
kills: 1,
|
||||
},
|
||||
"partial_multi": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1|c2|8081:8081": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1",
|
||||
kills: 2,
|
||||
},
|
||||
"full_match": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1|c1|8080:8080",
|
||||
kills: 1,
|
||||
},
|
||||
"full_no_match_co": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1|c2|8080:8080",
|
||||
},
|
||||
"full_no_match_ports": {
|
||||
ff: watch.Forwarders{
|
||||
"ns1/p1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p1_1|c1|8080:8080": newNoOpForwarder(),
|
||||
"ns1/p2|c1|8080:8080": newNoOpForwarder(),
|
||||
},
|
||||
path: "ns1/p1|c1|8081:8080",
|
||||
},
|
||||
}
|
||||
|
||||
for k := range uu {
|
||||
u := uu[k]
|
||||
t.Run(k, func(t *testing.T) {
|
||||
assert.Equal(t, u.kills, u.ff.Kill(u.path))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type noOpForwarder struct{}
|
||||
|
||||
func newNoOpForwarder() noOpForwarder {
|
||||
return noOpForwarder{}
|
||||
}
|
||||
|
||||
func (m noOpForwarder) Start(path string, tunnel port.PortTunnel) (*portforward.PortForwarder, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m noOpForwarder) Stop() {}
|
||||
func (m noOpForwarder) ID() string { return "" }
|
||||
func (m noOpForwarder) Container() string { return "" }
|
||||
func (m noOpForwarder) Port() string { return "" }
|
||||
func (m noOpForwarder) FQN() string { return "" }
|
||||
func (m noOpForwarder) Active() bool { return false }
|
||||
func (m noOpForwarder) SetActive(bool) {}
|
||||
func (m noOpForwarder) Age() string { return "" }
|
||||
func (m noOpForwarder) HasPortMapping(string) bool { return false }
|
||||
Loading…
Reference in New Issue