refact jobs

mine
derailed 2019-06-20 18:36:30 -06:00
parent 6b2adca0c0
commit 125e70ebb6
5 changed files with 56 additions and 47 deletions

View File

@ -136,9 +136,9 @@ func makeSAR(ns, name, resURL string) *authorizationv1.SelfSubjectAccessReview {
}
// CanIAccess checks if user has access to a certain resource.
func (a *APIClient) canIAccess(ns, name, resURL string, verbs []string) (bool, error) {
func canIAccess(conn Connection, ns, name, resURL string, verbs []string) (bool, error) {
sar := makeSAR(ns, name, resURL)
dial := a.DialOrDie().AuthorizationV1().SelfSubjectAccessReviews()
dial := conn.DialOrDie().AuthorizationV1().SelfSubjectAccessReviews()
for _, v := range verbs {
sar.Spec.ResourceAttributes.Verb = v
resp, err := dial.Create(sar)
@ -194,7 +194,6 @@ func (a *APIClient) NodePods(node string) (*v1.PodList, error) {
func (a *APIClient) IsNamespaced(res string) bool {
list, _ := a.DialOrDie().Discovery().ServerPreferredResources()
for _, l := range list {
log.Debug().Msgf("GV %s", l.GroupVersion)
for _, r := range l.APIResources {
if r.Name == res {
return r.Namespaced

View File

@ -155,7 +155,7 @@ func (r *Container) Fields(ns string) Row {
// ----------------------------------------------------------------------------
// Helpers...
func gatherMetrics(co v1.Container, mx *mv1beta1.PodMetrics) (scpu string, smem string, pcpu string, pmem string) {
func gatherMetrics(co v1.Container, mx *mv1beta1.PodMetrics) (scpu, smem, pcpu, pmem string) {
scpu, smem, pcpu, pmem = NAValue, NAValue, NAValue, NAValue
if mx == nil {
return

View File

@ -129,20 +129,14 @@ func (r *Job) Fields(ns string) Row {
// ----------------------------------------------------------------------------
// Helpers...
const maxShow = 2
func (*Job) toContainers(p v1.PodSpec) (string, string) {
cc := make([]string, 0, len(p.InitContainers)+len(p.Containers))
ii := make([]string, 0, len(cc))
cc, ii := parseContainers(p.InitContainers)
cn, ci := parseContainers(p.Containers)
for _, c := range p.InitContainers {
cc = append(cc, c.Name)
ii = append(ii, c.Image)
}
for _, c := range p.Containers {
cc = append(cc, c.Name)
ii = append(ii, c.Image)
}
cc, ii = append(cc, cn...), append(ii, ci...)
const maxShow = 2
// Limit to 2 of each...
if len(cc) > maxShow {
cc = append(cc[:2], "(+"+strconv.Itoa(len(cc)-maxShow)+")...")
@ -154,6 +148,15 @@ func (*Job) toContainers(p v1.PodSpec) (string, string) {
return strings.Join(cc, ","), strings.Join(ii, ",")
}
// parseContainers collects the names and images of the given containers
// into two index-aligned slices (nn[i] is the name of the container whose
// image is ii[i]).
func parseContainers(cos []v1.Container) (nn, ii []string) {
	// Pre-size both slices: the final length is known, so avoid the
	// repeated growth copies of bare append.
	nn = make([]string, 0, len(cos))
	ii = make([]string, 0, len(cos))
	for _, co := range cos {
		nn = append(nn, co.Name)
		ii = append(ii, co.Image)
	}
	return nn, ii
}
func (*Job) toCompletion(spec batchv1.JobSpec, status batchv1.JobStatus) (s string) {
if spec.Completions != nil {
return strconv.Itoa(int(status.Succeeded)) + "/" + strconv.Itoa(int(*spec.Completions))

View File

@ -276,15 +276,7 @@ func (r *Pod) Fields(ns string) Row {
ss := i.Status.ContainerStatuses
cr, _, rc := r.statuses(ss)
ccpu, cmem, pcpu, pmem := NAValue, NAValue, NAValue, NAValue
if r.metrics != nil {
c, m := r.currentRes(r.metrics)
ccpu, cmem = ToMillicore(c.MilliValue()), ToMi(k8s.ToMB(m.Value()))
rc, rm := r.requestedRes(i)
pcpu = AsPerc(toPerc(float64(c.MilliValue()), float64(rc.MilliValue())))
pmem = AsPerc(toPerc(k8s.ToMB(m.Value()), k8s.ToMB(rm.Value())))
}
ccpu, cmem, pcpu, pmem := r.gatherPodMetrics(i)
return append(ff,
i.ObjectMeta.Name,
@ -305,6 +297,21 @@ func (r *Pod) Fields(ns string) Row {
// ----------------------------------------------------------------------------
// Helpers...
// gatherPodMetrics renders the pod's current cpu/memory usage and the
// percentage of the requested resources consumed, as display strings.
// When no metrics are available, all four values are NAValue.
func (r *Pod) gatherPodMetrics(po *v1.Pod) (ccpu, cmem, pcpu, pmem string) {
	if r.metrics == nil {
		return NAValue, NAValue, NAValue, NAValue
	}
	curCPU, curMem := r.currentRes(r.metrics)
	ccpu = ToMillicore(curCPU.MilliValue())
	cmem = ToMi(k8s.ToMB(curMem.Value()))
	reqCPU, reqMem := r.requestedRes(po)
	pcpu = AsPerc(toPerc(float64(curCPU.MilliValue()), float64(reqCPU.MilliValue())))
	pmem = AsPerc(toPerc(k8s.ToMB(curMem.Value()), k8s.ToMB(reqMem.Value())))
	return ccpu, cmem, pcpu, pmem
}
func containerResources(co v1.Container) (cpu, mem *resource.Quantity) {
req, limit := co.Resources.Requests, co.Resources.Limits
switch {

View File

@ -148,14 +148,10 @@ func (v *svcView) reloadBenchCfg() error {
}
func (v *svcView) benchCmd(evt *tcell.EventKey) *tcell.EventKey {
if !v.rowSelected() {
if !v.rowSelected() || v.bench != nil {
return evt
}
if v.bench != nil {
v.app.flash().err(errors.New("Only one benchmark allowed at a time"))
return nil
}
if err := v.reloadBenchCfg(); err != nil {
v.app.flash().err(err)
return nil
@ -197,28 +193,32 @@ func (v *svcView) runBenchmark(port string, cfg config.BenchConfig) error {
v.app.status(flashWarn, "Benchmark in progress...")
log.Debug().Msg("Bench starting...")
go v.bench.run(v.app.config.K9s.CurrentCluster, func() {
log.Debug().Msg("Bench Completed!")
v.app.QueueUpdate(func() {
if v.bench.canceled {
v.app.status(flashInfo, "Benchmark canceled")
} else {
v.app.status(flashInfo, "Benchmark Completed!")
v.bench.cancel()
}
v.bench = nil
go func() {
<-time.After(2 * time.Second)
v.app.QueueUpdate(func() {
v.app.statusReset()
})
}()
})
})
go v.bench.run(v.app.config.K9s.CurrentCluster, v.benchDone)
return nil
}
// benchDone runs when the benchmark goroutine finishes (see runBenchmark).
// On the UI thread it flashes the outcome, releases the single active-bench
// slot, and kicks off a delayed status reset.
func (v *svcView) benchDone() {
	log.Debug().Msg("Bench Completed!")
	// All UI mutation is funneled through QueueUpdate.
	v.app.QueueUpdate(func() {
		if v.bench.canceled {
			v.app.status(flashInfo, "Benchmark canceled")
		} else {
			v.app.status(flashInfo, "Benchmark Completed!")
			// NOTE(review): cancel() fires only on the non-canceled path —
			// presumably teardown already happened when canceled was set;
			// confirm a canceled bench needs no extra teardown.
			v.bench.cancel()
		}
		// Clear the slot so a new benchmark can be started.
		v.bench = nil
		// Reset the status line after a short grace period.
		go benchTimedOut(v.app)
	})
}
// benchTimedOut lets the benchmark completion flash linger for a couple of
// seconds, then clears the status line on the UI thread.
func benchTimedOut(app *appView) {
	const grace = 2 * time.Second
	time.Sleep(grace)
	app.QueueUpdate(func() {
		app.statusReset()
	})
}
func (v *svcView) showSvcPods(ns string, sel map[string]string, b actionHandler) {
var s []string
for k, v := range sel {