From 55db6b588f188a8f738fd30e4d4f75ca8af571a1 Mon Sep 17 00:00:00 2001
From: juanvallejo
Date: Mon, 6 Aug 2018 13:48:29 -0400
Subject: [PATCH] fix debug panic

---
 .../diagnostics/cluster/app_create/app.go | 47 ++++++++++++++-
 pkg/oc/cli/debug/debug.go                 | 59 +++++++++++++++++--
 pkg/oc/cli/newapp/newapp.go               | 19 ++++--
 3 files changed, 112 insertions(+), 13 deletions(-)

diff --git a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
index 9594a33b129c..5e8419dcf57a 100644
--- a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
+++ b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
@@ -5,8 +5,12 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/watch"
+	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/kubectl"
 
 	appsv1 "github.com/openshift/api/apps/v1"
@@ -100,7 +104,7 @@ This may be a transient error. Check the master API logs for anomalies near this
 	}
 	defer stopWatcher(watcher)
 	for event := range watcher.ResultChan() {
-		running, err := kubectl.PodContainerRunning(d.appName)(event)
+		running, err := podContainerRunning(d.appName)(event)
 		if err != nil {
 			d.out.Error("DCluAC009", err, fmt.Sprintf(`
 %s: Error while watching for app pod to deploy:
@@ -124,3 +128,44 @@ There are many reasons why this can occur; for example:
 `, now(), d.deployTimeout))
 	return false
 }
+
+// podContainerRunning returns false until the named container has ContainerStatus running (at least once),
+// and will return an error if the pod is deleted, runs to completion, or the container pod is not available.
+func podContainerRunning(containerName string) watch.ConditionFunc {
+	return func(event watch.Event) (bool, error) {
+		switch event.Type {
+		case watch.Deleted:
+			return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+		}
+		switch t := event.Object.(type) {
+		case *api.Pod:
+			switch t.Status.Phase {
+			case api.PodRunning, api.PodPending:
+			case api.PodFailed, api.PodSucceeded:
+				return false, kubectl.ErrPodCompleted
+			default:
+				return false, nil
+			}
+			for _, s := range t.Status.ContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			for _, s := range t.Status.InitContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			return false, nil
+		}
+		return false, nil
+	}
+}
diff --git a/pkg/oc/cli/debug/debug.go b/pkg/oc/cli/debug/debug.go
index 2acf18cc1c01..6042ccb91707 100644
--- a/pkg/oc/cli/debug/debug.go
+++ b/pkg/oc/cli/debug/debug.go
@@ -16,11 +16,13 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	kapierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -103,8 +105,9 @@ type DebugOptions struct {
 	AppsClient  appsv1client.AppsV1Interface
 	ImageClient imagev1client.ImageV1Interface
 
-	Printer       printers.ResourcePrinter
-	LogsForObject polymorphichelpers.LogsForObjectFunc
+	Printer          printers.ResourcePrinter
+	LogsForObject    polymorphichelpers.LogsForObjectFunc
+	RESTClientGetter genericclioptions.RESTClientGetter
 
 	NoStdin   bool
 	ForceTTY  bool
@@ -211,6 +214,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, f kcmdutil.Factory, args []s
 		return kcmdutil.UsageErrorf(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
 	}
 	o.Resources = resources
+	o.RESTClientGetter = f
 
 	switch {
 	case o.ForceTTY && o.NoStdin:
@@ -432,7 +436,7 @@ func (o *DebugOptions) RunDebug() error {
 	}
 
 	fmt.Fprintf(o.ErrOut, "Waiting for pod to start ...\n")
-	switch containerRunningEvent, err := watch.Until(o.Timeout, w, kubectl.PodContainerRunning(o.Attach.ContainerName)); {
+	switch containerRunningEvent, err := watch.Until(o.Timeout, w, podContainerRunning(o.Attach.ContainerName)); {
 	// api didn't error right away but the pod wasn't even created
 	case kapierrors.IsNotFound(err):
 		msg := fmt.Sprintf("unable to create the debug pod %q", pod.Name)
@@ -444,12 +448,14 @@
 	case err == kubectl.ErrPodCompleted, err == kubectl.ErrContainerTerminated, !o.Attach.Stdin:
 		return kcmd.LogsOptions{
 			Object: pod,
-			Options: &kapi.PodLogOptions{
+			Options: &corev1.PodLogOptions{
 				Container: o.Attach.ContainerName,
 				Follow:    true,
 			},
-			IOStreams:     o.IOStreams,
-			LogsForObject: o.LogsForObject,
+			RESTClientGetter: o.RESTClientGetter,
+			ConsumeRequestFn: kcmd.DefaultConsumeRequestFn,
+			IOStreams:        o.IOStreams,
+			LogsForObject:    o.LogsForObject,
 		}.RunLogs()
 	case err != nil:
 		return err
@@ -828,3 +834,44 @@ func (o *DebugOptions) approximatePodTemplateForObject(object runtime.Object) (*
 
 	return nil, fmt.Errorf("unable to extract pod template from type %v", reflect.TypeOf(object))
 }
+
+// podContainerRunning returns false until the named container has ContainerStatus running (at least once),
+// and will return an error if the pod is deleted, runs to completion, or the container pod is not available.
+func podContainerRunning(containerName string) watch.ConditionFunc {
+	return func(event watch.Event) (bool, error) {
+		switch event.Type {
+		case watch.Deleted:
+			return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+		}
+		switch t := event.Object.(type) {
+		case *corev1.Pod:
+			switch t.Status.Phase {
+			case corev1.PodRunning, corev1.PodPending:
+			case corev1.PodFailed, corev1.PodSucceeded:
+				return false, kubectl.ErrPodCompleted
+			default:
+				return false, nil
+			}
+			for _, s := range t.Status.ContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			for _, s := range t.Status.InitContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			return false, nil
+		}
+		return false, nil
+	}
+}
diff --git a/pkg/oc/cli/newapp/newapp.go b/pkg/oc/cli/newapp/newapp.go
index 44736e6cda06..c81703c80892 100644
--- a/pkg/oc/cli/newapp/newapp.go
+++ b/pkg/oc/cli/newapp/newapp.go
@@ -161,8 +161,11 @@ type ObjectGeneratorOptions struct {
 }
 
 type AppOptions struct {
-	genericclioptions.IOStreams
 	*ObjectGeneratorOptions
+
+	RESTClientGetter genericclioptions.RESTClientGetter
+
+	genericclioptions.IOStreams
 }
 
 type versionedPrintObj struct {
@@ -367,6 +370,8 @@ func NewCmdNewApplication(name, baseName string, f kcmdutil.Factory, streams gen
 
 // Complete sets any default behavior for the command
 func (o *AppOptions) Complete(baseName, commandName string, f kcmdutil.Factory, c *cobra.Command, args []string) error {
+	o.RESTClientGetter = f
+
 	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.ObjectGeneratorOptions.Config.TemplateParameters, "--param")
 	err := o.ObjectGeneratorOptions.Complete(baseName, commandName, f, c, args)
 	if err != nil {
@@ -510,7 +515,7 @@ func (o *AppOptions) RunNewApp() error {
 
 	switch {
 	case len(installing) == 1:
-		return followInstallation(config, installing[0], o.LogsForObject)
+		return followInstallation(config, o.RESTClientGetter, installing[0], o.LogsForObject)
 	case len(installing) > 1:
 		for i := range installing {
 			fmt.Fprintf(out, "%sTrack installation of %s with '%s logs %s'.\n", indent, installing[i].Name, o.BaseName, installing[i].Name)
@@ -554,7 +559,7 @@ func getServices(items []runtime.Object) []*corev1.Service {
 	return svc
 }
 
-func followInstallation(config *newcmd.AppConfig, pod *corev1.Pod, logsForObjectFn polymorphichelpers.LogsForObjectFunc) error {
+func followInstallation(config *newcmd.AppConfig, clientGetter genericclioptions.RESTClientGetter, pod *corev1.Pod, logsForObjectFn polymorphichelpers.LogsForObjectFunc) error {
 	fmt.Fprintf(config.Out, "--> Installing ...\n")
 
 	// we cannot retrieve logs until the pod is out of pending
@@ -567,12 +572,14 @@ func followInstallation(config *newcmd.AppConfig, pod *corev1.Pod, logsForObject
 	opts := &kcmd.LogsOptions{
 		Namespace:   pod.Namespace,
 		ResourceArg: pod.Name,
-		Options: &kapi.PodLogOptions{
+		Options: &corev1.PodLogOptions{
 			Follow:    true,
 			Container: pod.Spec.Containers[0].Name,
 		},
-		LogsForObject: logsForObjectFn,
-		IOStreams:     genericclioptions.IOStreams{Out: config.Out},
+		RESTClientGetter: clientGetter,
+		ConsumeRequestFn: kcmd.DefaultConsumeRequestFn,
+		LogsForObject:    logsForObjectFn,
+		IOStreams:        genericclioptions.IOStreams{Out: config.Out},
 	}
 	logErr := opts.RunLogs()
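
For context on how the vendored condition is consumed: RunDebug hands it to watch.Until, exactly as the debug.go hunk above shows. Below is a minimal, illustrative sketch of that pattern driven by a fake watcher. It is not part of the patch: the package main wrapper, the "debug" container name, and the assumption that the corev1-based podContainerRunning added above is in scope are all illustrative.

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// Fake watcher stands in for the pod watch that RunDebug opens.
	w := watch.NewFake()

	// Emit one event: a pod whose "debug" container is already running.
	go w.Modify(&corev1.Pod{
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			ContainerStatuses: []corev1.ContainerStatus{{
				Name:  "debug",
				State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}},
			}},
		},
	})

	// Same call shape as in RunDebug: block until the condition reports true,
	// the condition returns an error, or the timeout expires.
	event, err := watch.Until(10*time.Second, w, podContainerRunning("debug"))
	if err != nil {
		fmt.Println("error waiting for container:", err)
		return
	}
	fmt.Println("container running; event type:", event.Type)
}

The condition only reports true once the named container shows a Running state, so the happy path above completes after the single Modified event.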