diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc
index bfb72a74276b..76ff7b1deb94 100644
--- a/contrib/completions/bash/oc
+++ b/contrib/completions/bash/oc
@@ -2454,6 +2454,8 @@ _oc_adm_drain()
     local_nonpersistent_flags+=("--grace-period=")
     flags+=("--ignore-daemonsets")
     local_nonpersistent_flags+=("--ignore-daemonsets")
+    flags+=("--pod-selector=")
+    local_nonpersistent_flags+=("--pod-selector=")
     flags+=("--selector=")
     two_word_flags+=("-l")
     local_nonpersistent_flags+=("--selector=")
diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc
index 002856bd7b92..1558805c9c33 100644
--- a/contrib/completions/zsh/oc
+++ b/contrib/completions/zsh/oc
@@ -2596,6 +2596,8 @@ _oc_adm_drain()
     local_nonpersistent_flags+=("--grace-period=")
     flags+=("--ignore-daemonsets")
     local_nonpersistent_flags+=("--ignore-daemonsets")
+    flags+=("--pod-selector=")
+    local_nonpersistent_flags+=("--pod-selector=")
     flags+=("--selector=")
     two_word_flags+=("-l")
     local_nonpersistent_flags+=("--selector=")
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go
index 910654f6f6ad..37f3ef1175cc 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
@@ -61,6 +62,7 @@ type DrainOptions struct {
 	backOff            clockwork.Clock
 	DeleteLocalData    bool
 	Selector           string
+	PodSelector        string
 	mapper             meta.RESTMapper
 	nodeInfos          []*resource.Info
 	Out                io.Writer
@@ -197,6 +199,8 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+	cmd.Flags().StringVarP(&options.PodSelector, "pod-selector", "", options.PodSelector, "Label selector to filter pods on the node")
+
 	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
@@ -223,6 +227,12 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error {
 		return err
 	}
+	if len(o.PodSelector) > 0 {
+		if _, err := labels.Parse(o.PodSelector); err != nil {
+			return errors.New("--pod-selector=<pod_selector> must be a valid label selector")
+		}
+	}
+
 	o.restClient, err = o.Factory.RESTClient()
 	if err != nil {
 		return err
 	}
@@ -328,38 +338,8 @@ func (o *DrainOptions) deleteOrEvictPodsSimple(nodeInfo *resource.Info) error {
 	return err
 }
 
-func (o *DrainOptions) getController(namespace string, controllerRef *metav1.OwnerReference) (interface{}, error) {
-	switch controllerRef.Kind {
-	case "ReplicationController":
-		return o.client.Core().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "DaemonSet":
-		return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "Job":
-		return o.client.Batch().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "ReplicaSet":
-		return o.client.Extensions().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "StatefulSet":
-		return o.client.AppsV1beta1().StatefulSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	}
-	return nil, fmt.Errorf("Unknown controller kind %q", controllerRef.Kind)
-}
-
-func (o *DrainOptions) getPodController(pod corev1.Pod) (*metav1.OwnerReference, error) {
-	controllerRef := metav1.GetControllerOf(&pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-
-	// We assume the only reason for an error is because the controller is
-	// gone/missing, not for any other cause.
-	// TODO(mml): something more sophisticated than this
-	// TODO(juntee): determine if it's safe to remove getController(),
-	// so that drain can work for controller types that we don't know about
-	_, err := o.getController(pod.Namespace, controllerRef)
-	if err != nil {
-		return nil, err
-	}
-	return controllerRef, nil
+func (o *DrainOptions) getPodController(pod corev1.Pod) *metav1.OwnerReference {
+	return metav1.GetControllerOf(&pod)
 }
 
 func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fatal) {
@@ -368,21 +348,15 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fata
 		return true, nil, nil
 	}
 
-	controllerRef, err := o.getPodController(pod)
-	if err != nil {
-		// if we're forcing, remove orphaned pods with a warning
-		if apierrors.IsNotFound(err) && o.Force {
-			return true, &warning{err.Error()}, nil
-		}
-		return false, nil, &fatal{err.Error()}
-	}
+	controllerRef := o.getPodController(pod)
 	if controllerRef != nil {
 		return true, nil, nil
 	}
-	if !o.Force {
-		return false, nil, &fatal{kUnmanagedFatal}
+	if o.Force {
+		return true, &warning{kUnmanagedWarning}, nil
 	}
-	return true, &warning{kUnmanagedWarning}, nil
+
+	return false, nil, &fatal{kUnmanagedFatal}
 }
 
 func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) {
@@ -393,23 +367,22 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal)
 	// The exception is for pods that are orphaned (the referencing
 	// management resource - including DaemonSet - is not found).
 	// Such pods will be deleted if --force is used.
-	controllerRef, err := o.getPodController(pod)
-	if err != nil {
-		// if we're forcing, remove orphaned pods with a warning
-		if apierrors.IsNotFound(err) && o.Force {
-			return true, &warning{err.Error()}, nil
-		}
-		return false, nil, &fatal{err.Error()}
-	}
+	controllerRef := o.getPodController(pod)
 	if controllerRef == nil || controllerRef.Kind != "DaemonSet" {
 		return true, nil, nil
 	}
+
 	if _, err := o.client.Extensions().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil {
+		// remove orphaned pods with a warning if --force is used
+		if apierrors.IsNotFound(err) && o.Force {
+			return true, &warning{err.Error()}, nil
+		}
 		return false, nil, &fatal{err.Error()}
 	}
 
 	if !o.IgnoreDaemonsets {
 		return false, nil, &fatal{kDaemonsetFatal}
 	}
+
 	return false, &warning{kDaemonsetWarning}, nil
 }
@@ -455,7 +428,13 @@ func (ps podStatuses) Message() string {
 // getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we
 // are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error.
 func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev1.Pod, err error) {
+	labelSelector, err := labels.Parse(o.PodSelector)
+	if err != nil {
+		return pods, err
+	}
+
 	podList, err := o.client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+		LabelSelector: labelSelector.String(),
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeInfo.Name}).String()})
 	if err != nil {
 		return pods, err
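
Review note (not part of the patch): the new flag leans on k8s.io/apimachinery/pkg/labels in two places — SetupDrain validates --pod-selector with labels.Parse before any API call, and getPodsForDeletion sends the parsed selector's String() as the LabelSelector in the pod List options. A minimal standalone sketch of that library behavior; the selector strings here are made up for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// SetupDrain rejects malformed selectors up front; an unclosed
	// set expression fails labels.Parse the same way.
	if _, err := labels.Parse("app in (web"); err != nil {
		fmt.Println("rejected:", err)
	}

	// A well-formed selector, as a user might pass via --pod-selector.
	sel, err := labels.Parse("app=web,tier!=cache")
	if err != nil {
		panic(err)
	}

	// getPodsForDeletion sends sel.String() as ListOptions.LabelSelector;
	// the API server applies the same matching that Matches does locally.
	fmt.Println(sel.Matches(labels.Set{"app": "web", "tier": "frontend"})) // true
	fmt.Println(sel.String())                                              // app=web,tier!=cache
}
```

Worth noting: labels.Parse("") succeeds and yields an empty selector whose String() is "", so when --pod-selector is unset the List call is unfiltered and drain selects pods exactly as before.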