From 1c6ab3a60c82cb9c088ef15041e89264428928d0 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 9 Apr 2018 09:17:55 -0400 Subject: [PATCH 1/6] UPSTREAM: 60455: removes custom scalers from kubectl --- vendor/k8s.io/kubernetes/pkg/kubectl/BUILD | 10 +- .../k8s.io/kubernetes/pkg/kubectl/cmd/BUILD | 3 + .../kubernetes/pkg/kubectl/cmd/apply.go | 23 +- .../kubernetes/pkg/kubectl/cmd/apply_test.go | 62 +- .../pkg/kubectl/cmd/rollingupdate.go | 7 +- .../kubernetes/pkg/kubectl/cmd/testing/BUILD | 15 +- .../pkg/kubectl/cmd/testing/fake.go | 6 + .../kubernetes/pkg/kubectl/cmd/util/BUILD | 17 +- .../pkg/kubectl/cmd/util/factory.go | 11 +- .../pkg/kubectl/cmd/util/factory_builder.go | 50 + .../cmd/util/factory_object_mapping.go | 38 - .../k8s.io/kubernetes/pkg/kubectl/delete.go | 29 +- .../kubernetes/pkg/kubectl/delete_test.go | 195 ++- .../kubernetes/pkg/kubectl/rolling_updater.go | 18 +- vendor/k8s.io/kubernetes/pkg/kubectl/scale.go | 302 +---- .../kubernetes/pkg/kubectl/scale_test.go | 1205 ++++------------- .../src/k8s.io/client-go/scale/client.go | 5 +- vendor/k8s.io/kubernetes/test/e2e/apps/BUILD | 1 + .../kubernetes/test/e2e/apps/cronjob.go | 2 +- .../kubernetes/test/e2e/apps/daemon_set.go | 2 +- .../kubernetes/test/e2e/apps/deployment.go | 7 +- vendor/k8s.io/kubernetes/test/e2e/apps/job.go | 2 +- .../test/e2e/autoscaling/autoscaling_timer.go | 2 +- .../cluster_autoscaler_scalability.go | 8 +- .../autoscaling/cluster_size_autoscaling.go | 36 +- .../autoscaling/horizontal_pod_autoscaling.go | 2 +- .../k8s.io/kubernetes/test/e2e/common/BUILD | 1 + .../test/e2e/common/autoscaling_utils.go | 17 +- .../kubernetes/test/e2e/framework/rc_util.go | 4 +- .../test/e2e/framework/service_util.go | 5 +- .../kubernetes/test/e2e/framework/util.go | 4 +- .../instrumentation/monitoring/stackdriver.go | 2 +- .../kubernetes/test/e2e/network/proxy.go | 2 +- .../kubernetes/test/e2e/network/service.go | 12 +- .../kubernetes/test/e2e/node/kubelet.go | 2 +- .../kubernetes/test/e2e/node/kubelet_perf.go | 2 +- .../test/e2e/scalability/density.go | 8 +- .../kubernetes/test/e2e/scalability/load.go | 3 +- .../equivalence_cache_predicates.go | 8 +- .../test/e2e/scheduling/predicates.go | 2 +- .../test/e2e/scheduling/priorities.go | 2 +- .../test/e2e/scheduling/rescheduler.go | 2 +- .../test/e2e/scheduling/ubernetes_lite.go | 2 +- .../test/e2e/storage/empty_dir_wrapper.go | 2 +- .../upgrades/horizontal_pod_autoscalers.go | 3 +- .../kubernetes/test/utils/delete_resources.go | 9 +- 46 files changed, 695 insertions(+), 1455 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD index 7fe68ccde17e..d6405396a5b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD @@ -41,21 +41,17 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", - "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - 
"//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", @@ -68,24 +64,20 @@ go_test( "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/discovery/fake:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", "//vendor/k8s.io/client-go/scale:go_default_library", + "//vendor/k8s.io/client-go/scale/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD index 45707367c48c..cca26088ed0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD @@ -146,6 +146,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/portforward:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", @@ -234,6 +235,7 @@ go_test( "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -260,6 +262,7 @@ go_test( "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", + "//vendor/k8s.io/client-go/scale/fake:go_default_library", 
"//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go index 71486bdac487..c5a1c517e24d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" + scaleclient "k8s.io/client-go/scale" oapi "k8s.io/kube-openapi/pkg/util/proto" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -313,6 +314,10 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation, options.cmdBaseName) } overwrite := cmdutil.GetFlagBool(cmd, "overwrite") + scaler, err := f.ScaleClient() + if err != nil { + return err + } helper := resource.NewHelper(info.Client, info.Mapping) patcher := &patcher{ encoder: encoder, @@ -328,6 +333,7 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti timeout: options.Timeout, gracePeriod: options.GracePeriod, openapiSchema: openapiSchema, + scaleClient: scaler, } patchBytes, patchedObject, err := patcher.patch(info.Object, modified, info.Source, info.Namespace, info.Name, errOut) @@ -493,6 +499,10 @@ func (p *pruner) prune(f cmdutil.Factory, namespace string, mapping *meta.RESTMa if err != nil { return err } + scaler, err := f.ScaleClient() + if err != nil { + return err + } for _, obj := range objs { annots, err := mapping.MetadataAccessor.Annotations(obj) @@ -516,7 +526,7 @@ func (p *pruner) prune(f cmdutil.Factory, namespace string, mapping *meta.RESTMa return err } if !p.dryRun { - if err := p.delete(namespace, name, mapping); err != nil { + if err := p.delete(namespace, name, mapping, scaler); err != nil { return err } } @@ -525,16 +535,16 @@ func (p *pruner) prune(f cmdutil.Factory, namespace string, mapping *meta.RESTMa return nil } -func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { +func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping, scaleClient scaleclient.ScalesGetter) error { c, err := p.clientFunc(mapping) if err != nil { return err } - return runDelete(namespace, name, mapping, c, nil, p.cascade, p.gracePeriod, p.clientsetFunc) + return runDelete(namespace, name, mapping, c, nil, p.cascade, p.gracePeriod, p.clientsetFunc, scaleClient) } -func runDelete(namespace, name string, mapping *meta.RESTMapping, c resource.RESTClient, helper *resource.Helper, cascade bool, gracePeriod int, clientsetFunc func() (internalclientset.Interface, error)) error { +func runDelete(namespace, name string, mapping *meta.RESTMapping, c resource.RESTClient, helper *resource.Helper, cascade bool, gracePeriod int, clientsetFunc func() (internalclientset.Interface, error), scaleClient scaleclient.ScalesGetter) error { if !cascade { if helper == nil { helper = resource.NewHelper(c, mapping) @@ -545,7 +555,7 @@ func runDelete(namespace, name string, mapping *meta.RESTMapping, c resource.RES if err != nil { return err } - r, err := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), cs) + r, err := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), cs, scaleClient) if err != nil { if _, ok := 
err.(*kubectl.NoSuchReaperError); !ok { return err @@ -567,7 +577,7 @@ func (p *patcher) delete(namespace, name string) error { if err != nil { return err } - return runDelete(namespace, name, p.mapping, c, p.helper, p.cascade, p.gracePeriod, p.clientsetFunc) + return runDelete(namespace, name, p.mapping, c, p.helper, p.cascade, p.gracePeriod, p.clientsetFunc, p.scaleClient) } type patcher struct { @@ -588,6 +598,7 @@ type patcher struct { gracePeriod int openapiSchema openapi.Resources + scaleClient scaleclient.ScalesGetter } func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go index aa4919769a3b..84a5b3da7829 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go @@ -31,14 +31,18 @@ import ( "github.com/spf13/cobra" + autoscalingv1 "k8s.io/api/autoscaling/v1" kubeerr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing" restclient "k8s.io/client-go/rest" "k8s.io/client-go/rest/fake" + fakescale "k8s.io/client-go/scale/fake" + testcore "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" api "k8s.io/kubernetes/pkg/apis/core" @@ -1190,14 +1194,14 @@ func TestForceApply(t *testing.T) { pathRC := "/namespaces/test/replicationcontrollers/" + nameRC pathRCList := "/namespaces/test/replicationcontrollers" expected := map[string]int{ - "getOk": 10, + "getOk": 7, "getNotFound": 1, "getList": 1, "patch": 6, "delete": 1, - "put": 1, "post": 1, } + scaleClientExpected := []string{"get", "update", "get", "get"} for _, fn := range testingOpenAPISchemaFns { t.Run("test apply with --force", func(t *testing.T) { @@ -1277,10 +1281,48 @@ func TestForceApply(t *testing.T) { } }), } + newReplicas := int32(3) + scaleClient := &fakescale.FakeScaleClient{} + scaleClient.AddReactor("get", "replicationcontrollers", func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.GetAction) + if action.GetName() != "test-rc" { + return true, nil, fmt.Errorf("expected = test-rc, got = %s", action.GetName()) + } + obj := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.GetName(), + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + } + return true, obj, nil + }) + scaleClient.AddReactor("update", "replicationcontrollers", func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.UpdateAction) + obj := action.GetObject().(*autoscalingv1.Scale) + if obj.Name != "test-rc" { + return true, nil, fmt.Errorf("expected = test-rc, got = %s", obj.Name) + } + newReplicas = obj.Spec.Replicas + return true, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: obj.Name, + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + }, nil + }) + + tf.ScaleGetter = scaleClient tf.OpenAPISchemaFunc = fn tf.Client = tf.UnstructuredClient tf.ClientConfigVal = &restclient.Config{} tf.Namespace = "test" + 
buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) @@ -1302,6 +1344,22 @@ func TestForceApply(t *testing.T) { if errBuf.String() != "" { t.Fatalf("unexpected error output: %s", errBuf.String()) } + + scale, err := scaleClient.Scales(tf.Namespace).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, nameRC) + if err != nil { + t.Error(err) + } + if scale.Spec.Replicas != 0 { + t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + } + if len(scaleClient.Actions()) != len(scaleClientExpected) { + t.Fatalf("a fake scale client has unexpected amount of API calls, wanted = %d, got = %d", len(scaleClientExpected), len(scaleClient.Actions())) + } + for index, action := range scaleClient.Actions() { + if scaleClientExpected[index] != action.GetVerb() { + t.Errorf("unexpected API method called on a fake scale client, wanted = %s, got = %s at index = %d", scaleClientExpected[index], action.GetVerb(), index) + } + } }) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go index 029adb8aae60..5e0873b175f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go @@ -293,7 +293,12 @@ func RunRollingUpdate(f cmdutil.Factory, out io.Writer, cmd *cobra.Command, args filename, oldName) } - updater := kubectl.NewRollingUpdater(newRc.Namespace, coreClient, coreClient) + scalesGetter, err := f.ScaleClient() + if err != nil { + return err + } + + updater := kubectl.NewRollingUpdater(newRc.Namespace, coreClient, coreClient, scalesGetter) // To successfully pull off a rolling update the new and old rc have to differ // by at least one selector.
Every new pod should have the selector and every diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/BUILD index 23f686de7b5a..eb226cb83ea0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/BUILD @@ -1,7 +1,4 @@ -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -10,9 +7,7 @@ go_library( "zz_generated.deepcopy.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/testing", - visibility = [ - "//build/visible_to:pkg_kubectl_cmd_testing_CONSUMERS", - ], + visibility = ["//build/visible_to:pkg_kubectl_cmd_testing_CONSUMERS"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", @@ -37,6 +32,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", ], @@ -46,13 +42,12 @@ filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], + visibility = ["//build/visible_to:pkg_kubectl_cmd_testing_CONSUMERS"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], - visibility = [ - "//build/visible_to:pkg_kubectl_cmd_testing_CONSUMERS", - ], + visibility = ["//build/visible_to:pkg_kubectl_cmd_testing_CONSUMERS"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/fake.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/fake.go index 751165d2e73c..8ef4e2a61e01 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testing/fake.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/rest/fake" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -239,6 +240,7 @@ type TestFactory struct { cmdutil.Factory Client kubectl.RESTClient + ScaleGetter scaleclient.ScalesGetter UnstructuredClient kubectl.RESTClient DescriberVal printers.Describer Namespace string @@ -483,6 +485,10 @@ func (f *TestFactory) LogsForObject(object, options runtime.Object, timeout time } } +func (f *TestFactory) ScaleClient() (scaleclient.ScalesGetter, error) { + return f.ScaleGetter, nil +} + func testDynamicResources() []*discovery.APIGroupResources { return []*discovery.APIGroupResources{ { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD index 5abbe5b5bae8..71189b0781b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD @@ -1,8 +1,4 @@ -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -18,9 +14,7 @@ go_library( "shortcut_restmapper.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", - visibility = [ - "//build/visible_to:pkg_kubectl_cmd_util_CONSUMERS", - ], + visibility = ["//build/visible_to:pkg_kubectl_cmd_util_CONSUMERS"], deps = [ 
"//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", @@ -93,13 +87,7 @@ go_test( "helpers_test.go", "shortcut_restmapper_test.go", ], - data = [ - "//api/swagger-spec", - ], embed = [":go_default_library"], - visibility = [ - "//build/visible_to:COMMON_testing", - ], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", @@ -144,6 +132,7 @@ filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], + visibility = ["//visibility:private"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index 3cb44ab4db9b..7e7898a6e18b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" api "k8s.io/kubernetes/pkg/apis/core" apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" @@ -181,10 +182,6 @@ type ObjectMappingFactory interface { // LogsForObject returns a request for the logs associated with the provided object LogsForObject(object, options runtime.Object, timeout time.Duration) (*restclient.Request, error) - // Returns a Scaler for changing the size of the specified RESTMapping type or an error - Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) - // Returns a Reaper for gracefully shutting down resources. - Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) // Returns a HistoryViewer for viewing change history HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) // Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error @@ -216,6 +213,12 @@ type BuilderFactory interface { PluginLoader() plugins.PluginLoader // PluginRunner provides the implementation to be used to run cli plugins. PluginRunner() plugins.PluginRunner + // Returns a Scaler for changing the size of the specified RESTMapping type or an error + Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) + // ScaleClient gives you back scale getter + ScaleClient() (scaleclient.ScalesGetter, error) + // Returns a Reaper for gracefully shutting down resources. 
+ Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) } type factory struct { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go index 98407d179454..080e39fc4b22 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go @@ -21,7 +21,11 @@ package util import ( "os" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + scaleclient "k8s.io/client-go/scale" + "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/plugins" "k8s.io/kubernetes/pkg/kubectl/resource" ) @@ -84,3 +88,49 @@ func (f *ring2Factory) PluginLoader() plugins.PluginLoader { func (f *ring2Factory) PluginRunner() plugins.PluginRunner { return &plugins.ExecPluginRunner{} } + +func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) { + discoClient, err := f.clientAccessFactory.DiscoveryClient() + if err != nil { + return nil, err + } + restClient, err := f.clientAccessFactory.RESTClient() + if err != nil { + return nil, err + } + resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) + mapper, _ := f.objectMappingFactory.Object() + return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil +} + +func (f *ring2Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) { + clientset, err := f.clientAccessFactory.ClientSet() + if err != nil { + return nil, err + } + + scalesGetter, err := f.ScaleClient() + if err != nil { + return nil, err + } + gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) + + return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset.Batch(), scalesGetter, gvk.GroupResource()), nil +} + +func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { + clientset, clientsetErr := f.clientAccessFactory.ClientSet() + if clientsetErr != nil { + return nil, clientsetErr + } + scaler, err := f.ScaleClient() + if err != nil { + return nil, err + } + + reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset, scaler) + if kubectl.IsNoSuchReaperError(reaperErr) { + return nil, reaperErr + } + return reaper, reaperErr +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go index 3a6f6e56c61e..a8ebe75f454a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -37,7 +37,6 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" - scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" @@ -285,43 +284,6 @@ func (f *ring1Factory) LogsForObject(object, options runtime.Object, timeout tim return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil } -func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - clientset, err := f.clientAccessFactory.ClientSet() - if err != nil { - return nil, err - } - - // create scales getter - // TODO(p0lyn0mial): put scalesGetter to a factory - discoClient, err := f.clientAccessFactory.DiscoveryClient() - if err != nil { - return nil, err - } - restClient, err := 
f.clientAccessFactory.RESTClient() - if err != nil { - return nil, err - } - mapper, _ := f.Object() - resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) - scalesGetter := scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver) - gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) - - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset.Batch(), scalesGetter, gvk.GroupResource()), nil -} - -func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - clientset, clientsetErr := f.clientAccessFactory.ClientSet() - reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset) - - if kubectl.IsNoSuchReaperError(reaperErr) { - return nil, reaperErr - } - if clientsetErr != nil { - return nil, clientsetErr - } - return reaper, reaperErr -} - func (f *ring1Factory) HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { external, err := f.clientAccessFactory.KubernetesClientSet() if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go index 2c8e32676932..974798a247c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go @@ -28,6 +28,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + scaleclient "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -67,13 +68,13 @@ func IsNoSuchReaperError(err error) bool { return ok } -func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) { +func ReaperFor(kind schema.GroupKind, c internalclientset.Interface, sc scaleclient.ScalesGetter) (Reaper, error) { switch kind { case api.Kind("ReplicationController"): - return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil + return &ReplicationControllerReaper{c.Core(), Interval, Timeout, sc}, nil case extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): - return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil + return &ReplicaSetReaper{c.Extensions(), Interval, Timeout, sc, schema.GroupResource{Group: kind.Group, Resource: "replicasets"}}, nil case extensions.Kind("DaemonSet"), apps.Kind("DaemonSet"): return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil @@ -85,26 +86,29 @@ func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, er return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil case apps.Kind("StatefulSet"): - return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil + return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout, sc}, nil case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil + return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout, sc, schema.GroupResource{Group: kind.Group, Resource: "deployments"}}, nil } return nil, &NoSuchReaperError{kind} } -func ReaperForReplicationController(rcClient coreclient.ReplicationControllersGetter, timeout time.Duration) (Reaper, error) { - return &ReplicationControllerReaper{rcClient, Interval, timeout}, nil +func ReaperForReplicationController(rcClient coreclient.ReplicationControllersGetter, scaleClient scaleclient.ScalesGetter, timeout time.Duration) (Reaper, error) { + return 
&ReplicationControllerReaper{rcClient, Interval, timeout, scaleClient}, nil } type ReplicationControllerReaper struct { client coreclient.ReplicationControllersGetter pollInterval, timeout time.Duration + scaleClient scaleclient.ScalesGetter } type ReplicaSetReaper struct { client extensionsclient.ReplicaSetsGetter pollInterval, timeout time.Duration + scaleClient scaleclient.ScalesGetter + gr schema.GroupResource } type DaemonSetReaper struct { client extensionsclient.DaemonSetsGetter @@ -119,6 +123,8 @@ type DeploymentReaper struct { dClient extensionsclient.DeploymentsGetter rsClient extensionsclient.ReplicaSetsGetter pollInterval, timeout time.Duration + scaleClient scaleclient.ScalesGetter + gr schema.GroupResource } type PodReaper struct { client coreclient.PodsGetter @@ -127,6 +133,7 @@ type StatefulSetReaper struct { client appsclient.StatefulSetsGetter podClient coreclient.PodsGetter pollInterval, timeout time.Duration + scaleClient scaleclient.ScalesGetter } // getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. @@ -148,7 +155,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { rc := reaper.client.ReplicationControllers(namespace) - scaler := &ReplicationControllerScaler{reaper.client} + scaler := NewScaler(reaper.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"}) ctrl, err := rc.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -217,7 +224,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { rsc := reaper.client.ReplicaSets(namespace) - scaler := &ReplicaSetScaler{reaper.client} + scaler := NewScaler(reaper.scaleClient, reaper.gr) rs, err := rsc.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -318,7 +325,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { statefulsets := reaper.client.StatefulSets(namespace) - scaler := &StatefulSetScaler{reaper.client} + scaler := NewScaler(reaper.scaleClient, apps.Resource("statefulsets")) ss, err := statefulsets.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -391,7 +398,7 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { deployments := reaper.dClient.Deployments(namespace) - rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout} + rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout, reaper.scaleClient, schema.GroupResource{Group: reaper.gr.Group, Resource: "replicasets"}} deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { // set deployment's history and scale to 0 diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go index 38fb1e0412bf..e45f550d4b4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go @@ -23,12 +23,15 @@ import ( "testing" 
"time" + autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/watch" + fakescale "k8s.io/client-go/scale/fake" testcore "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -41,10 +44,12 @@ func TestReplicationControllerStop(t *testing.T) { name := "foo" ns := "default" tests := []struct { - Name string - Objs []runtime.Object - StopError error - ExpectedActions []string + Name string + Objs []runtime.Object + ScaledDown bool + StopError error + ExpectedActions []string + ScaleClientExpectedAction []string }{ { Name: "OnlyOneRC", @@ -63,8 +68,10 @@ func TestReplicationControllerStop(t *testing.T) { }, }, }, - StopError: nil, - ExpectedActions: []string{"get", "list", "get", "update", "get", "get", "delete"}, + ScaledDown: true, + StopError: nil, + ExpectedActions: []string{"get", "list", "delete"}, + ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, }, { Name: "NoOverlapping", @@ -92,8 +99,10 @@ func TestReplicationControllerStop(t *testing.T) { }, }, }, - StopError: nil, - ExpectedActions: []string{"get", "list", "get", "update", "get", "get", "delete"}, + ScaledDown: true, + StopError: nil, + ExpectedActions: []string{"get", "list", "delete"}, + ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, }, { Name: "OverlappingError", @@ -122,10 +131,10 @@ func TestReplicationControllerStop(t *testing.T) { }, }, }, + ScaledDown: false, // scale resource was not scaled down due to overlapping controllers StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz, please manage deletion individually with --cascade=false."), ExpectedActions: []string{"get", "list"}, }, - { Name: "OverlappingButSafeDelete", Objs: []runtime.Object{ @@ -162,7 +171,7 @@ func TestReplicationControllerStop(t *testing.T) { }, }, }, - + ScaledDown: false, // scale resource was not scaled down due to overlapping controllers StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."), ExpectedActions: []string{"get", "list"}, }, @@ -194,7 +203,7 @@ func TestReplicationControllerStop(t *testing.T) { }, }, }, - + ScaledDown: false, // scale resource was not scaled down because there is still an additional replica StopError: nil, ExpectedActions: []string{"get", "list", "delete"}, }, @@ -202,6 +211,7 @@ func TestReplicationControllerStop(t *testing.T) { for _, test := range tests { copiedForWatch := test.Objs[0].DeepCopyObject() + scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 3, nil) fake := fake.NewSimpleClientset(test.Objs...) 
fakeWatch := watch.NewFake() fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil)) @@ -210,7 +220,7 @@ func TestReplicationControllerStop(t *testing.T) { fakeWatch.Add(copiedForWatch) }() - reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond} + reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond, scaleClient} err := reaper.Stop(ns, name, 0, nil) if !reflect.DeepEqual(err, test.StopError) { t.Errorf("%s unexpected error: %v", test.Name, err) @@ -230,6 +240,24 @@ func TestReplicationControllerStop(t *testing.T) { t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb) } } + if test.ScaledDown { + scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, name) + if err != nil { + t.Error(err) + } + if scale.Spec.Replicas != 0 { + t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + } + actions := scaleClient.Actions() + if len(actions) != len(test.ScaleClientExpectedAction) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) + } + for i, verb := range test.ScaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) + } + } + } } } @@ -237,15 +265,22 @@ func TestReplicaSetStop(t *testing.T) { name := "foo" ns := "default" tests := []struct { - Name string - Objs []runtime.Object - StopError error - ExpectedActions []string + Name string + Objs []runtime.Object + DiscoveryResources []*metav1.APIResourceList + PathsResources map[string]runtime.Object + ScaledDown bool + StopError error + ExpectedActions []string + ScaleClientExpectedAction []string }{ { Name: "OnlyOneRS", Objs: []runtime.Object{ &extensions.ReplicaSetList{ // LIST + TypeMeta: metav1.TypeMeta{ + APIVersion: extensions.SchemeGroupVersion.String(), + }, Items: []extensions.ReplicaSet{ { ObjectMeta: metav1.ObjectMeta{ @@ -260,8 +295,10 @@ func TestReplicaSetStop(t *testing.T) { }, }, }, - StopError: nil, - ExpectedActions: []string{"get", "get", "update", "get", "get", "delete"}, + ScaledDown: true, + StopError: nil, + ExpectedActions: []string{"get", "delete"}, + ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, }, { Name: "NoOverlapping", @@ -291,8 +328,10 @@ func TestReplicaSetStop(t *testing.T) { }, }, }, - StopError: nil, - ExpectedActions: []string{"get", "get", "update", "get", "get", "delete"}, + ScaledDown: true, + StopError: nil, + ExpectedActions: []string{"get", "delete"}, + ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, }, // TODO: Implement tests for overlapping replica sets, similar to replication controllers, // when the overlapping checks are implemented for replica sets. @@ -300,7 +339,9 @@ func TestReplicaSetStop(t *testing.T) { for _, test := range tests { fake := fake.NewSimpleClientset(test.Objs...) 
- reaper := ReplicaSetReaper{fake.Extensions(), time.Millisecond, time.Millisecond} + scaleClient := createFakeScaleClient("replicasets", "foo", 3, nil) + + reaper := ReplicaSetReaper{fake.Extensions(), time.Millisecond, time.Millisecond, scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}} err := reaper.Stop(ns, name, 0, nil) if !reflect.DeepEqual(err, test.StopError) { t.Errorf("%s unexpected error: %v", test.Name, err) @@ -320,6 +361,24 @@ func TestReplicaSetStop(t *testing.T) { t.Errorf("%s unexpected action: %+v, expected %s-replicaSet", test.Name, actions[i], verb) } } + if test.ScaledDown { + scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "extensions", Resource: "replicasets"}, name) + if err != nil { + t.Error(err) + } + if scale.Spec.Replicas != 0 { + t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + } + actions := scaleClient.Actions() + if len(actions) != len(test.ScaleClientExpectedAction) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) + } + for i, verb := range test.ScaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) + } + } + } } } @@ -439,10 +498,12 @@ func TestDeploymentStop(t *testing.T) { } trueVar := true tests := []struct { - Name string - Objs []runtime.Object - StopError error - ExpectedActions []string + Name string + Objs []runtime.Object + ScaledDown bool + StopError error + ExpectedActions []string + ScaleClientExpectedAction []string }{ { Name: "SimpleDeployment", @@ -510,17 +571,20 @@ func TestDeploymentStop(t *testing.T) { }, }, }, - StopError: nil, + ScaledDown: true, + StopError: nil, ExpectedActions: []string{"get:deployments", "update:deployments", "get:deployments", "list:replicasets", "get:replicasets", - "get:replicasets", "update:replicasets", "get:replicasets", - "get:replicasets", "delete:replicasets", "delete:deployments"}, + "delete:replicasets", "delete:deployments"}, + ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, }, } for _, test := range tests { + scaleClient := createFakeScaleClient("deployments", "foo", 3, nil) + fake := fake.NewSimpleClientset(test.Objs...) 
- reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond} + reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond, scaleClient, schema.GroupResource{Group: "extensions", Resource: "deployments"}} err := reaper.Stop(ns, name, 0, nil) if !reflect.DeepEqual(err, test.StopError) { t.Errorf("%s unexpected error: %v", test.Name, err) @@ -544,6 +608,24 @@ func TestDeploymentStop(t *testing.T) { t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction) } } + if test.ScaledDown { + scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "extensions", Resource: "replicaset"}, name) + if err != nil { + t.Error(err) + } + if scale.Spec.Replicas != 0 { + t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + } + actions := scaleClient.Actions() + if len(actions) != len(test.ScaleClientExpectedAction) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) + } + for i, verb := range test.ScaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) + } + } + } } } @@ -637,7 +719,7 @@ func TestSimpleStop(t *testing.T) { } for _, test := range tests { fake := test.fake - reaper, err := ReaperFor(test.kind, fake) + reaper, err := ReaperFor(test.kind, fake, nil) if err != nil { t.Errorf("unexpected error: %v (%s)", err, test.test) } @@ -697,8 +779,59 @@ func TestDeploymentNotFoundError(t *testing.T) { return true, nil, ScaleError{ActualError: errors.NewNotFound(api.Resource("replicaset"), "doesn't-matter")} }) - reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond} + reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond, nil, schema.GroupResource{}} if err := reaper.Stop(ns, name, 0, nil); err != nil { t.Fatalf("unexpected error: %#v", err) } } + +func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient { + shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) { + if anError, anErrorExists := errorsOnVerb[verb]; anErrorExists { + return anError, true + } + return &kerrors.StatusError{}, false + } + newReplicas := int32(replicas) + scaleClient := &fakescale.FakeScaleClient{} + scaleClient.AddReactor("get", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.GetAction) + if action.GetName() != resourceName { + return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, action.GetName()) + } + if anError, should := shouldReturnAnError("get"); should { + return true, nil, anError + } + obj := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.GetName(), + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + } + return true, obj, nil + }) + scaleClient.AddReactor("update", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.UpdateAction) + obj := action.GetObject().(*autoscalingv1.Scale) + if obj.Name != resourceName { + return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, obj.Name) + } + if anError, should := 
shouldReturnAnError("update"); should { + return true, nil, anError + } + newReplicas = obj.Spec.Replicas + return true, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: obj.Name, + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + }, nil + }) + return scaleClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index 3d640184c484..e1ee2bc10a53 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -28,8 +28,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/retry" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -114,8 +116,9 @@ const ( // RollingUpdater provides methods for updating replicated pods in a predictable, // fault-tolerant way. type RollingUpdater struct { - rcClient coreclient.ReplicationControllersGetter - podClient coreclient.PodsGetter + rcClient coreclient.ReplicationControllersGetter + podClient coreclient.PodsGetter + scaleClient scaleclient.ScalesGetter // Namespace for resources ns string // scaleAndWait scales a controller and returns its updated state. @@ -132,11 +135,12 @@ type RollingUpdater struct { } // NewRollingUpdater creates a RollingUpdater from a client. -func NewRollingUpdater(namespace string, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter) *RollingUpdater { +func NewRollingUpdater(namespace string, rcClient coreclient.ReplicationControllersGetter, podClient coreclient.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater { updater := &RollingUpdater{ - rcClient: rcClient, - podClient: podClient, - ns: namespace, + rcClient: rcClient, + podClient: podClient, + scaleClient: sc, + ns: namespace, } // Inject real implementations. updater.scaleAndWait = updater.scaleAndWaitWithScaler @@ -396,7 +400,7 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi // scalerScaleAndWait scales a controller using a Scaler and a real client. 
func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { - scaler := &ReplicationControllerScaler{r.rcClient} + scaler := NewScaler(r.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"}) if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index 99e4b365db98..a60c991f5962 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -26,16 +26,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" scaleclient "k8s.io/client-go/scale" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" ) // TODO: Figure out if we should be waiting on initializers in the Scale() functions below. @@ -61,10 +55,17 @@ func ScalerFor(kind schema.GroupKind, jobsClient batchclient.JobsGetter, scalesG case batch.Kind("Job"): return &jobScaler{jobsClient} // Either kind of job can be scaled with Batch interface. default: - return &genericScaler{scalesGetter, gr} + return NewScaler(scalesGetter, gr) } } +// NewScaler gets a scaler for a given resource +// Note that if you are trying to create a scaler for "job" then stop and use ScalerFor instead. +// When scaling jobs is dead, we'll remove the ScalerFor method. +func NewScaler(scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler { + return &genericScaler{scalesGetter, gr} +} + // ScalePrecondition describes a condition that must be true for the scale to take place // If CurrentSize == -1, it is ignored. // If CurrentResourceVersion is the empty string, it is ignored. @@ -139,160 +140,6 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s } } -// ValidateStatefulSet ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateStatefulSet(ps *apps.StatefulSet) error { - if precondition.Size != -1 && int(ps.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(ps.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && ps.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, ps.ResourceVersion} - } - return nil -} - -// ValidateReplicationController ensures that the preconditions match.
Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { - if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} - } - return nil -} - -// TODO(p0lyn0mial): remove ReplicationControllerScaler -type ReplicationControllerScaler struct { - c coreclient.ReplicationControllersGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the replication controller if the update is successful. -func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - controller, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicationController(controller); err != nil { - return "", err - } - } - controller.Spec.Replicas = int32(newSize) - updatedRC, err := scaler.c.ReplicationControllers(namespace).Update(controller) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, controller.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, controller.ResourceVersion, err} - } - return updatedRC.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for it's replica count to reach the new value -// (if wait is not nil). -func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - rc, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if rc.Initializers != nil { - return nil - } - err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, ControllerHasDesiredReplicas(scaler.c, rc)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - -// ValidateReplicaSet ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { - if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} - } - return nil -} - -// TODO(p0lyn0mial): remove ReplicaSetScaler -type ReplicaSetScaler struct { - c extensionsclient.ReplicaSetsGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the replicaset if the update is successful. -func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - rs, err := scaler.c.ReplicaSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicaSet(rs); err != nil { - return "", err - } - } - rs.Spec.Replicas = int32(newSize) - updatedRS, err := scaler.c.ReplicaSets(namespace).Update(rs) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, rs.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, rs.ResourceVersion, err} - } - return updatedRS.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a ReplicaSet to a new size, with optional precondition check (if preconditions is -// not nil), optional retries (if retry is not nil), and then optionally waits for it's replica -// count to reach the new value (if wait is not nil). -func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - rs, err := scaler.c.ReplicaSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if rs.Initializers != nil { - return nil - } - err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, ReplicaSetHasDesiredReplicas(scaler.c, rs)) - - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - // ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { if precondition.Size != -1 && job.Spec.Parallelism == nil { @@ -307,63 +154,6 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { return nil } -// TODO(p0lyn0mial): remove StatefulSetsGetter -type StatefulSetScaler struct { - c appsclient.StatefulSetsGetter -} - -// ScaleSimple does a simple one-shot attempt at scaling. It returns the -// resourceVersion of the statefulset if the update is successful. 
-func (scaler *StatefulSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - ss, err := scaler.c.StatefulSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} - } - if preconditions != nil { - if err := preconditions.ValidateStatefulSet(ss); err != nil { - return "", err - } - } - ss.Spec.Replicas = int32(newSize) - updatedStatefulSet, err := scaler.c.StatefulSets(namespace).Update(ss) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, ss.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, ss.ResourceVersion, err} - } - return updatedStatefulSet.ResourceVersion, nil -} - -func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - job, err := scaler.c.StatefulSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if job.Initializers != nil { - return nil - } - err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, StatefulSetHasDesiredReplicas(scaler.c, job)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - type jobScaler struct { c batchclient.JobsGetter } @@ -421,80 +211,8 @@ func (scaler *jobScaler) Scale(namespace, name string, newSize uint, preconditio return nil } -// ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { - if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} - } - return nil -} - -// TODO(p0lyn0mial): remove DeploymentScaler -type DeploymentScaler struct { - c extensionsclient.DeploymentsGetter -} - -// ScaleSimple is responsible for updating a deployment's desired replicas -// count. It returns the resourceVersion of the deployment if the update is -// successful. -func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} - } - if preconditions != nil { - if err := preconditions.ValidateDeployment(deployment); err != nil { - return "", err - } - } - - // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). - // For now I'm falling back to regular Deployment update operation. 
- deployment.Spec.Replicas = int32(newSize) - updatedDeployment, err := scaler.c.Deployments(namespace).Update(deployment) - if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, deployment.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err} - } - return updatedDeployment.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. -func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, DeploymentHasDesiredReplicas(scaler.c, deployment)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - // validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error -// TODO(p0lyn0mial): when the work on GenericScaler is done, rename validateGeneric to validate -func (precondition *ScalePrecondition) validateGeneric(scale *autoscalingapi.Scale) error { +func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) error { if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size { return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(scale.Spec.Replicas))} } @@ -519,7 +237,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { - if err := preconditions.validateGeneric(scale); err != nil { + if err := preconditions.validate(scale); err != nil { return "", err } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go index 65bf4beb8a05..e4b8973764be 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go @@ -17,79 +17,35 @@ limitations under the License. 
package kubectl import ( - "bytes" - "encoding/json" "errors" "fmt" - "io" - "io/ioutil" - "net/http" "testing" "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" kerrors "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/discovery" - fakedisco "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/dynamic" - fakerest "k8s.io/client-go/rest/fake" "k8s.io/client-go/scale" testcore "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" ) -type ErrorReplicationControllers struct { - coreclient.ReplicationControllerInterface - conflict bool - invalid bool -} - -func (c *ErrorReplicationControllers) Update(controller *api.ReplicationController) (*api.ReplicationController, error) { - switch { - case c.invalid: - return nil, kerrors.NewInvalid(api.Kind(controller.Kind), controller.Name, nil) - case c.conflict: - return nil, kerrors.NewConflict(api.Resource(controller.Kind), controller.Name, nil) - } - return nil, errors.New("Replication controller update failure") -} - -type ErrorReplicationControllerClient struct { - *fake.Clientset - conflict bool - invalid bool -} - -func (c *ErrorReplicationControllerClient) ReplicationControllers(namespace string) coreclient.ReplicationControllerInterface { - return &ErrorReplicationControllers{ - ReplicationControllerInterface: c.Clientset.Core().ReplicationControllers(namespace), - conflict: c.conflict, - invalid: c.invalid, - } -} - func TestReplicationControllerScaleRetry(t *testing.T) { - fake := &ErrorReplicationControllerClient{Clientset: fake.NewSimpleClientset(oldRc(0, 0)), conflict: true} - scaler := ReplicationControllerScaler{fake} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + } + scaleClientExpectedAction := []string{"get", "update", "get"} + scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" namespace := metav1.NamespaceDefault - scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -98,22 +54,35 @@ func TestReplicationControllerScaleRetry(t *testing.T) { t.Errorf("Did not expect an error on update conflict failure, got %v", err) } preconditions = ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + scaleFunc = 
ScaleCondition(scaler, &preconditions, namespace, name, count, nil) pass, err = scaleFunc() if err == nil { t.Errorf("Expected error on precondition failure") } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } } func TestReplicationControllerScaleInvalid(t *testing.T) { - fake := &ErrorReplicationControllerClient{Clientset: fake.NewSimpleClientset(oldRc(0, 0)), invalid: true} - scaler := ReplicationControllerScaler{fake} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + } + scaleClientExpectedAction := []string{"get", "update"} + scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" namespace := "default" - scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -122,161 +91,58 @@ func TestReplicationControllerScaleInvalid(t *testing.T) { if err == nil || !ok || e.FailureType != ScaleUpdateFailure { t.Errorf("Expected error on invalid update failure, got %v", err) } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } } func TestReplicationControllerScale(t *testing.T) { - fake := fake.NewSimpleClientset(oldRc(0, 0)) - scaler := ReplicationControllerScaler{fake.Core()} + scaleClientExpectedAction := []string{"get", "update"} + scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, nil) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" - scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil) - actions := fake.Actions() - if len(actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + if err != nil { + t.Fatalf("unexpected error occurred = %v while scaling the resource", err) } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != api.Resource("replicationcontrollers") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) } - if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != 
api.Resource("replicationcontrollers") || action.GetObject().(*api.ReplicationController).Spec.Replicas != int32(count) { - t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } } } func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { - fake := fake.NewSimpleClientset(&api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}, - Spec: api.ReplicationControllerSpec{ - Replicas: 10, - }, - }) - scaler := ReplicationControllerScaler{fake.Core()} + scaleClientExpectedAction := []string{"get"} + scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 10, nil) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 1 { - t.Errorf("unexpected actions: %v, expected 1 action (get)", actions) - } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != api.Resource("replicationcontrollers") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + err := scaler.Scale("default", name, count, &preconditions, nil, nil) + if err == nil { + t.Fatal("expected to get an error but none was returned") } -} - -func TestValidateReplicationController(t *testing.T) { - tests := []struct { - preconditions ScalePrecondition - controller api.ReplicationController - expectError bool - test string - }{ - { - preconditions: ScalePrecondition{-1, ""}, - expectError: false, - test: "defaults", - }, - { - preconditions: ScalePrecondition{-1, ""}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 10, - }, - }, - expectError: false, - test: "defaults 2", - }, - { - preconditions: ScalePrecondition{0, ""}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - }, - }, - expectError: false, - test: "size matches", - }, - { - preconditions: ScalePrecondition{-1, "foo"}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 10, - }, - }, - expectError: false, - test: "resource version matches", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 10, - }, - }, - expectError: false, - test: "both match", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 20, - }, - }, - expectError: true, - test: "size different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 10, - }, - }, - expectError: true, - test: 
"version different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - controller: api.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 20, - }, - }, - expectError: true, - test: "both different", - }, + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) } - for _, test := range tests { - err := test.preconditions.ValidateReplicationController(&test.controller) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil && test.expectError { - t.Errorf("expected an error: %v (%s)", err, test.test) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) } } } @@ -544,47 +410,13 @@ func TestValidateJob(t *testing.T) { } } -type ErrorDeployments struct { - extensionsclient.DeploymentInterface - conflict bool - invalid bool -} - -func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) { - switch { - case c.invalid: - return nil, kerrors.NewInvalid(api.Kind(deployment.Kind), deployment.Name, nil) - case c.conflict: - return nil, kerrors.NewConflict(api.Resource(deployment.Kind), deployment.Name, nil) - } - return nil, errors.New("deployment update failure") -} - -func (c *ErrorDeployments) Get(name string, options metav1.GetOptions) (*extensions.Deployment, error) { - return &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ - Replicas: 0, - }, - }, nil -} - -type ErrorDeploymentClient struct { - extensionsclient.DeploymentsGetter - conflict bool - invalid bool -} - -func (c *ErrorDeploymentClient) Deployments(namespace string) extensionsclient.DeploymentInterface { - return &ErrorDeployments{ - DeploymentInterface: c.DeploymentsGetter.Deployments(namespace), - invalid: c.invalid, - conflict: c.conflict, - } -} - func TestDeploymentScaleRetry(t *testing.T) { - fake := &ErrorDeploymentClient{DeploymentsGetter: fake.NewSimpleClientset().Extensions(), conflict: true} - scaler := &DeploymentScaler{fake} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + } + scaleClientExpectedAction := []string{"get", "update", "get"} + scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" @@ -604,46 +436,52 @@ func TestDeploymentScaleRetry(t *testing.T) { if err == nil { t.Error("Expected error on precondition failure") } -} - -func deployment() *extensions.Deployment { - return &extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } } } func TestDeploymentScale(t *testing.T) { - fake := fake.NewSimpleClientset(deployment()) - scaler := 
DeploymentScaler{fake.Extensions()} + scaleClientExpectedAction := []string{"get", "update"} + scaleClient := createFakeScaleClient("deployments", "foo", 2, nil) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + err := scaler.Scale("default", name, count, &preconditions, nil, nil) + if err != nil { + t.Fatal(err) } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("deployments") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name) + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) } - if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != extensions.Resource("deployments") || action.GetObject().(*extensions.Deployment).Spec.Replicas != int32(count) { - t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } } } func TestDeploymentScaleInvalid(t *testing.T) { - fake := &ErrorDeploymentClient{DeploymentsGetter: fake.NewSimpleClientset().Extensions(), invalid: true} - scaler := DeploymentScaler{fake} + scaleClientExpectedAction := []string{"get", "update"} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + } + scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -652,230 +490,68 @@ func TestDeploymentScaleInvalid(t *testing.T) { if err == nil || !ok || e.FailureType != ScaleUpdateFailure { t.Errorf("Expected error on invalid update failure, got %v", err) } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } } func TestDeploymentScaleFailsPreconditions(t *testing.T) { - fake := fake.NewSimpleClientset(&extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: extensions.DeploymentSpec{ - Replicas: 10, - }, - }) - scaler := DeploymentScaler{fake.Extensions()} + scaleClientExpectedAction := []string{"get"} + scaleClient := createFakeScaleClient("deployments", "foo", 10, nil) + scaler := 
NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"})
 	preconditions := ScalePrecondition{2, ""}
 	count := uint(3)
 	name := "foo"
-	scaler.Scale("default", name, count, &preconditions, nil, nil)
+	err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+	if err == nil {
+		t.Fatal("expected to get an error but none was returned")
+	}
+	actions := scaleClient.Actions()
+	if len(actions) != len(scaleClientExpectedAction) {
+		t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions))
+	}
+	for i, verb := range scaleClientExpectedAction {
+		if actions[i].GetVerb() != verb {
+			t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb)
+		}
+	}
+}
 
-	actions := fake.Actions()
-	if len(actions) != 1 {
-		t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
+func TestStatefulSetScale(t *testing.T) {
+	scaleClientExpectedAction := []string{"get", "update"}
+	scaleClient := createFakeScaleClient("statefulsets", "foo", 2, nil)
+	scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"})
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	actions := scaleClient.Actions()
+	if len(actions) != len(scaleClientExpectedAction) {
+		t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions))
 	}
-	if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("deployments") || action.GetName() != name {
-		t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
-	}
-}
-
-func TestValidateDeployment(t *testing.T) {
-	zero, ten, twenty := int32(0), int32(10), int32(20)
-	tests := []struct {
-		preconditions ScalePrecondition
-		deployment    extensions.Deployment
-		expectError   bool
-		test          string
-	}{
-		{
-			preconditions: ScalePrecondition{-1, ""},
-			expectError:   false,
-			test:          "defaults",
-		},
-		{
-			preconditions: ScalePrecondition{-1, ""},
-			deployment: extensions.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: extensions.DeploymentSpec{
-					Replicas: ten,
-				},
-			},
-			expectError: false,
-			test:        "defaults 2",
-		},
-		{
-			preconditions: ScalePrecondition{0, ""},
-			deployment: extensions.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: extensions.DeploymentSpec{
-					Replicas: zero,
-				},
-			},
-			expectError: false,
-			test:        "size matches",
-		},
-		{
-			preconditions: ScalePrecondition{-1, "foo"},
-			deployment: extensions.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: extensions.DeploymentSpec{
-					Replicas: ten,
-				},
-			},
-			expectError: false,
-			test:        "resource version matches",
-		},
-		{
-			preconditions: ScalePrecondition{10, "foo"},
-			deployment: extensions.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: extensions.DeploymentSpec{
-					Replicas: ten,
-				},
-			},
-			expectError: false,
-			test:        "both match",
-		},
-		{
-			preconditions: ScalePrecondition{10, "foo"},
-			deployment: extensions.Deployment{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: extensions.DeploymentSpec{
-					Replicas: twenty,
-				},
-			},
-			expectError: true,
-			test:        "size different",
-		},
-		{
-			preconditions: ScalePrecondition{10, "foo"},
-			deployment: 
extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - }, - expectError: true, - test: "no replicas", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - deployment: extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: extensions.DeploymentSpec{ - Replicas: ten, - }, - }, - expectError: true, - test: "version different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - deployment: extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: extensions.DeploymentSpec{ - Replicas: twenty, - }, - }, - expectError: true, - test: "both different", - }, - } - for _, test := range tests { - err := test.preconditions.ValidateDeployment(&test.deployment) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil && test.expectError { - t.Errorf("expected an error: %v (%s)", err, test.test) - } - } -} - -type ErrorStatefulSets struct { - appsclient.StatefulSetInterface - conflict bool - invalid bool -} - -func (c *ErrorStatefulSets) Update(statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) { - switch { - case c.invalid: - return nil, kerrors.NewInvalid(api.Kind(statefulSet.Kind), statefulSet.Name, nil) - case c.conflict: - return nil, kerrors.NewConflict(api.Resource(statefulSet.Kind), statefulSet.Name, nil) - } - return nil, errors.New("statefulSet update failure") -} - -func (c *ErrorStatefulSets) Get(name string, options metav1.GetOptions) (*apps.StatefulSet, error) { - return &apps.StatefulSet{ - Spec: apps.StatefulSetSpec{ - Replicas: 0, - }, - }, nil -} - -type ErrorStatefulSetClient struct { - appsclient.StatefulSetsGetter - conflict bool - invalid bool -} - -func (c *ErrorStatefulSetClient) StatefulSets(namespace string) appsclient.StatefulSetInterface { - return &ErrorStatefulSets{ - StatefulSetInterface: c.StatefulSetsGetter.StatefulSets(namespace), - invalid: c.invalid, - conflict: c.conflict, - } -} - -func statefulSet() *apps.StatefulSet { - return &apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - } -} - -func TestStatefulSetScale(t *testing.T) { - fake := fake.NewSimpleClientset(statefulSet()) - scaler := StatefulSetScaler{fake.Apps()} - preconditions := ScalePrecondition{-1, ""} - count := uint(3) - name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) - } - - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-statefulsets %s", actions[0], name) - } - if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetObject().(*apps.StatefulSet).Spec.Replicas != int32(count) { - t.Errorf("unexpected action %v, expected update-statefulset with replicas = %d", actions[1], count) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } } } func TestStatefulSetScaleRetry(t *testing.T) { - fake := &ErrorStatefulSetClient{StatefulSetsGetter: fake.NewSimpleClientset().Apps(), conflict: true} - scaler := &StatefulSetScaler{fake} + 
scaleClientExpectedAction := []string{"get", "update", "get"} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + } + scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" @@ -895,17 +571,30 @@ func TestStatefulSetScaleRetry(t *testing.T) { if err == nil { t.Error("Expected error on precondition failure") } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } } func TestStatefulSetScaleInvalid(t *testing.T) { - fake := &ErrorStatefulSetClient{StatefulSetsGetter: fake.NewSimpleClientset().Apps(), invalid: true} - scaler := StatefulSetScaler{fake} + scaleClientExpectedAction := []string{"get", "update"} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), + } + scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -914,229 +603,68 @@ func TestStatefulSetScaleInvalid(t *testing.T) { if err == nil || !ok || e.FailureType != ScaleUpdateFailure { t.Errorf("Expected error on invalid update failure, got %v", err) } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } } func TestStatefulSetScaleFailsPreconditions(t *testing.T) { - fake := fake.NewSimpleClientset(&apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: 10, - }, - }) - scaler := StatefulSetScaler{fake.Apps()} + scaleClientExpectedAction := []string{"get"} + scaleClient := createFakeScaleClient("statefulsets", "foo", 10, nil) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 1 { - t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions) - } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != apps.Resource("statefulsets") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-statefulset %s", actions[0], name) + err := scaler.Scale("default", name, count, 
&preconditions, nil, nil) + if err == nil { + t.Fatal("expected to get an error but none was returned") } -} - -func TestValidateStatefulSet(t *testing.T) { - zero, ten, twenty := int32(0), int32(10), int32(20) - tests := []struct { - preconditions ScalePrecondition - statefulset apps.StatefulSet - expectError bool - test string - }{ - { - preconditions: ScalePrecondition{-1, ""}, - expectError: false, - test: "defaults", - }, - { - preconditions: ScalePrecondition{-1, ""}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "defaults 2", - }, - { - preconditions: ScalePrecondition{0, ""}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: zero, - }, - }, - expectError: false, - test: "size matches", - }, - { - preconditions: ScalePrecondition{-1, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "resource version matches", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "both match", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: apps.StatefulSetSpec{ - Replicas: twenty, - }, - }, - expectError: true, - test: "size different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - }, - expectError: true, - test: "no replicas", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: apps.StatefulSetSpec{ - Replicas: ten, - }, - }, - expectError: true, - test: "version different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - statefulset: apps.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: apps.StatefulSetSpec{ - Replicas: twenty, - }, - }, - expectError: true, - test: "both different", - }, + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) } - for _, test := range tests { - err := test.preconditions.ValidateStatefulSet(&test.statefulset) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil && test.expectError { - t.Errorf("expected an error: %v (%s)", err, test.test) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) } } } -type ErrorReplicaSets struct { - extensionsclient.ReplicaSetInterface - conflict bool - invalid bool -} - -func (c *ErrorReplicaSets) Update(replicaSets *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - switch { - case c.invalid: - return nil, kerrors.NewInvalid(api.Kind(replicaSets.Kind), replicaSets.Name, nil) - case c.conflict: - return nil, kerrors.NewConflict(api.Resource(replicaSets.Kind), replicaSets.Name, nil) - } - 
return nil, errors.New("replicaSets update failure") -} - -func (c *ErrorReplicaSets) Get(name string, options metav1.GetOptions) (*extensions.ReplicaSet, error) { - return &extensions.ReplicaSet{ - Spec: extensions.ReplicaSetSpec{ - Replicas: 0, - }, - }, nil -} - -type ErrorReplicaSetClient struct { - extensionsclient.ReplicaSetsGetter - conflict bool - invalid bool -} - -func (c *ErrorReplicaSetClient) ReplicaSets(namespace string) extensionsclient.ReplicaSetInterface { - return &ErrorReplicaSets{ - ReplicaSetInterface: c.ReplicaSetsGetter.ReplicaSets(namespace), - invalid: c.invalid, - conflict: c.conflict, - } -} - -func replicaSet() *extensions.ReplicaSet { - return &extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - } -} - func TestReplicaSetScale(t *testing.T) { - fake := fake.NewSimpleClientset(replicaSet()) - scaler := ReplicaSetScaler{fake.Extensions()} + scaleClientExpectedAction := []string{"get", "update"} + scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + err := scaler.Scale("default", name, count, &preconditions, nil, nil) + if err != nil { + t.Fatal(err) } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-replicationSet %s", actions[0], name) + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) } - if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetObject().(*extensions.ReplicaSet).Spec.Replicas != int32(count) { - t.Errorf("unexpected action %v, expected update-replicaSet with replicas = %d", actions[1], count) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } } } func TestReplicaSetScaleRetry(t *testing.T) { - fake := &ErrorReplicaSetClient{ReplicaSetsGetter: fake.NewSimpleClientset().Extensions(), conflict: true} - scaler := &ReplicaSetScaler{fake} + verbsOnError := map[string]*kerrors.StatusError{ + "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), + } + scaleClientExpectedAction := []string{"get", "update", "get"} + scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError) + scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" @@ -1156,17 +684,30 @@ func TestReplicaSetScaleRetry(t *testing.T) { if err == nil { t.Error("Expected error on precondition failure") } + actions := scaleClient.Actions() + if len(actions) != len(scaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions)) + } + for i, verb := range scaleClientExpectedAction { + if 
actions[i].GetVerb() != verb {
+			t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb)
+		}
+	}
 }
 
 func TestReplicaSetScaleInvalid(t *testing.T) {
-	fake := &ErrorReplicaSetClient{ReplicaSetsGetter: fake.NewSimpleClientset().Extensions(), invalid: true}
-	scaler := ReplicaSetScaler{fake}
+	verbsOnError := map[string]*kerrors.StatusError{
+		"update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil),
+	}
+	scaleClientExpectedAction := []string{"get", "update"}
+	scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError)
+	scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
 	preconditions := ScalePrecondition{-1, ""}
 	count := uint(3)
 	name := "foo"
 	namespace := "default"
-	scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count, nil)
+	scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil)
 	pass, err := scaleFunc()
 	if pass {
 		t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
@@ -1175,155 +716,35 @@ func TestReplicaSetScaleInvalid(t *testing.T) {
 	if err == nil || !ok || e.FailureType != ScaleUpdateFailure {
 		t.Errorf("Expected error on invalid update failure, got %v", err)
 	}
+	actions := scaleClient.Actions()
+	if len(actions) != len(scaleClientExpectedAction) {
+		t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions))
+	}
+	for i, verb := range scaleClientExpectedAction {
+		if actions[i].GetVerb() != verb {
+			t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb)
+		}
+	}
 }
 
 func TestReplicaSetsGetterFailsPreconditions(t *testing.T) {
-	fake := fake.NewSimpleClientset(&extensions.ReplicaSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: metav1.NamespaceDefault,
-			Name:      "foo",
-		},
-		Spec: extensions.ReplicaSetSpec{
-			Replicas: 10,
-		},
-	})
-	scaler := ReplicaSetScaler{fake.Extensions()}
+	scaleClientExpectedAction := []string{"get"}
+	scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil)
+	scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"})
 	preconditions := ScalePrecondition{2, ""}
 	count := uint(3)
 	name := "foo"
-	scaler.Scale("default", name, count, &preconditions, nil, nil)
-
-	actions := fake.Actions()
-	if len(actions) != 1 {
-		t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
+	err := scaler.Scale("default", name, count, &preconditions, nil, nil)
+	if err == nil {
+		t.Fatal("expected to get an error but none was returned")
 	}
-	if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != extensions.Resource("replicasets") || action.GetName() != name {
-		t.Errorf("unexpected action: %v, expected get-replicaSets %s", actions[0], name)
+	actions := scaleClient.Actions()
+	if len(actions) != len(scaleClientExpectedAction) {
+		t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(scaleClientExpectedAction), len(actions))
 	}
-}
-
-func TestValidateReplicaSets(t *testing.T) {
-	zero, ten, twenty := int32(0), int32(10), int32(20)
-	tests := []struct {
-		preconditions ScalePrecondition
-		replicaSets   extensions.ReplicaSet
-		expectError   bool
-		test          string
-	}{
-		{
-			preconditions: ScalePrecondition{-1, ""},
-			expectError:   false,
-			test:          "defaults",
-		},
-		{
-			preconditions: ScalePrecondition{-1, ""},
-			replicaSets: extensions.ReplicaSet{
-				ObjectMeta: metav1.ObjectMeta{
-					ResourceVersion: "foo",
-				},
-				Spec: 
extensions.ReplicaSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "defaults 2", - }, - { - preconditions: ScalePrecondition{0, ""}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: zero, - }, - }, - expectError: false, - test: "size matches", - }, - { - preconditions: ScalePrecondition{-1, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "resource version matches", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: ten, - }, - }, - expectError: false, - test: "both match", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: twenty, - }, - }, - expectError: true, - test: "size different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - }, - expectError: true, - test: "no replicas", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: ten, - }, - }, - expectError: true, - test: "version different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - replicaSets: extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: twenty, - }, - }, - expectError: true, - test: "both different", - }, - } - for _, test := range tests { - err := test.preconditions.ValidateReplicaSet(&test.replicaSets) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil && test.expectError { - t.Errorf("expected an error: %v (%s)", err, test.test) + for i, verb := range scaleClientExpectedAction { + if actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) } } } @@ -1331,36 +752,7 @@ func TestValidateReplicaSets(t *testing.T) { // TestGenericScaleSimple exercises GenericScaler.ScaleSimple method func TestGenericScaleSimple(t *testing.T) { // test data - discoveryResources := []*metav1.APIResourceList{ - { - GroupVersion: appsv1beta2.SchemeGroupVersion.String(), - APIResources: []metav1.APIResource{ - {Name: "deployments", Namespaced: true, Kind: "Deployment"}, - {Name: "deployments/scale", Namespaced: true, Kind: "Scale", Group: "apps", Version: "v1beta2"}, - }, - }, - } - appsV1beta2Scale := &appsv1beta2.Scale{ - TypeMeta: metav1.TypeMeta{ - Kind: "Scale", - APIVersion: appsv1beta2.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "abc", - }, - Spec: appsv1beta2.ScaleSpec{Replicas: 10}, - Status: appsv1beta2.ScaleStatus{ - Replicas: 10, - }, - } - pathsResources := map[string]runtime.Object{ - "/apis/apps/v1beta2/namespaces/default/deployments/abc/scale": appsV1beta2Scale, - } - - scaleClient, err := fakeScaleClient(discoveryResources, pathsResources) - if err != nil { - t.Fatal(err) - } + scaleClient := createFakeScaleClient("deployments", "abc", 10, nil) // test scenarios 
scenarios := []struct { @@ -1377,16 +769,16 @@ func TestGenericScaleSimple(t *testing.T) { name: "scale up the \"abc\" deployment", precondition: ScalePrecondition{10, ""}, newSize: 20, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, }, // scenario 2: scale down the "abc" deployment { - name: "scale down the \"abs\" deplyment", + name: "scale down the \"abs\" deployment", precondition: ScalePrecondition{20, ""}, newSize: 5, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, }, @@ -1396,7 +788,7 @@ func TestGenericScaleSimple(t *testing.T) { name: "precondition error, expected size is 1", precondition: ScalePrecondition{1, ""}, newSize: 5, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, expectError: true, @@ -1406,7 +798,7 @@ func TestGenericScaleSimple(t *testing.T) { name: "precondition is not validated when the size is set to -1", precondition: ScalePrecondition{-1, ""}, newSize: 5, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, }, @@ -1415,7 +807,7 @@ func TestGenericScaleSimple(t *testing.T) { name: "precondition error, resource version mismatch", precondition: ScalePrecondition{5, "v1"}, newSize: 5, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, expectError: true, @@ -1425,7 +817,7 @@ func TestGenericScaleSimple(t *testing.T) { // act for index, scenario := range scenarios { t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { - target := ScalerFor(schema.GroupKind{}, nil, scenario.scaleGetter, scenario.targetGR) + target := NewScaler(scenario.scaleGetter, scenario.targetGR) resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize)) @@ -1445,36 +837,7 @@ func TestGenericScaleSimple(t *testing.T) { // TestGenericScale exercises GenericScaler.Scale method func TestGenericScale(t *testing.T) { // test data - discoveryResources := []*metav1.APIResourceList{ - { - GroupVersion: appsv1beta2.SchemeGroupVersion.String(), - APIResources: []metav1.APIResource{ - {Name: "deployments", Namespaced: true, Kind: "Deployment"}, - {Name: "deployments/scale", Namespaced: true, Kind: "Scale", Group: "apps", Version: "v1beta2"}, - }, - }, - } - appsV1beta2Scale := &appsv1beta2.Scale{ - TypeMeta: metav1.TypeMeta{ - Kind: "Scale", - APIVersion: appsv1beta2.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "abc", - }, - Spec: appsv1beta2.ScaleSpec{Replicas: 10}, - Status: appsv1beta2.ScaleStatus{ - Replicas: 10, - }, - } - pathsResources := map[string]runtime.Object{ - "/apis/apps/v1beta2/namespaces/default/deployments/abc/scale": appsV1beta2Scale, - } - - scaleClient, err := fakeScaleClient(discoveryResources, pathsResources) - if err != nil { - t.Fatal(err) - } + scaleClient := createFakeScaleClient("deployments", "abc", 10, nil) // test scenarios scenarios := []struct { @@ -1492,7 +855,7 @@ func 
TestGenericScale(t *testing.T) { name: "scale up the \"abc\" deployment", precondition: ScalePrecondition{10, ""}, newSize: 20, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, }, @@ -1501,7 +864,7 @@ func TestGenericScale(t *testing.T) { name: "a resource name cannot be empty", precondition: ScalePrecondition{10, ""}, newSize: 20, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "", scaleGetter: scaleClient, expectError: true, @@ -1511,7 +874,7 @@ func TestGenericScale(t *testing.T) { name: "wait for replicas error due to status.Replicas != spec.Replicas", precondition: ScalePrecondition{10, ""}, newSize: 20, - targetGR: schema.GroupResource{Group: "apps", Resource: "deployment"}, + targetGR: schema.GroupResource{Group: "apps", Resource: "deployments"}, resName: "abc", scaleGetter: scaleClient, waitForReplicas: &RetryParams{time.Duration(5 * time.Second), time.Duration(5 * time.Second)}, @@ -1522,7 +885,7 @@ func TestGenericScale(t *testing.T) { // act for index, scenario := range scenarios { t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { - target := ScalerFor(schema.GroupKind{}, nil, scenario.scaleGetter, scenario.targetGR) + target := NewScaler(scenario.scaleGetter, scenario.targetGR) err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas) @@ -1535,75 +898,3 @@ func TestGenericScale(t *testing.T) { }) } } - -func fakeScaleClient(discoveryResources []*metav1.APIResourceList, pathsResources map[string]runtime.Object) (scale.ScalesGetter, error) { - fakeDiscoveryClient := &fakedisco.FakeDiscovery{Fake: &testcore.Fake{}} - fakeDiscoveryClient.Resources = discoveryResources - restMapperRes, err := discovery.GetAPIGroupResources(fakeDiscoveryClient) - if err != nil { - return nil, err - } - restMapper := discovery.NewRESTMapper(restMapperRes, apimeta.InterfacesForUnstructured) - codecs := serializer.NewCodecFactory(scale.NewScaleConverter().Scheme()) - fakeReqHandler := func(req *http.Request) (*http.Response, error) { - path := req.URL.Path - scale, isScalePath := pathsResources[path] - if !isScalePath { - return nil, fmt.Errorf("unexpected request for URL %q with method %q", req.URL.String(), req.Method) - } - - switch req.Method { - case "GET": - res, err := json.Marshal(scale) - if err != nil { - return nil, err - } - return &http.Response{StatusCode: 200, Header: defaultHeaders(), Body: bytesBody(res)}, nil - case "PUT": - decoder := codecs.UniversalDeserializer() - body, err := ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - newScale, newScaleGVK, err := decoder.Decode(body, nil, nil) - if err != nil { - return nil, fmt.Errorf("unexpected request body: %v", err) - } - if *newScaleGVK != scale.GetObjectKind().GroupVersionKind() { - return nil, fmt.Errorf("unexpected scale API version %s (expected %s)", newScaleGVK.String(), scale.GetObjectKind().GroupVersionKind().String()) - } - res, err := json.Marshal(newScale) - if err != nil { - return nil, err - } - - pathsResources[path] = newScale - return &http.Response{StatusCode: 200, Header: defaultHeaders(), Body: bytesBody(res)}, nil - default: - return nil, fmt.Errorf("unexpected request for URL %q with method %q", req.URL.String(), req.Method) - } - } - - 
fakeClient := &fakerest.RESTClient{ - Client: fakerest.CreateHTTPClient(fakeReqHandler), - NegotiatedSerializer: serializer.DirectCodecFactory{ - CodecFactory: serializer.NewCodecFactory(scale.NewScaleConverter().Scheme()), - }, - GroupVersion: schema.GroupVersion{}, - VersionedAPIPath: "/not/a/real/path", - } - - resolver := scale.NewDiscoveryScaleKindResolver(fakeDiscoveryClient) - client := scale.New(fakeClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) - return client, nil -} - -func bytesBody(bodyBytes []byte) io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader(bodyBytes)) -} - -func defaultHeaders() http.Header { - header := http.Header{} - header.Set("Content-Type", runtime.ContentTypeJSON) - return header -} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go index 2ab7ff22795b..82e3802f512b 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go @@ -197,7 +197,10 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut Body(scaleUpdateBytes). Do() if err := result.Error(); err != nil { - return nil, fmt.Errorf("could not update the scale for %s %s: %v", resource.String(), scale.Name, err) + // propagate "raw" error from the API + // this allows callers to interpret underlying Reason field + // for example: errors.IsConflict(err) + return nil, err } scaleBytes, err := result.Raw() diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/BUILD b/vendor/k8s.io/kubernetes/test/e2e/apps/BUILD index b90b68e24923..66310531f3fe 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/BUILD +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/BUILD @@ -66,6 +66,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go b/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go index b6c46d52006d..6fe71e09c665 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go @@ -207,7 +207,7 @@ var _ = SIGDescribe("CronJob", func() { By("Deleting the job") job := cronJob.Status.Active[0] - reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset) + reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset, f.ScalesGetter) Expect(err).NotTo(HaveOccurred()) timeout := 1 * time.Minute err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0)) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go index cfb66c72d9f7..d7e18e2403c5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go @@ -70,7 +70,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name)) - dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset) + dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset, 
f.ScalesGetter) Expect(err).NotTo(HaveOccurred()) err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil) Expect(err).NotTo(HaveOccurred()) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go index 74743de98bc5..5112c85cbbdf 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -158,12 +159,12 @@ func newDeploymentRollback(name string, annotations map[string]string, revision } } -func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) { +func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, deploymentName string) { deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) framework.Logf("Deleting deployment %s", deploymentName) - reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient) + reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient, scaleClient) Expect(err).NotTo(HaveOccurred()) timeout := 1 * time.Minute @@ -224,7 +225,7 @@ func testDeleteDeployment(f *framework.Framework) { newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) Expect(err).NotTo(HaveOccurred()) Expect(newRS).NotTo(Equal(nilRs)) - stopDeployment(c, internalClient, ns, deploymentName) + stopDeployment(c, internalClient, f.ScalesGetter, ns, deploymentName) } func testRollingUpdateDeployment(f *framework.Framework) { diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/job.go b/vendor/k8s.io/kubernetes/test/e2e/apps/job.go index 8a4b8c0caa75..8543c29d599a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/job.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/job.go @@ -111,7 +111,7 @@ var _ = SIGDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("delete a job") - reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset) + reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset, f.ScalesGetter) Expect(err).NotTo(HaveOccurred()) timeout := 1 * time.Minute err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0)) diff --git a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go index 136b2a23610a..ab2581f739bd 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go +++ b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go @@ -93,7 +93,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024 memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory. 
replicas := 1 - resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset) + resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset, f.ScalesGetter) defer resourceConsumer.CleanUp() resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough. diff --git a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go index a6fa9ee318ac..8d9f235c3001 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -347,7 +347,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun timeToWait := 5 * time.Minute podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait) framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, podsConfig.Name) // Ensure that no new nodes have been added so far. Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount)) @@ -417,7 +417,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC } timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes)) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, config.extraPods.Name) + return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, config.extraPods.Name) } } @@ -500,7 +500,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p err := framework.RunRC(*config) framework.ExpectNoError(err) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id) + return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) } } @@ -540,7 +540,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist framework.ExpectNoError(framework.RunRC(*rcConfig)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id) + return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) } } diff --git a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go index 91f6290b0e61..6bf0744bee5f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -168,7 +168,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { By("Creating unschedulable pod") 
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") By("Waiting for scale up hoping it won't happen") // Verify that the appropriate event was generated @@ -195,7 +195,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { simpleScaleUpTest := func(unready int) { ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, @@ -222,7 +222,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Schedule more pods than can fit and wait for cluster to scale-up") ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { return s.status == caOngoingScaleUpStatus @@ -265,8 +265,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Reserving 0.1x more memory than the cluster holds to trigger scale up") totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb)) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") // Verify, that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -289,7 +289,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() { scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "host-port") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout)) @@ -304,12 +304,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } By("starting a pod with anti-affinity on each node") framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) By("scheduling extra pods with anti-affinity to existing ones") framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", 
labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "extra-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) @@ -323,14 +323,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { "anti-affinity": "yes", } framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") By("waiting for all pods before triggering scale up") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) By("creating a pod requesting EmptyDir") framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "extra-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) @@ -388,7 +388,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) defer func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod") + framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") glog.Infof("RC and pods not using volume deleted") }() @@ -401,7 +401,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { volumes := buildVolumes(pv, pvc) framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes)) defer func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, pvcPodName) + framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, pvcPodName) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }() @@ -506,7 +506,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer removeLabels(registeredNodes) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector")) + framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "node-selector")) }) It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() { @@ -524,8 +524,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { extraPods := extraNodes + 1 totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb)) By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods)) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, 
f.InternalClientset, f.Namespace.Name, "memory-reservation") // Apparently GKE master is restarted couple minutes after the node pool is added // reseting all the timers in scale down code. Adding 5 extra minutes to workaround @@ -663,7 +663,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Run a scale-up test") ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -776,7 +776,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction) } else { ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") time.Sleep(scaleUpTimeout) currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount) @@ -974,7 +974,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str labelMap := map[string]string{"test_id": testID} framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, "reschedulable-pods") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, "reschedulable-pods") By("Create a PodDisruptionBudget") minAvailable := intstr.FromInt(numPods - pdbSize) @@ -1404,7 +1404,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e framework.ExpectNoError(err) } return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id) + return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) } } framework.Failf("Failed to reserve memory within timeout") @@ -1790,7 +1790,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) { err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest) return func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, id) + framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, id) }, err } diff --git a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go index 9dd853ed642d..4213882f6dc0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -116,7 +116,7 @@ type HPAScaleTest struct { // TODO The use of 3 states is arbitrary, we could 
eventually make this test handle "n" states once this test stabilizes. func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) { const timeToWait = 15 * time.Minute - rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset) + rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter) defer rc.CleanUp() hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods) defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name) diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/BUILD b/vendor/k8s.io/kubernetes/test/e2e/common/BUILD index bf9d3625955e..8481fade0fcb 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/BUILD +++ b/vendor/k8s.io/kubernetes/test/e2e/common/BUILD @@ -65,6 +65,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go b/vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go index 3efbf8d33d45..f93a9a599401 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go +++ b/vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go @@ -36,6 +36,7 @@ import ( testutils "k8s.io/kubernetes/test/utils" . 
"github.com/onsi/ginkgo" + scaleclient "k8s.io/client-go/scale" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -86,6 +87,7 @@ type ResourceConsumer struct { nsName string clientSet clientset.Interface internalClientset *internalclientset.Clientset + scaleClient scaleclient.ScalesGetter cpu chan int mem chan int customMetric chan int @@ -104,15 +106,15 @@ func GetResourceConsumerImage() string { return resourceConsumerImage } -func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer { +func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset) + dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset, scaleClient) } // TODO this still defaults to replication controller -func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer { +func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds, - initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset) + initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset, scaleClient) } /* @@ -123,7 +125,7 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod */ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, - requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer { + requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit) rc := &ResourceConsumer{ @@ -133,6 +135,7 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl nsName: nsName, clientSet: 
clientset, internalClientset: internalClientset, + scaleClient: scaleClient, cpu: make(chan int), mem: make(chan int), customMetric: make(chan int), @@ -401,9 +404,9 @@ func (rc *ResourceConsumer) CleanUp() { // Wait some time to ensure all child goroutines are finished. time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() - framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name)) + framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, rc.scaleClient, kind, rc.nsName, rc.name)) framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) - framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) + framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, rc.scaleClient, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go index 87d9486cbddf..7e3fedbc76fa 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go @@ -155,8 +155,8 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error { return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name) } -func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error { - return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name) +func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, name string) error { + return DeleteResourceAndPods(clientset, internalClientset, scaleClient, api.Kind("ReplicationController"), ns, name) } func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go index a450b36fb0cf..9d04c87a659c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go @@ -47,6 +47,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + scaleclient "k8s.io/client-go/scale" ) const ( @@ -1255,8 +1256,8 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli return podNames, serviceIP, nil } -func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error { - if err := DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil { +func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, name string) error { + if err := DeleteRCAndPods(clientset, internalClientset, scaleClient, ns, name); err != nil { return err } if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index a3a8db9cb3fb..215f9d29bf0f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -2984,7 +2984,7 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { } // DeleteResourceAndPods deletes a given resource and all pods it spawned -func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error { +func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, kind schema.GroupKind, ns, name string) error { By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns)) rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name) @@ -3005,7 +3005,7 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte } defer ps.Stop() startTime := time.Now() - if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil); err != nil { + if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil { return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err) } deleteTime := time.Now().Sub(startTime) diff --git a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/stackdriver.go b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/stackdriver.go index c11a92e6522e..582f31ae0018 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/stackdriver.go @@ -101,7 +101,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per framework.ExpectNoError(err) - rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset) + rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset, f.ScalesGetter) defer rc.CleanUp() rc.WaitForReplicas(pods, 15*time.Minute) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go b/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go index d455a6f2b93e..a46ec9986d17 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/proxy.go @@ -163,7 +163,7 @@ var _ = SIGDescribe("Proxy", func() { CreatedPods: &pods, } 
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, cfg.Name) Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred()) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service.go b/vendor/k8s.io/kubernetes/test/e2e/network/service.go index 42130c7d0189..d85c657ab95a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service.go @@ -325,7 +325,7 @@ var _ = SIGDescribe("Services", func() { // Stop service 1 and make sure it is gone. By("stopping service1") - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service1")) By("verifying service1 is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) @@ -359,13 +359,13 @@ var _ = SIGDescribe("Services", func() { svc2 := "service2" defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc1)) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, svc1)) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc2)) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, svc2)) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) @@ -412,7 +412,7 @@ var _ = SIGDescribe("Services", func() { numPods, servicePort := 3, 80 defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service1")) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) @@ -439,7 +439,7 @@ var _ = SIGDescribe("Services", func() { // Create a new service and check if it's not reusing IP. 
defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service2")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service2")) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) @@ -1683,7 +1683,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred()) } - framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, serviceName)) + framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, serviceName)) } }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go index 8076d2ae57ad..cd4ae2a572e5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet.go @@ -334,7 +334,7 @@ var _ = SIGDescribe("kubelet", func() { } By("Deleting the RC") - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName) + framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its // cache) and returns a list of running pods. Some possible causes of diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go index 8f54434b5a89..3380059a371f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/kubelet_perf.go @@ -117,7 +117,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames verifyCPULimits(expectedCPU, cpuSummary) By("Deleting the RC") - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName) + framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) } func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { diff --git a/vendor/k8s.io/kubernetes/test/e2e/scalability/density.go b/vendor/k8s.io/kubernetes/test/e2e/scalability/density.go index 788259eab535..d99cc7bd3046 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scalability/density.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scalability/density.go @@ -35,6 +35,7 @@ import ( utiluuid "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/apis/batch" @@ -65,6 +66,7 @@ type DensityTestConfig struct { Configs []testutils.RunObjectConfig ClientSets []clientset.Interface InternalClientsets []internalclientset.Interface + ScaleClients []scaleclient.ScalesGetter PollInterval time.Duration PodCount int // What kind of resource we want to create @@ -115,6 +117,7 @@ func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *t 
framework.ExpectNoError(framework.DeleteResourceAndPods( dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], + dtc.ScaleClients[i%numberOfClients], extensions.Kind("DaemonSet"), dtc.DaemonConfigs[i].Namespace, dtc.DaemonConfigs[i].Name, @@ -319,7 +322,7 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha framework.ExpectNoError(err) } else { By(fmt.Sprintf("Cleaning up the %v and pods", kind)) - err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], kind, namespace, name) + err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], dtc.ScaleClients[i%numberOfClients], kind, namespace, name) framework.ExpectNoError(err) } } @@ -612,11 +615,12 @@ var _ = SIGDescribe("Density", func() { } // Single client is running out of http2 connections in delete phase, hence we need more. - clients, internalClients, _, err = createClients(2) + clients, internalClients, scalesClients, err = createClients(2) dConfig := DensityTestConfig{ ClientSets: clients, InternalClientsets: internalClients, + ScaleClients: scalesClients, Configs: configs, PodCount: totalPods, PollInterval: DensityPollInterval, diff --git a/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go b/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go index a717929c8c09..e0a935dcfab9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go @@ -289,6 +289,7 @@ var _ = SIGDescribe("Load capacity", func() { framework.ExpectNoError(framework.DeleteResourceAndPods( f.ClientSet, f.InternalClientset, + f.ScalesGetter, extensions.Kind("DaemonSet"), config.Namespace, config.Name, @@ -700,7 +701,7 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) } else { framework.ExpectNoError(framework.DeleteResourceAndPods( - config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()), + config.GetClient(), config.GetInternalClient(), config.GetScalesGetter(), config.GetKind(), config.GetNamespace(), config.GetName()), fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) } } diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go index 3d5514763999..9eb0d8a44a8d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go @@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false) return err }, rcName, false) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, rcName) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName) // the first replica pod is scheduled, and the second pod will be rejected. 
verifyResult(cs, 1, 1, ns) }) @@ -140,7 +140,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { }, } rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, framework.GetPauseImageName(f.ClientSet)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, affinityRCName) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName) // RC should be running successfully // TODO: WaitForSchedulerAfterAction() can on be used to wait for failure event, @@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { It("validates pod anti-affinity works properly when new replica pod is scheduled", func() { By("Launching two pods on two distinct nodes to get two node names") CreateHostPortPods(f, "host-port", 2, true) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "host-port") podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) Expect(len(podList.Items)).To(Equal(2)) @@ -217,7 +217,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { } rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity, framework.GetPauseImageName(f.ClientSet), map[string]string{k: v}) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, labelRCName) + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, labelRCName) WaitForSchedulerAfterAction(f, func() error { _, err := cs.CoreV1().ReplicationControllers(ns).Create(rc) diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go index 1292022c3632..2a7b5407e070 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/predicates.go @@ -71,7 +71,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{}) if err == nil && *(rc.Spec.Replicas) != 0 { By("Cleaning up the replication controller") - err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName) + err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, RCName) framework.ExpectNoError(err) } }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go index 1bcafa162d99..27ab9c08938f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go @@ -152,7 +152,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. 
- if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rc.Name); err != nil { + if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rc.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err) } }() diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go index 6de8df06c7b5..a42c841d054f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() { It("should ensure that critical pod is scheduled in case there is no resources available", func() { By("reserving all available cpu") err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu") + defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "reserve-all-cpu") framework.ExpectNoError(err) By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled") diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/ubernetes_lite.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/ubernetes_lite.go index 3fdb711325f0..6e78bb24ed4a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/ubernetes_lite.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/ubernetes_lite.go @@ -223,7 +223,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil { + if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, controller.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go b/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go index 4f1a45898edc..0e6d8a6dc619 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/empty_dir_wrapper.go @@ -374,7 +374,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume Expect(err).NotTo(HaveOccurred(), "error creating replication controller") defer func() { - err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName) + err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) framework.ExpectNoError(err) }() diff --git a/vendor/k8s.io/kubernetes/test/e2e/upgrades/horizontal_pod_autoscalers.go b/vendor/k8s.io/kubernetes/test/e2e/upgrades/horizontal_pod_autoscalers.go index f76ae06355ae..c01c46cd6182 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/upgrades/horizontal_pod_autoscalers.go +++ b/vendor/k8s.io/kubernetes/test/e2e/upgrades/horizontal_pod_autoscalers.go @@ -48,7 +48,8 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) { 500, /* cpuLimit */ 200, /* memLimit */ f.ClientSet, - f.InternalClientset) + f.InternalClientset, + f.ScalesGetter) t.hpa = common.CreateCPUHorizontalPodAutoscaler( t.rc, 20, /* targetCPUUtilizationPercent */ diff --git 
a/vendor/k8s.io/kubernetes/test/utils/delete_resources.go b/vendor/k8s.io/kubernetes/test/utils/delete_resources.go index de5771c9503a..acc42c3a3c20 100644 --- a/vendor/k8s.io/kubernetes/test/utils/delete_resources.go +++ b/vendor/k8s.io/kubernetes/test/utils/delete_resources.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" appsinternal "k8s.io/kubernetes/pkg/apis/apps" batchinternal "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" @@ -58,10 +59,6 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam } } -func getReaperForKind(c internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) { - return kubectl.ReaperFor(kind, c) -} - func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { deleteFunc := func() (bool, error) { err := deleteResource(c, kind, namespace, name, options) @@ -76,8 +73,8 @@ func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, nam return RetryWithExponentialBackOff(deleteFunc) } -func DeleteResourceUsingReaperWithRetries(c internalclientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { - reaper, err := getReaperForKind(c, kind) +func DeleteResourceUsingReaperWithRetries(c internalclientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions, scaleClient scaleclient.ScalesGetter) error { + reaper, err := kubectl.ReaperFor(kind, c, scaleClient) if err != nil { return err } From 4b5753d36d160e7a9a8dd6cc218cfd878b3c4823 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 9 Apr 2018 09:25:01 -0400 Subject: [PATCH 2/6] UPSTREAM: 61790: make reapers tolerate 404s on scaling down --- .../k8s.io/kubernetes/pkg/kubectl/delete.go | 11 ++- .../kubernetes/pkg/kubectl/delete_test.go | 88 +++++++++---------- vendor/k8s.io/kubernetes/pkg/kubectl/scale.go | 54 +++--------- .../kubernetes/pkg/kubectl/scale_test.go | 21 ++--- .../src/k8s.io/client-go/scale/client.go | 2 +- 5 files changed, 69 insertions(+), 107 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go index 974798a247c5..310ed8bb0292 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go @@ -206,7 +206,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout // No overlapping controllers. retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) { return err } } @@ -276,7 +276,7 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati // No overlapping ReplicaSets. 
retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) { return err } } @@ -340,7 +340,7 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForStatefulSet := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil && !errors.IsNotFound(err) { return err } @@ -368,7 +368,7 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra // TODO: handle overlapping jobs retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForJobs := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil && !errors.IsNotFound(err) { return err } // at this point only dead pods are left, that should be removed @@ -444,8 +444,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati errList := []error{} for _, rs := range rss { if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil { - scaleGetErr, ok := err.(ScaleError) - if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) { + if errors.IsNotFound(err) { continue } errList = append(errList, err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go index e45f550d4b4b..6bc06d1404d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete_test.go @@ -210,54 +210,54 @@ func TestReplicationControllerStop(t *testing.T) { } for _, test := range tests { - copiedForWatch := test.Objs[0].DeepCopyObject() - scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 3, nil) - fake := fake.NewSimpleClientset(test.Objs...) 
- fakeWatch := watch.NewFake() - fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil)) - - go func() { - fakeWatch.Add(copiedForWatch) - }() - - reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond, scaleClient} - err := reaper.Stop(ns, name, 0, nil) - if !reflect.DeepEqual(err, test.StopError) { - t.Errorf("%s unexpected error: %v", test.Name, err) - continue - } - - actions := fake.Actions() - if len(actions) != len(test.ExpectedActions) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) - continue - } - for i, verb := range test.ExpectedActions { - if actions[i].GetResource().GroupResource() != api.Resource("replicationcontrollers") { - t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb) - } - if actions[i].GetVerb() != verb { - t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb) - } - } - if test.ScaledDown { - scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, name) - if err != nil { - t.Error(err) - } - if scale.Spec.Replicas != 0 { - t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + t.Run(test.Name, func(t *testing.T) { + copiedForWatch := test.Objs[0].DeepCopyObject() + scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 3, nil) + fake := fake.NewSimpleClientset(test.Objs...) + fakeWatch := watch.NewFake() + fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil)) + + go func() { + fakeWatch.Add(copiedForWatch) + }() + + reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond, scaleClient} + err := reaper.Stop(ns, name, 0, nil) + if !reflect.DeepEqual(err, test.StopError) { + t.Fatalf("unexpected error: %v", err) } - actions := scaleClient.Actions() - if len(actions) != len(test.ScaleClientExpectedAction) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) + + actions := fake.Actions() + if len(actions) != len(test.ExpectedActions) { + t.Fatalf("unexpected actions: %v, expected %d actions got %d", actions, len(test.ExpectedActions), len(actions)) } - for i, verb := range test.ScaleClientExpectedAction { + for i, verb := range test.ExpectedActions { + if actions[i].GetResource().GroupResource() != api.Resource("replicationcontrollers") { + t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], verb) + } if actions[i].GetVerb() != verb { - t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) + t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], verb) } } - } + if test.ScaledDown { + scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, name) + if err != nil { + t.Error(err) + } + if scale.Spec.Replicas != 0 { + t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) + } + actions := scaleClient.Actions() + if len(actions) != len(test.ScaleClientExpectedAction) { + t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(test.ScaleClientExpectedAction), len(actions)) + } + for i, verb := range test.ScaleClientExpectedAction { + if 
actions[i].GetVerb() != verb { + t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) + } + } + } + }) } } @@ -776,7 +776,7 @@ func TestDeploymentNotFoundError(t *testing.T) { }, ) fake.AddReactor("get", "replicasets", func(action testcore.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, ScaleError{ActualError: errors.NewNotFound(api.Resource("replicaset"), "doesn't-matter")} + return true, nil, errors.NewNotFound(api.Resource("replicaset"), "doesn't-matter") }) reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond, nil, schema.GroupResource{}} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index a60c991f5962..bc2eb178aa5c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -87,30 +87,6 @@ func (pe PreconditionError) Error() string { return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) } -type ScaleErrorType int - -const ( - ScaleGetFailure ScaleErrorType = iota - ScaleUpdateFailure - ScaleUpdateConflictFailure -) - -// A ScaleError is returned when a scale request passes -// preconditions but fails to actually scale the controller. -type ScaleError struct { - FailureType ScaleErrorType - ResourceVersion string - ActualError error -} - -func (c ScaleError) Error() string { - msg := fmt.Sprintf("Scaling the resource failed with: %v", c.ActualError) - if len(c.ResourceVersion) > 0 { - msg += fmt.Sprintf("; Current resource version %s", c.ResourceVersion) - } - return msg -} - // RetryParams encapsulates the retry parameters used by kubectl's scaler. type RetryParams struct { Interval, Timeout time.Duration @@ -127,16 +103,14 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s if updatedResourceVersion != nil { *updatedResourceVersion = rv } - switch e, _ := err.(ScaleError); err.(type) { - case nil: - return true, nil - case ScaleError: - // Retry only on update conflicts. - if e.FailureType == ScaleUpdateConflictFailure { - return false, nil - } + // Retry only on update conflicts. 
+ if errors.IsConflict(err) { + return false, nil + } + if err != nil { + return false, err } - return false, err + return true, nil } } @@ -163,7 +137,7 @@ type jobScaler struct { func (scaler *jobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} + return "", err } if preconditions != nil { if err := preconditions.ValidateJob(job); err != nil { @@ -174,10 +148,7 @@ func (scaler *jobScaler) ScaleSimple(namespace, name string, preconditions *Scal job.Spec.Parallelism = ¶llelism updatedJob, err := scaler.c.Jobs(namespace).Update(job) if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, job.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, job.ResourceVersion, err} + return "", err } return updatedJob.ObjectMeta.ResourceVersion, nil } @@ -234,7 +205,7 @@ var _ Scaler = &genericScaler{} func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) { scale, err := s.scaleNamespacer.Scales(namespace).Get(s.targetGR, name) if err != nil { - return "", ScaleError{ScaleGetFailure, "", err} + return "", err } if preconditions != nil { if err := preconditions.validate(scale); err != nil { @@ -245,10 +216,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale scale.Spec.Replicas = int32(newSize) updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(s.targetGR, scale) if err != nil { - if errors.IsConflict(err) { - return "", ScaleError{ScaleUpdateConflictFailure, scale.ResourceVersion, err} - } - return "", ScaleError{ScaleUpdateFailure, scale.ResourceVersion, err} + return "", err } return updatedScale.ResourceVersion, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go index e4b8973764be..d75b9ac15a18 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go @@ -87,8 +87,7 @@ func TestReplicationControllerScaleInvalid(t *testing.T) { if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) } - e, ok := err.(ScaleError) - if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + if err == nil { t.Errorf("Expected error on invalid update failure, got %v", err) } actions := scaleClient.Actions() @@ -252,8 +251,7 @@ func TestJobScaleInvalid(t *testing.T) { if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) } - e, ok := err.(ScaleError) - if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + if err == nil { t.Errorf("Expected error on invalid update failure, got %v", err) } } @@ -486,8 +484,7 @@ func TestDeploymentScaleInvalid(t *testing.T) { if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) } - e, ok := err.(ScaleError) - if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + if err == nil { t.Errorf("Expected error on invalid update failure, got %v", err) } actions := scaleClient.Actions() @@ -599,8 +596,7 @@ func TestStatefulSetScaleInvalid(t *testing.T) { if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) } - e, ok := err.(ScaleError) - if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + if 
err == nil { t.Errorf("Expected error on invalid update failure, got %v", err) } actions := scaleClient.Actions() @@ -712,8 +708,7 @@ func TestReplicaSetScaleInvalid(t *testing.T) { if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) } - e, ok := err.(ScaleError) - if err == nil || !ok || e.FailureType != ScaleUpdateFailure { + if err == nil { t.Errorf("Expected error on invalid update failure, got %v", err) } actions := scaleClient.Actions() @@ -859,7 +854,7 @@ func TestGenericScale(t *testing.T) { resName: "abc", scaleGetter: scaleClient, }, - // scenario 2: a resource name cannot be empty + //scenario 2: a resource name cannot be empty { name: "a resource name cannot be empty", precondition: ScalePrecondition{10, ""}, @@ -883,8 +878,8 @@ func TestGenericScale(t *testing.T) { } // act - for index, scenario := range scenarios { - t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { target := NewScaler(scenario.scaleGetter, scenario.targetGR) err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go index 82e3802f512b..b410cb52e4f8 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go @@ -139,7 +139,7 @@ func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) SubResource("scale"). Do() if err := result.Error(); err != nil { - return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), name, err) + return nil, err } scaleBytes, err := result.Raw() From 03cbcf598a120aac8fbaad270cc82efe7e7a2d32 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 9 Apr 2018 09:29:38 -0400 Subject: [PATCH 3/6] UPSTREAM: 62114: removes job scaler, continued --- vendor/k8s.io/kubernetes/pkg/kubectl/BUILD | 2 +- .../k8s.io/kubernetes/pkg/kubectl/cmd/BUILD | 3 + .../kubernetes/pkg/kubectl/cmd/scale.go | 64 +++- .../kubernetes/pkg/kubectl/cmd/scalejob/BUILD | 47 +++ .../pkg/kubectl/cmd/scalejob/doc.go | 18 + .../pkg/kubectl/cmd/scalejob/scalejob.go | 162 ++++++++ .../pkg/kubectl/cmd/scalejob/scalejob_test.go | 292 +++++++++++++++ .../pkg/kubectl/cmd/util/factory.go | 2 +- .../pkg/kubectl/cmd/util/factory_builder.go | 10 +- .../kubernetes/pkg/kubectl/conditions.go | 26 -- .../k8s.io/kubernetes/pkg/kubectl/delete.go | 21 +- .../kubernetes/pkg/kubectl/rolling_updater.go | 4 +- vendor/k8s.io/kubernetes/pkg/kubectl/scale.go | 111 +----- .../kubernetes/pkg/kubectl/scale_test.go | 347 ++---------------- .../test/e2e/apps/daemon_restart.go | 6 +- vendor/k8s.io/kubernetes/test/e2e/examples.go | 4 +- .../test/e2e/framework/deployment_util.go | 5 +- .../kubernetes/test/e2e/framework/rc_util.go | 10 +- .../kubernetes/test/e2e/framework/util.go | 5 +- .../kubernetes/test/e2e/network/service.go | 2 +- .../kubernetes/test/e2e/scalability/load.go | 1 - .../equivalence_cache_predicates.go | 2 +- .../test/e2e/scheduling/priorities.go | 2 +- .../test/e2e/scheduling/rescheduler.go | 6 +- .../kubernetes/test/utils/update_resources.go | 5 +- 25 files changed, 663 insertions(+), 494 deletions(-) create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD create mode 100644 
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob_test.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD index d6405396a5b3..f00064fc5ebe 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD @@ -46,7 +46,6 @@ go_test( "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/printers:go_default_library", @@ -139,6 +138,7 @@ go_library( "//pkg/controller/statefulset:go_default_library", "//pkg/credentialprovider:go_default_library", "//pkg/kubectl/apps:go_default_library", + "//pkg/kubectl/cmd/scalejob:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/kubectl/util/hash:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD index cca26088ed0f..41c8c72ac17f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/BUILD @@ -74,6 +74,7 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/core/validation:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubectl/apply/parse:go_default_library", @@ -82,6 +83,7 @@ go_library( "//pkg/kubectl/cmd/config:go_default_library", "//pkg/kubectl/cmd/resource:go_default_library", "//pkg/kubectl/cmd/rollout:go_default_library", + "//pkg/kubectl/cmd/scalejob:go_default_library", "//pkg/kubectl/cmd/set:go_default_library", "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", @@ -286,6 +288,7 @@ filegroup( "//pkg/kubectl/cmd/config:all-srcs", "//pkg/kubectl/cmd/resource:all-srcs", "//pkg/kubectl/cmd/rollout:all-srcs", + "//pkg/kubectl/cmd/scalejob:all-srcs", "//pkg/kubectl/cmd/set:all-srcs", "//pkg/kubectl/cmd/templates:all-srcs", "//pkg/kubectl/cmd/testdata/edit:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go index c7d42f022a31..c8cd0dcf3439 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go @@ -22,7 +22,9 @@ import ( "github.com/spf13/cobra" + batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -60,7 +62,7 @@ var ( func NewCmdScale(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command { options := &resource.FilenameOptions{} - validArgs := 
[]string{"deployment", "replicaset", "replicationcontroller", "job", "statefulset"} + validArgs := []string{"deployment", "replicaset", "replicationcontroller", "statefulset"} argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ @@ -139,6 +141,15 @@ func RunScale(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args return fmt.Errorf("cannot use --resource-version with multiple resources") } + currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") + precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion} + retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) + + var waitForReplicas *kubectl.RetryParams + if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 { + waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) + } + counter := 0 err = r.Visit(func(info *resource.Info, err error) error { if err != nil { @@ -147,26 +158,29 @@ func RunScale(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args mapping := info.ResourceMapping() if mapping.Resource == "jobs" { + // go down the legacy jobs path. This can be removed in 3.14 For now, contain it. fmt.Fprintf(errOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", cmd.Parent().Name()) - } - scaler, err := f.Scaler(mapping) - if err != nil { - return err - } + clientset, err := f.ClientSet() + if err != nil { + return err + } + if err := ScaleJob(info, clientset.Batch(), uint(count), precondition, retry, waitForReplicas); err != nil { + return err + } - currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") - precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion} - retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) + } else { + scaler, err := f.Scaler() + if err != nil { + return err + } - var waitForReplicas *kubectl.RetryParams - if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 { - waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) + gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) + if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas, gvk.GroupResource()); err != nil { + return err + } } - if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil { - return err - } if cmdutil.ShouldRecord(cmd, info) { patchBytes, patchType, err := cmdutil.ChangeResourcePatch(info, f.Command(cmd, true)) if err != nil { @@ -195,3 +209,23 @@ func RunScale(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args } return nil } + +func ScaleJob(info *resource.Info, jobsClient batchclient.JobsGetter, count uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error { + scaler := scalejob.JobPsuedoScaler{ + JobsClient: jobsClient, + } + var jobPreconditions *scalejob.ScalePrecondition + if preconditions != nil { + jobPreconditions = &scalejob.ScalePrecondition{Size: preconditions.Size, ResourceVersion: preconditions.ResourceVersion} + } + var jobRetry *scalejob.RetryParams + if retry != nil { + jobRetry = &scalejob.RetryParams{Interval: retry.Interval, Timeout: retry.Timeout} + } + var jobWaitForReplicas *scalejob.RetryParams + if waitForReplicas != nil { + jobWaitForReplicas = &scalejob.RetryParams{Interval: waitForReplicas.Interval, Timeout: waitForReplicas.Timeout} + } + + return scaler.Scale(info.Namespace, info.Name, count, 
jobPreconditions, jobRetry, jobWaitForReplicas) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD new file mode 100644 index 000000000000..f95519c7805c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "scalejob.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/batch:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["scalejob_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go new file mode 100644 index 000000000000..589fa1a64ad6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scalejob is deprecated This package contains deprecated functions used to "scale" jobs in a way inconsistent with normal scaling rules +package scalejob diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go new file mode 100644 index 000000000000..70264cd9fa39 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob.go @@ -0,0 +1,162 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scalejob + +import ( + "fmt" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/apis/batch" + + batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" +) + +// ScalePrecondition is a deprecated precondition +type ScalePrecondition struct { + Size int + ResourceVersion string +} + +// RetryParams is a deprecated retry struct +type RetryParams struct { + Interval, Timeout time.Duration +} + +// PreconditionError is a deprecated error +type PreconditionError struct { + Precondition string + ExpectedValue string + ActualValue string +} + +func (pe PreconditionError) Error() string { + return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) +} + +// ScaleCondition is a closure around Scale that facilitates retries via util.wait +func scaleCondition(r *JobPsuedoScaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc { + return func() (bool, error) { + rv, err := r.ScaleSimple(namespace, name, precondition, count) + if updatedResourceVersion != nil { + *updatedResourceVersion = rv + } + // Retry only on update conflicts. + if errors.IsConflict(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil + } +} + +// JobPsuedoScaler is a deprecated scale-similar thing that doesn't obey scale semantics +type JobPsuedoScaler struct { + JobsClient batchclient.JobsGetter +} + +// ScaleSimple is responsible for updating job's parallelism. It returns the +// resourceVersion of the job if the update is successful. +func (scaler *JobPsuedoScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { + job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return "", err + } + if preconditions != nil { + if err := validateJob(job, preconditions); err != nil { + return "", err + } + } + parallelism := int32(newSize) + job.Spec.Parallelism = &parallelism + updatedJob, err := scaler.JobsClient.Jobs(namespace).Update(job) + if err != nil { + return "", err + } + return updatedJob.ObjectMeta.ResourceVersion, nil +} + +// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil), +// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired +// number, which can be less than requested based on job's current progress. 
+func (scaler *JobPsuedoScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // Make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := scaleCondition(scaler, preconditions, namespace, name, newSize, nil) + if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, jobHasDesiredParallelism(scaler.JobsClient, job)) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", name) + } + return err + } + return nil +} + +// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count +// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count. +func jobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc { + return func() (bool, error) { + job, err := jobClient.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + + // desired parallelism can be either the exact number, in which case return immediately + if job.Status.Active == *job.Spec.Parallelism { + return true, nil + } + if job.Spec.Completions == nil { + // A job without specified completions needs to wait for Active to reach Parallelism. + return false, nil + } + + // otherwise count successful + progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded + return progress == 0, nil + } +} + +func validateJob(job *batch.Job, precondition *ScalePrecondition) error { + if precondition.Size != -1 && job.Spec.Parallelism == nil { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} + } + if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} + } + if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob_test.go new file mode 100644 index 000000000000..d8a22f88efc3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scalejob/scalejob_test.go @@ -0,0 +1,292 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scalejob + +import ( + "errors" + "testing" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testcore "k8s.io/client-go/testing" + "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" +) + +type errorJobs struct { + batchclient.JobInterface + conflict bool + invalid bool +} + +func (c *errorJobs) Update(job *batch.Job) (*batch.Job, error) { + switch { + case c.invalid: + return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil) + case c.conflict: + return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil) + } + return nil, errors.New("Job update failure") +} + +func (c *errorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) { + zero := int32(0) + return &batch.Job{ + Spec: batch.JobSpec{ + Parallelism: &zero, + }, + }, nil +} + +type errorJobClient struct { + batchclient.JobsGetter + conflict bool + invalid bool +} + +func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface { + return &errorJobs{ + JobInterface: c.JobsGetter.Jobs(namespace), + conflict: c.conflict, + invalid: c.invalid, + } +} + +func TestJobScaleRetry(t *testing.T) { + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true} + scaler := &JobPsuedoScaler{JobsClient: fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass != false { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + if err != nil { + t.Errorf("Did not expect an error on update failure, got %v", err) + } + preconditions = ScalePrecondition{3, ""} + scaleFunc = scaleCondition(scaler, &preconditions, namespace, name, count, nil) + pass, err = scaleFunc() + if err == nil { + t.Error("Expected error on precondition failure") + } +} + +func job() *batch.Job { + return &batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + } +} + +func TestJobScale(t *testing.T) { + fakeClientset := fake.NewSimpleClientset(job()) + scaler := &JobPsuedoScaler{JobsClient: fakeClientset.Batch()} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fakeClientset.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + } + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name) + } + if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) { + t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count) + } +} + +func TestJobScaleInvalid(t *testing.T) { + fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true} + scaler := &JobPsuedoScaler{JobsClient: fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + 
namespace := "default" + + scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil) + pass, err := scaleFunc() + if pass { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + if err == nil { + t.Errorf("Expected error on invalid update failure, got %v", err) + } +} + +func TestJobScaleFailsPreconditions(t *testing.T) { + ten := int32(10) + fake := fake.NewSimpleClientset(&batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }) + scaler := &JobPsuedoScaler{JobsClient: fake.Batch()} + preconditions := ScalePrecondition{2, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 1 { + t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions) + } + if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name) + } +} + +func TestValidateJob(t *testing.T) { + zero, ten, twenty := int32(0), int32(10), int32(20) + tests := []struct { + preconditions ScalePrecondition + job batch.Job + expectError bool + test string + }{ + { + preconditions: ScalePrecondition{-1, ""}, + expectError: false, + test: "defaults", + }, + { + preconditions: ScalePrecondition{-1, ""}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "defaults 2", + }, + { + preconditions: ScalePrecondition{0, ""}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &twenty, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "foo", + }, + }, + expectError: true, + test: "parallelism nil", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: batch.JobSpec{ + Parallelism: &twenty, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := validateJob(&test.job, &test.preconditions) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + 
t.Errorf("expected an error: %v (%s)", err, test.test) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index 7e7898a6e18b..08553af87a86 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -214,7 +214,7 @@ type BuilderFactory interface { // PluginRunner provides the implementation to be used to run cli plugins. PluginRunner() plugins.PluginRunner // Returns a Scaler for changing the size of the specified RESTMapping type or an error - Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) + Scaler() (kubectl.Scaler, error) // ScaleClient gives you back scale getter ScaleClient() (scaleclient.ScalesGetter, error) // Returns a Reaper for gracefully shutting down resources. diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go index 080e39fc4b22..1d2edd9c5064 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go @@ -103,19 +103,13 @@ func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) { return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil } -func (f *ring2Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - clientset, err := f.clientAccessFactory.ClientSet() - if err != nil { - return nil, err - } - +func (f *ring2Factory) Scaler() (kubectl.Scaler, error) { scalesGetter, err := f.ScaleClient() if err != nil { return nil, err } - gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource) - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset.Batch(), scalesGetter, gvk.GroupResource()), nil + return kubectl.NewScaler(scalesGetter), nil } func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go index 666b248f416a..771ccbc4fb8d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go @@ -26,11 +26,9 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" ) @@ -99,30 +97,6 @@ func StatefulSetHasDesiredReplicas(ssClient appsclient.StatefulSetsGetter, ss *a } } -// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count -// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count. 
-func JobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc { - return func() (bool, error) { - job, err := jobClient.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - - // desired parallelism can be either the exact number, in which case return immediately - if job.Status.Active == *job.Spec.Parallelism { - return true, nil - } - if job.Spec.Completions == nil { - // A job without specified completions needs to wait for Active to reach Parallelism. - return false, nil - } - - // otherwise count successful - progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded - return progress == 0, nil - } -} - // DeploymentHasDesiredReplicas returns a condition that will be true if and only if // the desired replica count for a deployment equals its updated replicas count. // (non-terminated pods that have the desired template spec). diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go index 310ed8bb0292..0d1f8a46ea6d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go @@ -39,6 +39,7 @@ import ( coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob" ) const ( @@ -155,7 +156,7 @@ func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterfac func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { rc := reaper.client.ReplicationControllers(namespace) - scaler := NewScaler(reaper.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"}) + scaler := NewScaler(reaper.scaleClient) ctrl, err := rc.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -206,7 +207,7 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout // No overlapping controllers. retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil && !errors.IsNotFound(err) { return err } } @@ -224,7 +225,7 @@ func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *exten func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { rsc := reaper.client.ReplicaSets(namespace) - scaler := NewScaler(reaper.scaleClient, reaper.gr) + scaler := NewScaler(reaper.scaleClient) rs, err := rsc.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -276,7 +277,7 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati // No overlapping ReplicaSets. 
retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil && !errors.IsNotFound(err) { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, reaper.gr); err != nil && !errors.IsNotFound(err) { return err } } @@ -325,7 +326,7 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { statefulsets := reaper.client.StatefulSets(namespace) - scaler := NewScaler(reaper.scaleClient, apps.Resource("statefulsets")) + scaler := NewScaler(reaper.scaleClient) ss, err := statefulsets.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -340,7 +341,7 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat retry := NewRetryParams(reaper.pollInterval, reaper.timeout) waitForStatefulSet := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet); err != nil && !errors.IsNotFound(err) { + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet, apps.Resource("statefulsets")); err != nil && !errors.IsNotFound(err) { return err } @@ -354,7 +355,9 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { jobs := reaper.client.Jobs(namespace) pods := reaper.podClient.Pods(namespace) - scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, reaper.client, nil, schema.GroupResource{}) + scaler := &scalejob.JobPsuedoScaler{ + JobsClient: reaper.client, + } job, err := jobs.Get(name, metav1.GetOptions{}) if err != nil { return err @@ -366,8 +369,8 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra } // TODO: handle overlapping jobs - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForJobs := NewRetryParams(reaper.pollInterval, timeout) + retry := &scalejob.RetryParams{Interval: reaper.pollInterval, Timeout: reaper.timeout} + waitForJobs := &scalejob.RetryParams{Interval: reaper.pollInterval, Timeout: reaper.timeout} if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil && !errors.IsNotFound(err) { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index e1ee2bc10a53..084dce74d492 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -400,8 +400,8 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi // scalerScaleAndWait scales a controller using a Scaler and a real client. 
func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { - scaler := NewScaler(r.scaleClient, schema.GroupResource{Resource: "replicationcontrollers"}) - if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil { + scaler := NewScaler(r.scaleClient) + if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil { return nil, err } return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{}) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index bc2eb178aa5c..590b600fd498 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -23,13 +23,10 @@ import ( autoscalingapi "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/apis/batch" scaleclient "k8s.io/client-go/scale" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" ) // TODO: Figure out if we should be waiting on initializers in the Scale() functions below. @@ -40,30 +37,15 @@ type Scaler interface { // retries in the event of resource version mismatch (if retry is not nil), // and optionally waits until the status of the resource matches newSize (if wait is not nil) // TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed. - Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error + Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gr schema.GroupResource) error // ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but // a necessary building block for Scale - ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) -} - -// ScalerFor gets a scaler for a given resource -func ScalerFor(kind schema.GroupKind, jobsClient batchclient.JobsGetter, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler { - // it seems like jobs dont't follow "normal" scale semantics. - // For example it is not clear whether HPA could make use of it or not. - // For more details see: https://github.com/kubernetes/kubernetes/pull/58468 - switch kind { - case batch.Kind("Job"): - return &jobScaler{jobsClient} // Either kind of job can be scaled with Batch interface. - default: - return NewScaler(scalesGetter, gr) - } + ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) } // NewScaler get a scaler for a given resource -// Note that if you are trying to crate create a scaler for "job" then stop and use ScalerFor instead. -// When scaling jobs is dead, we'll remove ScalerFor method. 
-func NewScaler(scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler { - return &genericScaler{scalesGetter, gr} +func NewScaler(scalesGetter scaleclient.ScalesGetter) Scaler { + return &genericScaler{scalesGetter} } // ScalePrecondition describes a condition that must be true for the scale to take place @@ -97,9 +79,9 @@ func NewRetryParams(interval, timeout time.Duration) *RetryParams { } // ScaleCondition is a closure around Scale that facilitates retries via util.wait -func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc { +func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gr schema.GroupResource) wait.ConditionFunc { return func() (bool, error) { - rv, err := r.ScaleSimple(namespace, name, precondition, count) + rv, err := r.ScaleSimple(namespace, name, precondition, count, gr) if updatedResourceVersion != nil { *updatedResourceVersion = rv } @@ -114,74 +96,6 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s } } -// ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { - if precondition.Size != -1 && job.Spec.Parallelism == nil { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} - } - if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} - } - if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} - } - return nil -} - -type jobScaler struct { - c batchclient.JobsGetter -} - -// ScaleSimple is responsible for updating job's parallelism. It returns the -// resourceVersion of the job if the update is successful. -func (scaler *jobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { - job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - if preconditions != nil { - if err := preconditions.ValidateJob(job); err != nil { - return "", err - } - } - parallelism := int32(newSize) - job.Spec.Parallelism = &parallelism - updatedJob, err := scaler.c.Jobs(namespace).Update(job) - if err != nil { - return "", err - } - return updatedJob.ObjectMeta.ResourceVersion, nil -} - -// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired -// number, which can be less than requested based on job's current progress. 
-func (scaler *jobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, JobHasDesiredParallelism(scaler.c, job)) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", name) - } - return err - } - return nil -} - // validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) error { if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size { @@ -196,14 +110,13 @@ func (precondition *ScalePrecondition) validate(scale *autoscalingapi.Scale) err // genericScaler can update scales for resources in a particular namespace type genericScaler struct { scaleNamespacer scaleclient.ScalesGetter - targetGR schema.GroupResource } var _ Scaler = &genericScaler{} // ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful. -func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) { - scale, err := s.scaleNamespacer.Scales(namespace).Get(s.targetGR, name) +func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) { + scale, err := s.scaleNamespacer.Scales(namespace).Get(gr, name) if err != nil { return "", err } @@ -214,7 +127,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale } scale.Spec.Replicas = int32(newSize) - updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(s.targetGR, scale) + updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(gr, scale) if err != nil { return "", err } @@ -223,7 +136,7 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale // Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil), // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. 
-func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { +func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gr schema.GroupResource) error { if preconditions == nil { preconditions = &ScalePrecondition{-1, ""} } @@ -231,7 +144,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec // make it try only once, immediately retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} } - cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil) + cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gr) if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { return err } @@ -239,7 +152,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec err := wait.PollImmediate( waitForReplicas.Interval, waitForReplicas.Timeout, - scaleHasDesiredReplicas(s.scaleNamespacer, s.targetGR, resourceName, namespace, int32(newSize))) + scaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, int32(newSize))) if err == wait.ErrWaitTimeout { return fmt.Errorf("timed out waiting for %q to be synced", resourceName) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go index d75b9ac15a18..e6a5106d56c1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale_test.go @@ -17,7 +17,6 @@ limitations under the License. package kubectl import ( - "errors" "fmt" "testing" "time" @@ -26,11 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/scale" - testcore "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" ) func TestReplicationControllerScaleRetry(t *testing.T) { @@ -39,13 +34,13 @@ func TestReplicationControllerScaleRetry(t *testing.T) { } scaleClientExpectedAction := []string{"get", "update", "get"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" namespace := metav1.NamespaceDefault - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -54,7 +49,7 @@ func TestReplicationControllerScaleRetry(t *testing.T) { t.Errorf("Did not expect an error on update conflict failure, got %v", err) } preconditions = ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) pass, err = scaleFunc() if err == nil { 
t.Errorf("Expected error on precondition failure") @@ -76,13 +71,13 @@ func TestReplicationControllerScaleInvalid(t *testing.T) { } scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 1, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" namespace := "default" - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -104,11 +99,11 @@ func TestReplicationControllerScaleInvalid(t *testing.T) { func TestReplicationControllerScale(t *testing.T) { scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo-v1", 2, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo-v1" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) if err != nil { t.Fatalf("unexpected error occurred = %v while scaling the resource", err) @@ -127,11 +122,11 @@ func TestReplicationControllerScale(t *testing.T) { func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { scaleClientExpectedAction := []string{"get"} scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 10, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "", Resource: "replicationcontrollers"}) if err == nil { t.Fatal("expected to get an error but none was returned") } @@ -146,281 +141,19 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { } } -type errorJobs struct { - batchclient.JobInterface - conflict bool - invalid bool -} - -func (c *errorJobs) Update(job *batch.Job) (*batch.Job, error) { - switch { - case c.invalid: - return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil) - case c.conflict: - return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil) - } - return nil, errors.New("Job update failure") -} - -func (c *errorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) { - zero := int32(0) - return &batch.Job{ - Spec: batch.JobSpec{ - Parallelism: &zero, - }, - }, nil -} - -type errorJobClient struct { - batchclient.JobsGetter - conflict bool - invalid bool -} - -func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface { - return &errorJobs{ - JobInterface: c.JobsGetter.Jobs(namespace), - conflict: c.conflict, - invalid: c.invalid, - } -} - -func TestJobScaleRetry(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true} - scaler := 
ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake, nil, schema.GroupResource{}) - preconditions := ScalePrecondition{-1, ""} - count := uint(3) - name := "foo" - namespace := "default" - - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) - pass, err := scaleFunc() - if pass != false { - t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) - } - if err != nil { - t.Errorf("Did not expect an error on update failure, got %v", err) - } - preconditions = ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(scaler, &preconditions, namespace, name, count, nil) - pass, err = scaleFunc() - if err == nil { - t.Error("Expected error on precondition failure") - } -} - -func job() *batch.Job { - return &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - } -} - -func TestJobScale(t *testing.T) { - fakeClientset := fake.NewSimpleClientset(job()) - scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fakeClientset.Batch(), nil, schema.GroupResource{}) - preconditions := ScalePrecondition{-1, ""} - count := uint(3) - name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fakeClientset.Actions() - if len(actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) - } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name) - } - if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) { - t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count) - } -} - -func TestJobScaleInvalid(t *testing.T) { - fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true} - scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake, nil, schema.GroupResource{}) - preconditions := ScalePrecondition{-1, ""} - count := uint(3) - name := "foo" - namespace := "default" - - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) - pass, err := scaleFunc() - if pass { - t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) - } - if err == nil { - t.Errorf("Expected error on invalid update failure, got %v", err) - } -} - -func TestJobScaleFailsPreconditions(t *testing.T) { - ten := int32(10) - fake := fake.NewSimpleClientset(&batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &ten, - }, - }) - scaler := ScalerFor(schema.GroupKind{Group: batch.GroupName, Kind: "Job"}, fake.Batch(), nil, schema.GroupResource{}) - preconditions := ScalePrecondition{2, ""} - count := uint(3) - name := "foo" - scaler.Scale("default", name, count, &preconditions, nil, nil) - - actions := fake.Actions() - if len(actions) != 1 { - t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions) - } - if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name { - t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name) - } -} - -func TestValidateJob(t *testing.T) { - zero, ten, twenty := int32(0), 
int32(10), int32(20) - tests := []struct { - preconditions ScalePrecondition - job batch.Job - expectError bool - test string - }{ - { - preconditions: ScalePrecondition{-1, ""}, - expectError: false, - test: "defaults", - }, - { - preconditions: ScalePrecondition{-1, ""}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &ten, - }, - }, - expectError: false, - test: "defaults 2", - }, - { - preconditions: ScalePrecondition{0, ""}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &zero, - }, - }, - expectError: false, - test: "size matches", - }, - { - preconditions: ScalePrecondition{-1, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &ten, - }, - }, - expectError: false, - test: "resource version matches", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &ten, - }, - }, - expectError: false, - test: "both match", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - Spec: batch.JobSpec{ - Parallelism: &twenty, - }, - }, - expectError: true, - test: "size different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "foo", - }, - }, - expectError: true, - test: "parallelism nil", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: batch.JobSpec{ - Parallelism: &ten, - }, - }, - expectError: true, - test: "version different", - }, - { - preconditions: ScalePrecondition{10, "foo"}, - job: batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "bar", - }, - Spec: batch.JobSpec{ - Parallelism: &twenty, - }, - }, - expectError: true, - test: "both different", - }, - } - for _, test := range tests { - err := test.preconditions.ValidateJob(&test.job) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil && test.expectError { - t.Errorf("expected an error: %v (%s)", err, test.test) - } - } -} - func TestDeploymentScaleRetry(t *testing.T) { verbsOnError := map[string]*kerrors.StatusError{ "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClientExpectedAction := []string{"get", "update", "get"} scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) + scaler := NewScaler(scaleClient) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"}) pass, err := scaleFunc() if pass != false { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -429,7 +162,7 @@ func TestDeploymentScaleRetry(t *testing.T) { t.Errorf("Did not expect an error on update failure, got %v", err) } preconditions = &ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) + scaleFunc = 
ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"}) pass, err = scaleFunc() if err == nil { t.Error("Expected error on precondition failure") @@ -448,11 +181,11 @@ func TestDeploymentScaleRetry(t *testing.T) { func TestDeploymentScale(t *testing.T) { scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("deployments", "foo", 2, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "deployments"}) if err != nil { t.Fatal(err) } @@ -473,13 +206,13 @@ func TestDeploymentScaleInvalid(t *testing.T) { "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClient := createFakeScaleClient("deployments", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "deployments"}) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -501,11 +234,11 @@ func TestDeploymentScaleInvalid(t *testing.T) { func TestDeploymentScaleFailsPreconditions(t *testing.T) { scaleClientExpectedAction := []string{"get"} scaleClient := createFakeScaleClient("deployments", "foo", 10, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "deployments"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "deployments"}) if err == nil { t.Fatal("exptected to get an error but none was returned") } @@ -523,11 +256,11 @@ func TestDeploymentScaleFailsPreconditions(t *testing.T) { func TestStatefulSetScale(t *testing.T) { scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("statefulsets", "foo", 2, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefullset"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "statefullset"}) if err != nil { t.Fatal(err) } @@ -548,13 +281,13 @@ func TestStatefulSetScaleRetry(t *testing.T) { "update": kerrors.NewConflict(api.Resource("Status"), "foo", nil), } scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) + scaler := NewScaler(scaleClient) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - 
scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) pass, err := scaleFunc() if pass != false { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -563,7 +296,7 @@ func TestStatefulSetScaleRetry(t *testing.T) { t.Errorf("Did not expect an error on update failure, got %v", err) } preconditions = &ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) + scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) pass, err = scaleFunc() if err == nil { t.Error("Expected error on precondition failure") @@ -585,13 +318,13 @@ func TestStatefulSetScaleInvalid(t *testing.T) { "update": kerrors.NewInvalid(api.Kind("Status"), "foo", nil), } scaleClient := createFakeScaleClient("statefulsets", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -613,11 +346,11 @@ func TestStatefulSetScaleInvalid(t *testing.T) { func TestStatefulSetScaleFailsPreconditions(t *testing.T) { scaleClientExpectedAction := []string{"get"} scaleClient := createFakeScaleClient("statefulsets", "foo", 10, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "apps", Resource: "statefulsets"}) if err == nil { t.Fatal("expected to get an error but none was returned") } @@ -635,11 +368,11 @@ func TestStatefulSetScaleFailsPreconditions(t *testing.T) { func TestReplicaSetScale(t *testing.T) { scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) if err != nil { t.Fatal(err) } @@ -660,13 +393,13 @@ func TestReplicaSetScaleRetry(t *testing.T) { } scaleClientExpectedAction := []string{"get", "update", "get"} scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) + scaler := NewScaler(scaleClient) preconditions := &ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(scaler, 
preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) pass, err := scaleFunc() if pass != false { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -675,7 +408,7 @@ func TestReplicaSetScaleRetry(t *testing.T) { t.Errorf("Did not expect an error on update failure, got %v", err) } preconditions = &ScalePrecondition{3, ""} - scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil) + scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) pass, err = scaleFunc() if err == nil { t.Error("Expected error on precondition failure") @@ -697,13 +430,13 @@ func TestReplicaSetScaleInvalid(t *testing.T) { } scaleClientExpectedAction := []string{"get", "update"} scaleClient := createFakeScaleClient("replicasets", "foo", 2, verbsOnError) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{-1, ""} count := uint(3) name := "foo" namespace := "default" - scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil) + scaleFunc := ScaleCondition(scaler, &preconditions, namespace, name, count, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) pass, err := scaleFunc() if pass { t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) @@ -725,11 +458,11 @@ func TestReplicaSetScaleInvalid(t *testing.T) { func TestReplicaSetsGetterFailsPreconditions(t *testing.T) { scaleClientExpectedAction := []string{"get"} scaleClient := createFakeScaleClient("replicasets", "foo", 10, nil) - scaler := NewScaler(scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) + scaler := NewScaler(scaleClient) preconditions := ScalePrecondition{2, ""} count := uint(3) name := "foo" - err := scaler.Scale("default", name, count, &preconditions, nil, nil) + err := scaler.Scale("default", name, count, &preconditions, nil, nil, schema.GroupResource{Group: "extensions", Resource: "replicasets"}) if err == nil { t.Fatal("expected to get an error but non was returned") } @@ -812,9 +545,9 @@ func TestGenericScaleSimple(t *testing.T) { // act for index, scenario := range scenarios { t.Run(fmt.Sprintf("running scenario %d: %s", index+1, scenario.name), func(t *testing.T) { - target := NewScaler(scenario.scaleGetter, scenario.targetGR) + target := NewScaler(scenario.scaleGetter) - resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize)) + resVersion, err := target.ScaleSimple("default", scenario.resName, &scenario.precondition, uint(scenario.newSize), scenario.targetGR) if scenario.expectError && err == nil { t.Fatal("expected an error but was not returned") @@ -880,9 +613,9 @@ func TestGenericScale(t *testing.T) { // act for _, scenario := range scenarios { t.Run(scenario.name, func(t *testing.T) { - target := NewScaler(scenario.scaleGetter, scenario.targetGR) + target := NewScaler(scenario.scaleGetter) - err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas) + err := target.Scale("default", scenario.resName, uint(scenario.newSize), &scenario.precondition, nil, scenario.waitForReplicas, scenario.targetGR) if 
scenario.expectError && err == nil { t.Fatal("expected an error but was not returned") diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go index 911f8e41d549..9d8732a156e5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go @@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. - framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? @@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { restarter.kill() // This is best effort to try and create pods while the scheduler is down, // since we don't know exactly when it is restarted after the kill signal. - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false)) restarter.waitUp() - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true)) }) It("Kubelet should not restart containers across restart", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/examples.go b/vendor/k8s.io/kubernetes/test/e2e/examples.go index 6fa937c89f69..bb7427f5a467 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/examples.go +++ b/vendor/k8s.io/kubernetes/test/e2e/examples.go @@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling rethinkdb") - framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "rethinkdb-rc", 2, true) checkDbInstances() By("starting admin") @@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { Expect(err).NotTo(HaveOccurred()) By("scaling hazelcast") - framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "hazelcast", 2, true) forEachPod("name", "hazelcast", func(pod v1.Pod) { _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go index fe7dfab0f505..bd9b50c1bb94 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go @@ -31,7 +31,6 @@ import ( clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" testutils "k8s.io/kubernetes/test/utils" ) 
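The scale_test.go and e2e changes above all follow the shape of the new generic scaler: NewScaler now takes only a scale client, and the target GroupResource moves onto each Scale/ScaleSimple/ScaleCondition call. As a minimal sketch of a caller after this change (the helper name and the scalesGetter parameter are illustrative, not part of the patch):

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleDeployment scales through the generic scale subresource; the GroupResource
// is now supplied per call instead of being baked into the scaler at construction.
func scaleDeployment(scalesGetter scaleclient.ScalesGetter, ns, name string, replicas uint) error {
	scaler := kubectl.NewScaler(scalesGetter)
	precondition := &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""} // no precondition check
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	// retry and waitForReplicas are optional; nil means one attempt and no wait.
	return scaler.Scale(ns, name, replicas, precondition, nil, nil, gr)
}
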
@@ -179,8 +178,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er return err } -func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments")) +func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments")) } func RunDeployment(config testutils.DeploymentConfig) error { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go index 7e3fedbc76fa..ced976c48e95 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go @@ -85,9 +85,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str // ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till // none are running, otherwise it does what a synchronous scale operation would do. -//TODO(p0lyn0mial): remove internalClientset. -//TODO(p0lyn0mial): update the callers. -func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error { +func ScaleRCByLabels(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error { listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts) if err != nil { @@ -99,7 +97,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas) for _, labelRC := range rcs.Items { name := labelRC.Name - if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil { + if err := ScaleRC(clientset, scalesGetter, ns, name, replicas, false); err != nil { return err } rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) @@ -159,8 +157,8 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl return DeleteResourceAndPods(clientset, internalClientset, scaleClient, api.Kind("ReplicationController"), ns, name) } -func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers")) +func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { + return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers")) } func RunRC(config testutils.RCConfig) error { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index 
215f9d29bf0f..6b954b764d50 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -2782,7 +2782,6 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) { func ScaleResource( clientset clientset.Interface, - internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, @@ -2791,8 +2790,8 @@ func ScaleResource( gr schema.GroupResource, ) error { By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) - scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr) - if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size); err != nil { + scaler := kubectl.NewScaler(scalesGetter) + if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size, gr); err != nil { return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err) } if !wait { diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service.go b/vendor/k8s.io/kubernetes/test/e2e/network/service.go index d85c657ab95a..bddc4dc52ef7 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service.go @@ -1276,7 +1276,7 @@ var _ = SIGDescribe("Services", func() { } By("Scaling down replication controller to zero") - framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) By("Update service to not tolerate unready services") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { diff --git a/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go b/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go index e0a935dcfab9..1fb4cb48cf73 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scalability/load.go @@ -649,7 +649,6 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2) framework.ExpectNoError(framework.ScaleResource( config.GetClient(), - config.GetInternalClient(), config.GetScalesGetter(), config.GetNamespace(), config.GetName(), diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go index 9eb0d8a44a8d..bbf95edc9c20 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/equivalence_cache_predicates.go @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { By("Trying to schedule another equivalent Pod should fail due to node label has been removed.") // use scale to create another equivalent pod and wait for failure event WaitForSchedulerAfterAction(f, func() error { - err := framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false) + err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false) return err }, affinityRCName, false) // and this new pod should be rejected since node label has been updated diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go index 27ab9c08938f..2ac0b6f9c7ce 100644 --- 
a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go @@ -194,7 +194,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) - framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{ LabelSelector: "name=scheduler-priority-avoid-pod", }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go index a42c841d054f..919ef4e8ec7c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/rescheduler.go @@ -68,8 +68,8 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() { deployment := deployments.Items[0] replicas := uint(*(deployment.Spec.Replicas)) - err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true) - defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true)) + err = framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas+1, true) + defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.ScalesGetter, metav1.NamespaceSystem, deployment.Name, replicas, true)) framework.ExpectNoError(err) }) @@ -80,7 +80,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error { replicas := millicores / 100 reserveCpu(f, id, 1, 100) - framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false)) + framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, id, uint(replicas), false)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels) diff --git a/vendor/k8s.io/kubernetes/test/utils/update_resources.go b/vendor/k8s.io/kubernetes/test/utils/update_resources.go index df435295d2ba..b666454e54ac 100644 --- a/vendor/k8s.io/kubernetes/test/utils/update_resources.go +++ b/vendor/k8s.io/kubernetes/test/utils/update_resources.go @@ -20,6 +20,7 @@ import ( "fmt" "time" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/kubectl" ) @@ -32,10 +33,10 @@ const ( waitRetryTImeout = 5 * time.Minute ) -func ScaleResourceWithRetries(scaler kubectl.Scaler, namespace, name string, size uint) error { +func ScaleResourceWithRetries(scaler kubectl.Scaler, namespace, name string, size uint, gr schema.GroupResource) error { waitForScale := kubectl.NewRetryParams(updateRetryInterval, updateRetryTimeout) waitForReplicas := kubectl.NewRetryParams(waitRetryInterval, waitRetryTImeout) - if err := scaler.Scale(namespace, name, size, nil, waitForScale, waitForReplicas); err != nil { + if err := scaler.Scale(namespace, name, size, nil, waitForScale, waitForReplicas, gr); err != nil { return fmt.Errorf("Error while scaling %s to %d replicas: %v", name, size, err) } return nil From e52c233878b3f91ce13447f3aa73c094fa6170d6 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 10 Apr 
2018 08:13:52 -0400 Subject: [PATCH 4/6] UPSTREAM: 62336: add statefulset scaling permission to admins, editors, and viewers --- .../pkg/auth/authorizer/rbac/bootstrappolicy/policy.go | 9 ++++++--- .../rbac/bootstrappolicy/testdata/cluster-roles.yaml | 3 +++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 74d86d9d11e0..b02c2279cdbe 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -236,7 +236,8 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets", + rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources( + "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "deployments/rollback", "replicasets", "replicasets/scale").RuleOrDie(), @@ -275,7 +276,8 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets", + rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources( + "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "deployments/rollback", "replicasets", "replicasets/scale").RuleOrDie(), @@ -307,7 +309,8 @@ func ClusterRoles() []rbac.ClusterRole { // indicator of which namespaces you have access to. 
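These statefulsets/scale additions track the scaler rework: because scaling now always goes through the scale subresource, a role that grants only "statefulsets" would no longer be enough for kubectl scale statefulset. A rough sketch of the request pattern the rules have to allow, assuming a scaleclient.ScalesGetter wired for the cluster and using the signatures as vendored here (the helper name is illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// bumpStatefulSet reads and updates the statefulsets/scale subresource, which is
// why the admin, edit, and view rules need the extra "statefulsets/scale" entry
// (view only needs the read half).
func bumpStatefulSet(scales scaleclient.ScalesGetter, ns, name string, replicas int32) error {
	gr := schema.GroupResource{Group: "apps", Resource: "statefulsets"}
	scale, err := scales.Scales(ns).Get(gr, name) // GET .../statefulsets/<name>/scale
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = scales.Scales(ns).Update(gr, scale) // PUT .../statefulsets/<name>/scale
	return err
}
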
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets", + rbac.NewRule(Read...).Groups(appsGroup).Resources( + "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "replicasets", "replicasets/scale").RuleOrDie(), diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 4a78d8b1c88c..2cf31046c686 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -137,6 +137,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -329,6 +330,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -471,6 +473,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - get - list From ef04dae782624a1ba9be9f5a59a1702b92c39833 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 9 Apr 2018 10:32:49 -0400 Subject: [PATCH 5/6] generated --- contrib/completions/bash/oc | 2 -- contrib/completions/zsh/oc | 2 -- test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml | 3 +++ test/testdata/bootstrappolicy/bootstrap_policy_file.yaml | 3 +++ 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index f4a97e6f8193..addde0eab635 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ -17432,14 +17432,12 @@ _oc_scale() must_have_one_noun=() must_have_one_noun+=("deployment") must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("job") must_have_one_noun+=("replicaset") must_have_one_noun+=("replicationcontroller") must_have_one_noun+=("statefulset") noun_aliases=() noun_aliases+=("deploy") noun_aliases+=("deployments") - noun_aliases+=("jobs") noun_aliases+=("rc") noun_aliases+=("replicasets") noun_aliases+=("replicationcontrollers") diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc index d5caf0e21c01..df0c42e6aca1 100644 --- a/contrib/completions/zsh/oc +++ b/contrib/completions/zsh/oc @@ -17574,14 +17574,12 @@ _oc_scale() must_have_one_noun=() must_have_one_noun+=("deployment") must_have_one_noun+=("deploymentconfig") - must_have_one_noun+=("job") must_have_one_noun+=("replicaset") must_have_one_noun+=("replicationcontroller") must_have_one_noun+=("statefulset") noun_aliases=() noun_aliases+=("deploy") noun_aliases+=("deployments") - noun_aliases+=("jobs") noun_aliases+=("rc") noun_aliases+=("replicasets") noun_aliases+=("replicationcontrollers") diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 6df7c5dfccae..0b0335e38b28 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -5023,6 +5023,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -5216,6 +5217,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -5359,6 +5361,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - get - 
list diff --git a/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml b/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml index 700e9b993c77..6babf7f903c8 100644 --- a/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml @@ -5500,6 +5500,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -5706,6 +5707,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - create - delete @@ -5858,6 +5860,7 @@ items: - replicasets - replicasets/scale - statefulsets + - statefulsets/scale verbs: - get - list From e6467058c2ef1766de3eeda95804abab127ead77 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 9 Apr 2018 09:54:46 -0400 Subject: [PATCH 6/6] fit the new scaler impl --- pkg/apps/strategy/recreate/recreate.go | 2 +- pkg/apps/strategy/rolling/rolling.go | 3 +- pkg/apps/util/test/support.go | 10 +- pkg/apps/util/util.go | 11 +- pkg/cmd/infra/deployer/deployer.go | 2 +- .../cluster/app_create/setup_cleanup.go | 3 +- pkg/oc/cli/deploymentconfigs/delete.go | 27 ++-- pkg/oc/cli/deploymentconfigs/delete_test.go | 78 ---------- pkg/oc/cli/deploymentconfigs/scale.go | 101 ------------ pkg/oc/cli/deploymentconfigs/scale_test.go | 92 ----------- pkg/oc/cli/util/clientcmd/factory.go | 2 +- pkg/oc/cli/util/clientcmd/factory_builder.go | 144 ++++++++++++++++++ .../util/clientcmd/factory_object_mapping.go | 103 +------------ 13 files changed, 180 insertions(+), 398 deletions(-) delete mode 100644 pkg/oc/cli/deploymentconfigs/scale.go delete mode 100644 pkg/oc/cli/deploymentconfigs/scale_test.go create mode 100644 pkg/oc/cli/util/clientcmd/factory_builder.go diff --git a/pkg/apps/strategy/recreate/recreate.go b/pkg/apps/strategy/recreate/recreate.go index c13b974314ed..2499d4328b12 100644 --- a/pkg/apps/strategy/recreate/recreate.go +++ b/pkg/apps/strategy/recreate/recreate.go @@ -230,7 +230,7 @@ func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationCo } var scaleErr error err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) { - scaleErr = s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, retryParams) + scaleErr = s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, retryParams, kapi.Resource("replicationcontrollers")) if scaleErr == nil { return true, nil } diff --git a/pkg/apps/strategy/rolling/rolling.go b/pkg/apps/strategy/rolling/rolling.go index c49f4ff30942..d73c2f4ae377 100644 --- a/pkg/apps/strategy/rolling/rolling.go +++ b/pkg/apps/strategy/rolling/rolling.go @@ -93,6 +93,7 @@ func NewRollingDeploymentStrategy(namespace string, client kclientset.Interface, if errOut == nil { errOut = ioutil.Discard } + return &RollingDeploymentStrategy{ out: out, errOut: errOut, @@ -105,7 +106,7 @@ func NewRollingDeploymentStrategy(namespace string, client kclientset.Interface, apiRetryPeriod: defaultApiRetryPeriod, apiRetryTimeout: defaultApiRetryTimeout, rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error { - updater := kubectl.NewRollingUpdater(namespace, client.Core(), client.Core()) + updater := kubectl.NewRollingUpdater(namespace, client.Core(), client.Core(), appsutil.NewReplicationControllerV1ScaleClient(client)) return updater.Update(config) }, hookExecutor: stratsupport.NewHookExecutor(client.Core(), tags, 
client.Core(), os.Stdout, decoder), diff --git a/pkg/apps/util/test/support.go b/pkg/apps/util/test/support.go index 0a075c0ba252..23210162578c 100644 --- a/pkg/apps/util/test/support.go +++ b/pkg/apps/util/test/support.go @@ -17,12 +17,12 @@ type ScaleEvent struct { Size uint } -func (t *FakeScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams) error { +func (t *FakeScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams, resource schema.GroupResource) error { t.Events = append(t.Events, ScaleEvent{name, newSize}) return nil } -func (t *FakeScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) { +func (t *FakeScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint, resource schema.GroupResource) (string, error) { return "", fmt.Errorf("unexpected call to ScaleSimple") } @@ -31,17 +31,17 @@ type FakeLaggedScaler struct { RetryCount int } -func (t *FakeLaggedScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams) error { +func (t *FakeLaggedScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams, resource schema.GroupResource) error { if t.RetryCount != 2 { t.RetryCount += 1 // This is faking a real error from the // "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" package. - return errors.NewForbidden(schema.GroupResource{Resource: "ReplicationController"}, name, fmt.Errorf("%s: not yet ready to handle request", name)) + return errors.NewForbidden(resource, name, fmt.Errorf("%s: not yet ready to handle request", name)) } t.Events = append(t.Events, ScaleEvent{name, newSize}) return nil } -func (t *FakeLaggedScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) { +func (t *FakeLaggedScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint, resource schema.GroupResource) (string, error) { return "", nil } diff --git a/pkg/apps/util/util.go b/pkg/apps/util/util.go index 0f9c0fb6606f..fb70a566ad22 100644 --- a/pkg/apps/util/util.go +++ b/pkg/apps/util/util.go @@ -37,12 +37,11 @@ import ( ) func NewReplicationControllerV1Scaler(client kclientset.Interface) kubectl.Scaler { - return kubectl.ScalerFor( - kapi.Kind("ReplicationController"), - nil, - scaleclient.New(client.Core().RESTClient(), rcv1mapper{}, dynamic.LegacyAPIPathResolverFunc, rcv1mapper{}), - kapi.Resource("replicationcontrollers"), - ) + return kubectl.NewScaler(NewReplicationControllerV1ScaleClient(client)) +} + +func NewReplicationControllerV1ScaleClient(client kclientset.Interface) scaleclient.ScalesGetter { + return scaleclient.New(client.Core().RESTClient(), rcv1mapper{}, dynamic.LegacyAPIPathResolverFunc, rcv1mapper{}) } // rcv1mapper pins preferred version to v1 and scale kind to autoscaling/v1 Scale diff --git a/pkg/cmd/infra/deployer/deployer.go b/pkg/cmd/infra/deployer/deployer.go index 33b640b724bb..af2d32b89416 100644 --- a/pkg/cmd/infra/deployer/deployer.go +++ b/pkg/cmd/infra/deployer/deployer.go @@ -238,7 +238,7 @@ func (d *Deployer) Deploy(namespace, rcName string) error { } // Scale the deployment down to zero. 
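The deployer and strategy call sites in this commit keep their retry parameters and simply append the replicationcontrollers GroupResource, as the hunk that follows shows. A condensed sketch of that pattern (scaleDownRC is an illustrative name; the scaler is whatever NewReplicationControllerV1Scaler returns):

package example

import (
	"time"

	kapi "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleDownRC mirrors the updated deployer call: the same RetryParams are reused
// for the update retry and the replica wait, and the GroupResource pins the call
// to replicationcontrollers.
func scaleDownRC(scaler kubectl.Scaler, ns, name string) error {
	retryWait := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
	return scaler.Scale(ns, name, 0,
		&kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""},
		retryWait, retryWait, kapi.Resource("replicationcontrollers"))
}
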
retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second) - if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil { + if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams, kapi.Resource("replicationcontrollers")); err != nil { fmt.Fprintf(d.errOut, "error: Couldn't scale down prior deployment %s: %v\n", appsutil.LabelForDeployment(candidate), err) } else { fmt.Fprintf(d.out, "--> Scaled older deployment %s down\n", candidate.Name) diff --git a/pkg/oc/admin/diagnostics/diagnostics/cluster/app_create/setup_cleanup.go b/pkg/oc/admin/diagnostics/diagnostics/cluster/app_create/setup_cleanup.go index b5f9c20679fb..41376d163ba3 100644 --- a/pkg/oc/admin/diagnostics/diagnostics/cluster/app_create/setup_cleanup.go +++ b/pkg/oc/admin/diagnostics/diagnostics/cluster/app_create/setup_cleanup.go @@ -8,6 +8,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + appsutil "github.com/openshift/origin/pkg/apps/util" "github.com/openshift/origin/pkg/cmd/server/bootstrappolicy" newproject "github.com/openshift/origin/pkg/oc/admin/project" appscmd "github.com/openshift/origin/pkg/oc/cli/deploymentconfigs" @@ -82,7 +83,7 @@ func (d *AppCreate) cleanupApp() { d.out.Debug("DCluAC043", fmt.Sprintf("%s: Deleting components of app '%s' if present.", now(), d.appName)) // reap the DC's deployments first - if err := appscmd.NewDeploymentConfigReaper(d.AppsClient, d.KubeClient).Stop(d.project, d.appName, time.Duration(1)*time.Second, nil); err != nil { + if err := appscmd.NewDeploymentConfigReaper(d.AppsClient, d.KubeClient, appsutil.NewReplicationControllerV1ScaleClient(d.KubeClient)).Stop(d.project, d.appName, time.Duration(1)*time.Second, nil); err != nil { errs = append(errs, err) } diff --git a/pkg/oc/cli/deploymentconfigs/delete.go b/pkg/oc/cli/deploymentconfigs/delete.go index 396dd09e5c32..a6dc68a97ae8 100644 --- a/pkg/oc/cli/deploymentconfigs/delete.go +++ b/pkg/oc/cli/deploymentconfigs/delete.go @@ -8,6 +8,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + scaleclient "k8s.io/client-go/scale" kapi "k8s.io/kubernetes/pkg/apis/core" kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" @@ -18,14 +19,15 @@ import ( ) // NewDeploymentConfigReaper returns a new reaper for deploymentConfigs -func NewDeploymentConfigReaper(appsClient appsclient.Interface, kc kclientset.Interface) kubectl.Reaper { - return &DeploymentConfigReaper{appsClient: appsClient, kc: kc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout} +func NewDeploymentConfigReaper(appsClient appsclient.Interface, kc kclientset.Interface, scaleClient scaleclient.ScalesGetter) kubectl.Reaper { + return &DeploymentConfigReaper{appsClient: appsClient, kc: kc, scaleClient: scaleClient, pollInterval: kubectl.Interval, timeout: kubectl.Timeout} } // DeploymentConfigReaper implements the Reaper interface for deploymentConfigs type DeploymentConfigReaper struct { appsClient appsclient.Interface kc kclientset.Interface + scaleClient scaleclient.ScalesGetter pollInterval, timeout time.Duration } @@ -85,10 +87,6 @@ func (reaper *DeploymentConfigReaper) Stop(namespace, name string, 
timeout time. if err != nil { return err } - rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc) - if err != nil { - return err - } // If there is neither a config nor any deployments, nor any deployer pods, we can return NotFound. deployments := rcList.Items @@ -98,9 +96,20 @@ func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time. } for _, rc := range deployments { - if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { - // Better not error out here... - glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err) + // this is unnecessary since the ownership is present + if reaper.scaleClient != nil { + rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc, reaper.scaleClient) + if err != nil { + return err + } + if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { + // Better not error out here... + glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err) + } + } else { + if err := reaper.kc.Core().ReplicationControllers(rc.Namespace).Delete(rc.Name, nil); err != nil { + glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err) + } } // Only remove deployer pods when the deployment was failed. For completed diff --git a/pkg/oc/cli/deploymentconfigs/delete_test.go b/pkg/oc/cli/deploymentconfigs/delete_test.go index 2e4f7b41abda..70bbe51748b2 100644 --- a/pkg/oc/cli/deploymentconfigs/delete_test.go +++ b/pkg/oc/cli/deploymentconfigs/delete_test.go @@ -85,12 +85,6 @@ func TestStop(t *testing.T) { }, kexpected: []clientgotesting.Action{ clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"}).String()}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-1"), }, err: false, @@ -108,12 +102,6 @@ func TestStop(t *testing.T) { }, kexpected: []clientgotesting.Action{ clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"}).String()}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - 
clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-1"), }, err: false, @@ -131,40 +119,10 @@ func TestStop(t *testing.T) { }, kexpected: []clientgotesting.Action{ clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"}).String()}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", 
"config-5"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-5"), }, err: false, @@ -182,40 +140,10 @@ func TestStop(t *testing.T) { }, kexpected: []clientgotesting.Action{ clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"}).String()}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-2"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-2"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-3"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-3"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-4"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-4"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, 
"default", "config-5"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-5"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-5"), }, err: false, @@ -231,12 +159,6 @@ func TestStop(t *testing.T) { }, kexpected: []clientgotesting.Action{ clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"}).String()}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewListAction(replicationControllersResource, replicationControllerKind, "default", metav1.ListOptions{}), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewUpdateAction(replicationControllersResource, "default", nil), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), - clientgotesting.NewGetAction(replicationControllersResource, "default", "config-1"), clientgotesting.NewDeleteAction(replicationControllersResource, "default", "config-1"), }, err: false, diff --git a/pkg/oc/cli/deploymentconfigs/scale.go b/pkg/oc/cli/deploymentconfigs/scale.go deleted file mode 100644 index 1792fbacdb6a..000000000000 --- a/pkg/oc/cli/deploymentconfigs/scale.go +++ /dev/null @@ -1,101 +0,0 @@ -package deploymentconfigs - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - kapi "k8s.io/kubernetes/pkg/apis/core" - kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/kubectl" - - appsclient "github.com/openshift/origin/pkg/apps/generated/internalclientset" - appsinternal "github.com/openshift/origin/pkg/apps/generated/internalclientset/typed/apps/internalversion" - "github.com/openshift/origin/pkg/apps/util" -) - -// NewDeploymentConfigScaler returns a new scaler for deploymentConfigs -func NewDeploymentConfigScaler(appsClient appsclient.Interface, kc kclientset.Interface) kubectl.Scaler { - return &DeploymentConfigScaler{rcClient: kc.Core(), dcClient: appsClient.Apps(), clientInterface: kc} -} - -// DeploymentConfigScaler is a wrapper for the kubectl Scaler client -type DeploymentConfigScaler struct { - rcClient kcoreclient.ReplicationControllersGetter - dcClient appsinternal.DeploymentConfigsGetter - - clientInterface kclientset.Interface -} - -// Scale updates the DeploymentConfig with the provided namespace/name, to a -// new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for its -// deployment replica count to reach the new value (if wait is not nil). 
-func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error { - if preconditions == nil { - preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - // TODO: convert to a watch and use resource version from the ScaleCondition - kubernetes/kubernetes#31051 - if waitForReplicas != nil { - dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc), metav1.GetOptions{}) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, controllerHasSpecifiedReplicas(scaler.clientInterface, rc, dc.Spec.Replicas)) - } - return nil -} - -// ScaleSimple does a simple one-shot attempt at scaling - not useful on its -// own, but a necessary building block for Scale. -func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) { - scale, err := scaler.dcClient.DeploymentConfigs(namespace).GetScale(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - scale.Spec.Replicas = int32(newSize) - updated, err := scaler.dcClient.DeploymentConfigs(namespace).UpdateScale(name, scale) - if err != nil { - return "", kubectl.ScaleError{FailureType: kubectl.ScaleUpdateFailure, ResourceVersion: "Unknown", ActualError: err} - } - return updated.ResourceVersion, nil -} - -// controllerHasSpecifiedReplicas returns a condition that will be true if and -// only if the specified replica count for a controller's ReplicaSelector -// equals the Replicas count. -// -// This is a slightly modified version of -// metav1.ControllerHasDesiredReplicas. This is necessary because when -// scaling an RC via a DC, the RC spec replica count is not immediately -// updated to match the owning DC. -func controllerHasSpecifiedReplicas(c kclientset.Interface, controller *kapi.ReplicationController, specifiedReplicas int32) wait.ConditionFunc { - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := c.Core().ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. 
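The retry plumbing this deleted Scale method hand-rolled is what the generic scaler already provides: ScaleCondition wraps a single ScaleSimple attempt and wait.Poll re-runs it. A small sketch using the post-patch signature (names are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleWithRetry retries a single-shot scale until it succeeds or the retry
// window closes; retryable update conflicts surface as a false condition with a
// nil error, so the poll simply tries again.
func scaleWithRetry(scaler kubectl.Scaler, ns, name string, size uint, gr schema.GroupResource, retry *kubectl.RetryParams) error {
	precondition := &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	cond := kubectl.ScaleCondition(scaler, precondition, ns, name, size, nil, gr)
	return wait.Poll(retry.Interval, retry.Timeout, cond)
}
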
- return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == specifiedReplicas, nil - } -} diff --git a/pkg/oc/cli/deploymentconfigs/scale_test.go b/pkg/oc/cli/deploymentconfigs/scale_test.go deleted file mode 100644 index 1c0dbfb9ce4f..000000000000 --- a/pkg/oc/cli/deploymentconfigs/scale_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package deploymentconfigs - -import ( - "testing" - "time" - - extensions "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - clientgotesting "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - "k8s.io/kubernetes/pkg/kubectl" - - appsapi "github.com/openshift/origin/pkg/apps/apis/apps" - _ "github.com/openshift/origin/pkg/apps/apis/apps/install" - appstest "github.com/openshift/origin/pkg/apps/apis/apps/test" - appsfake "github.com/openshift/origin/pkg/apps/generated/internalclientset/fake" - appsutil "github.com/openshift/origin/pkg/apps/util" -) - -func TestScale(t *testing.T) { - tests := []struct { - name string - size uint - wait bool - errExpected bool - }{ - { - name: "simple scale", - size: 2, - wait: false, - errExpected: false, - }, - { - name: "scale with wait", - size: 2, - wait: true, - errExpected: false, - }, - } - - for _, test := range tests { - t.Logf("evaluating test %q", test.name) - oc := &appsfake.Clientset{} - kc := &fake.Clientset{} - scaler := NewDeploymentConfigScaler(oc, kc) - - config := appstest.OkDeploymentConfig(1) - config.Spec.Replicas = 1 - deployment, _ := appsutil.MakeDeployment(config, legacyscheme.Codecs.LegacyCodec(appsapi.SchemeGroupVersion)) - - var wait *kubectl.RetryParams - if test.wait { - wait = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Second} - } - - oc.AddReactor("get", "deploymentconfigs", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() == "scale" { - return true, &extensions.Scale{Spec: extensions.ScaleSpec{}}, nil - } - return true, config, nil - }) - oc.AddReactor("update", "deploymentconfigs", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "scale" { - return true, nil, nil - } - // Simulate the asynchronous update of the RC replicas based on the - // scale replica count. 
- scale := action.(clientgotesting.UpdateAction).GetObject().(*extensions.Scale) - scale.Status.Replicas = scale.Spec.Replicas - config.Spec.Replicas = scale.Spec.Replicas - deployment.Spec.Replicas = scale.Spec.Replicas - deployment.Status.Replicas = deployment.Spec.Replicas - return true, scale, nil - }) - kc.AddReactor("get", "replicationcontrollers", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { - return true, deployment, nil - }) - - err := scaler.Scale("default", config.Name, test.size, nil, nil, wait) - if err != nil { - if !test.errExpected { - t.Errorf("unexpected error: %s", err) - continue - } - } - - if e, a := config.Spec.Replicas, deployment.Spec.Replicas; e != a { - t.Errorf("expected rc/%s replicas %d, got %d", deployment.Name, e, a) - } - } -} diff --git a/pkg/oc/cli/util/clientcmd/factory.go b/pkg/oc/cli/util/clientcmd/factory.go index 3ac64efa45a4..4832f0b29cd0 100644 --- a/pkg/oc/cli/util/clientcmd/factory.go +++ b/pkg/oc/cli/util/clientcmd/factory.go @@ -58,7 +58,7 @@ var _ kcmdutil.Factory = &Factory{} func NewFactory(optionalClientConfig kclientcmd.ClientConfig) *Factory { clientAccessFactory := NewClientAccessFactory(optionalClientConfig) objectMappingFactory := NewObjectMappingFactory(clientAccessFactory) - builderFactory := kcmdutil.NewBuilderFactory(clientAccessFactory, objectMappingFactory) + builderFactory := NewBuilderFactory(clientAccessFactory, objectMappingFactory) return &Factory{ ClientAccessFactory: clientAccessFactory, diff --git a/pkg/oc/cli/util/clientcmd/factory_builder.go b/pkg/oc/cli/util/clientcmd/factory_builder.go new file mode 100644 index 000000000000..8317356fd1f8 --- /dev/null +++ b/pkg/oc/cli/util/clientcmd/factory_builder.go @@ -0,0 +1,144 @@ +package clientcmd + +import ( + "k8s.io/apimachinery/pkg/api/meta" + scaleclient "k8s.io/client-go/scale" + "k8s.io/kubernetes/pkg/kubectl" + kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/plugins" + "k8s.io/kubernetes/pkg/kubectl/resource" + + appsapi "github.com/openshift/origin/pkg/apps/apis/apps" + appsclient "github.com/openshift/origin/pkg/apps/generated/internalclientset" + authorizationapi "github.com/openshift/origin/pkg/authorization/apis/authorization" + authorizationreaper "github.com/openshift/origin/pkg/authorization/reaper" + buildapi "github.com/openshift/origin/pkg/build/apis/build" + buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset" + buildcmd "github.com/openshift/origin/pkg/oc/cli/builds" + deploymentcmd "github.com/openshift/origin/pkg/oc/cli/deploymentconfigs" + userapi "github.com/openshift/origin/pkg/user/apis/user" + authenticationreaper "github.com/openshift/origin/pkg/user/reaper" +) + +type ring2Factory struct { + clientAccessFactory ClientAccessFactory + objectMappingFactory kcmdutil.ObjectMappingFactory + kubeBuilderFactory kcmdutil.BuilderFactory +} + +func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFactory kcmdutil.ObjectMappingFactory) kcmdutil.BuilderFactory { + return &ring2Factory{ + clientAccessFactory: clientAccessFactory, + objectMappingFactory: objectMappingFactory, + kubeBuilderFactory: kcmdutil.NewBuilderFactory(clientAccessFactory, objectMappingFactory), + } +} + +// NewBuilder returns a new resource builder for structured api objects. 
+func (f *ring2Factory) NewBuilder() *resource.Builder { + return f.kubeBuilderFactory.NewBuilder() +} + +// PluginLoader loads plugins from a path set by the KUBECTL_PLUGINS_PATH env var. +// If this env var is not set, it defaults to +// "~/.kube/plugins", plus +// "./kubectl/plugins" directory under the "data dir" directory specified by the XDG +// system directory structure spec for the given platform. +func (f *ring2Factory) PluginLoader() plugins.PluginLoader { + return f.kubeBuilderFactory.PluginLoader() +} + +func (f *ring2Factory) PluginRunner() plugins.PluginRunner { + return f.kubeBuilderFactory.PluginRunner() +} + +func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) { + return f.kubeBuilderFactory.ScaleClient() +} + +func (f *ring2Factory) Scaler() (kubectl.Scaler, error) { + return f.kubeBuilderFactory.Scaler() +} + +func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { + gk := mapping.GroupVersionKind.GroupKind() + switch { + case appsapi.IsKindOrLegacy("DeploymentConfig", gk): + kc, err := f.clientAccessFactory.ClientSet() + if err != nil { + return nil, err + } + config, err := f.clientAccessFactory.OpenShiftClientConfig().ClientConfig() + if err != nil { + return nil, err + } + scaleClient, err := f.ScaleClient() + if err != nil { + return nil, err + } + return deploymentcmd.NewDeploymentConfigReaper(appsclient.NewForConfigOrDie(config), kc, scaleClient), nil + case authorizationapi.IsKindOrLegacy("Role", gk): + authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() + if err != nil { + return nil, err + } + return authorizationreaper.NewRoleReaper(authClient.Authorization(), authClient.Authorization()), nil + case authorizationapi.IsKindOrLegacy("ClusterRole", gk): + authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() + if err != nil { + return nil, err + } + return authorizationreaper.NewClusterRoleReaper(authClient.Authorization(), authClient.Authorization(), authClient.Authorization()), nil + case userapi.IsKindOrLegacy("User", gk): + userClient, err := f.clientAccessFactory.OpenshiftInternalUserClient() + if err != nil { + return nil, err + } + authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() + if err != nil { + return nil, err + } + oauthClient, err := f.clientAccessFactory.OpenshiftInternalOAuthClient() + if err != nil { + return nil, err + } + securityClient, err := f.clientAccessFactory.OpenshiftInternalSecurityClient() + if err != nil { + return nil, err + } + return authenticationreaper.NewUserReaper( + userClient, + userClient, + authClient, + authClient, + oauthClient, + securityClient.Security().SecurityContextConstraints(), + ), nil + case userapi.IsKindOrLegacy("Group", gk): + userClient, err := f.clientAccessFactory.OpenshiftInternalUserClient() + if err != nil { + return nil, err + } + authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() + if err != nil { + return nil, err + } + securityClient, err := f.clientAccessFactory.OpenshiftInternalSecurityClient() + if err != nil { + return nil, err + } + return authenticationreaper.NewGroupReaper( + userClient, + authClient, + authClient, + securityClient.Security().SecurityContextConstraints(), + ), nil + case buildapi.IsKindOrLegacy("BuildConfig", gk): + config, err := f.clientAccessFactory.OpenShiftClientConfig().ClientConfig() + if err != nil { + return nil, err + } + return buildcmd.NewBuildConfigReaper(buildclient.NewForConfigOrDie(config)), nil + } + 
return f.kubeBuilderFactory.Reaper(mapping) +} diff --git a/pkg/oc/cli/util/clientcmd/factory_object_mapping.go b/pkg/oc/cli/util/clientcmd/factory_object_mapping.go index 50f1529dff39..6d8198b19872 100644 --- a/pkg/oc/cli/util/clientcmd/factory_object_mapping.go +++ b/pkg/oc/cli/util/clientcmd/factory_object_mapping.go @@ -6,8 +6,6 @@ import ( "sort" "time" - "k8s.io/kubernetes/pkg/kubectl/categories" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,6 +16,7 @@ import ( kapi "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/categories" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -29,19 +28,13 @@ import ( appsmanualclient "github.com/openshift/origin/pkg/apps/client/internalversion" appsclient "github.com/openshift/origin/pkg/apps/generated/internalclientset" appsutil "github.com/openshift/origin/pkg/apps/util" - authorizationapi "github.com/openshift/origin/pkg/authorization/apis/authorization" - authorizationreaper "github.com/openshift/origin/pkg/authorization/reaper" buildapi "github.com/openshift/origin/pkg/build/apis/build" buildmanualclient "github.com/openshift/origin/pkg/build/client/internalversion" - buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset" buildutil "github.com/openshift/origin/pkg/build/util" configcmd "github.com/openshift/origin/pkg/bulk" imageapi "github.com/openshift/origin/pkg/image/apis/image" - buildcmd "github.com/openshift/origin/pkg/oc/cli/builds" deploymentcmd "github.com/openshift/origin/pkg/oc/cli/deploymentconfigs" "github.com/openshift/origin/pkg/oc/cli/describe" - userapi "github.com/openshift/origin/pkg/user/apis/user" - authenticationreaper "github.com/openshift/origin/pkg/user/reaper" ) type ring1Factory struct { @@ -186,100 +179,6 @@ func (f *ring1Factory) LogsForObject(object, options runtime.Object, timeout tim } } -func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - if appsapi.IsKindOrLegacy("DeploymentConfig", mapping.GroupVersionKind.GroupKind()) { - kc, err := f.clientAccessFactory.ClientSet() - if err != nil { - return nil, err - } - config, err := f.clientAccessFactory.OpenShiftClientConfig().ClientConfig() - if err != nil { - return nil, err - } - return deploymentcmd.NewDeploymentConfigScaler(appsclient.NewForConfigOrDie(config), kc), nil - } - return f.kubeObjectMappingFactory.Scaler(mapping) -} - -func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - gk := mapping.GroupVersionKind.GroupKind() - switch { - case appsapi.IsKindOrLegacy("DeploymentConfig", gk): - kc, err := f.clientAccessFactory.ClientSet() - if err != nil { - return nil, err - } - config, err := f.clientAccessFactory.OpenShiftClientConfig().ClientConfig() - if err != nil { - return nil, err - } - return deploymentcmd.NewDeploymentConfigReaper(appsclient.NewForConfigOrDie(config), kc), nil - case authorizationapi.IsKindOrLegacy("Role", gk): - authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() - if err != nil { - return nil, err - } - return 
authorizationreaper.NewRoleReaper(authClient.Authorization(), authClient.Authorization()), nil - case authorizationapi.IsKindOrLegacy("ClusterRole", gk): - authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() - if err != nil { - return nil, err - } - return authorizationreaper.NewClusterRoleReaper(authClient.Authorization(), authClient.Authorization(), authClient.Authorization()), nil - case userapi.IsKindOrLegacy("User", gk): - userClient, err := f.clientAccessFactory.OpenshiftInternalUserClient() - if err != nil { - return nil, err - } - authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() - if err != nil { - return nil, err - } - oauthClient, err := f.clientAccessFactory.OpenshiftInternalOAuthClient() - if err != nil { - return nil, err - } - securityClient, err := f.clientAccessFactory.OpenshiftInternalSecurityClient() - if err != nil { - return nil, err - } - return authenticationreaper.NewUserReaper( - userClient, - userClient, - authClient, - authClient, - oauthClient, - securityClient.Security().SecurityContextConstraints(), - ), nil - case userapi.IsKindOrLegacy("Group", gk): - userClient, err := f.clientAccessFactory.OpenshiftInternalUserClient() - if err != nil { - return nil, err - } - authClient, err := f.clientAccessFactory.OpenshiftInternalAuthorizationClient() - if err != nil { - return nil, err - } - securityClient, err := f.clientAccessFactory.OpenshiftInternalSecurityClient() - if err != nil { - return nil, err - } - return authenticationreaper.NewGroupReaper( - userClient, - authClient, - authClient, - securityClient.Security().SecurityContextConstraints(), - ), nil - case buildapi.IsKindOrLegacy("BuildConfig", gk): - config, err := f.clientAccessFactory.OpenShiftClientConfig().ClientConfig() - if err != nil { - return nil, err - } - return buildcmd.NewBuildConfigReaper(buildclient.NewForConfigOrDie(config)), nil - } - return f.kubeObjectMappingFactory.Reaper(mapping) -} - func (f *ring1Factory) HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { if appsapi.IsKindOrLegacy("DeploymentConfig", mapping.GroupVersionKind.GroupKind()) { kc, err := f.clientAccessFactory.ClientSet()
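(Aside: the Reaper cases removed from factory_object_mapping.go here are re-added verbatim in the new factory_builder.go, so command behavior is unchanged. Below is a minimal sketch of how a caller consumes the relocated Reaper through the composed factory, assuming the kubectl.Reaper Stop signature of this vendor snapshot; the helper name, namespace, and timeout are illustrative.)

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)

// stopObject is a hypothetical helper: it resolves the Reaper for the given
// mapping via the factory (now served by the builder ring) and stops the object.
func stopObject(f kcmdutil.Factory, mapping *meta.RESTMapping, namespace, name string) error {
	reaper, err := f.Reaper(mapping)
	if err != nil {
		return err
	}
	// A nil grace period keeps the default delete options.
	return reaper.Stop(namespace, name, 1*time.Minute, nil)
}
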