diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
index 65a7a6f17021..646226d27782 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
@@ -360,6 +360,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 	}
 
 	mappings, err := a.mapper.RESTMappings(targetGK)
+	mappings, err = overrideMappingsForOapiDeploymentConfig(mappings, err, targetGK)
 	if err != nil {
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/patch_dc.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/patch_dc.go
new file mode 100644
index 000000000000..8c8821a1ac4a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/patch_dc.go
@@ -0,0 +1,22 @@
+package podautoscaler
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+)
+
+func overrideMappingsForOapiDeploymentConfig(mappings []*apimeta.RESTMapping, err error, targetGK schema.GroupKind) ([]*apimeta.RESTMapping, error) {
+	if (targetGK == schema.GroupKind{Kind: "DeploymentConfig"}) {
+		err = nil
+		// NB: we don't convert to apps.openshift.io here since the patched scale client
+		// will do it for us.
+		mappings = []*apimeta.RESTMapping{
+			{
+				Resource:         "deploymentconfigs",
+				GroupVersionKind: targetGK.WithVersion("v1"),
+			},
+		}
+	}
+	return mappings, err
+}
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go
index 3f85197a0b6f..0cc915625d20 100644
--- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/client.go
@@ -89,6 +89,8 @@ func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string,
 		return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err)
 	}
 
+	gvr = correctOapiDeploymentConfig(gvr) // TODO(directxman12): remove when /oapi is removed
+
 	groupVer := gvr.GroupVersion()
 
 	// we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set)
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/patch_dc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/patch_dc.go
new file mode 100644
index 000000000000..e4dbb1ea5b69
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/scale/patch_dc.go
@@ -0,0 +1,24 @@
+package scale
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	oappsv1 "github.com/openshift/api/apps/v1"
+)
+
+var dcGVR = schema.GroupVersionResource{
+	Group:    "",
+	Version:  "v1",
+	Resource: "deploymentconfigs",
+}
+
+func correctOapiDeploymentConfig(gvr schema.GroupVersionResource) schema.GroupVersionResource {
+	// TODO(directxman12): this is a dirty, dirty hack because oapi just appears in discovery as "/v1", like
+	// the kube core API.  We can remove it if/when we get rid of the legacy oapi group entirely.  It makes me
+	// cry a bit inside, but such is life.
+	if gvr == dcGVR {
+		return oappsv1.SchemeGroupVersion.WithResource(gvr.Resource)
+	}
+
+	return gvr
+}
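Not part of the diff above: a minimal sketch of an in-package unit test (hypothetical file, e.g. pkg/controller/podautoscaler/patch_dc_test.go) illustrating what the mapping override does. It assumes the same client-go vintage as the patch, where apimeta.RESTMapping.Resource is still a plain string rather than a GroupVersionResource.

```go
package podautoscaler

import (
	"fmt"
	"testing"

	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Hypothetical illustration, not part of the patch: the override swallows the
// lookup error for the legacy ("", v1) DeploymentConfig kind and substitutes a
// synthetic v1 "deploymentconfigs" mapping; every other kind passes through.
func TestOverrideMappingsForOapiDeploymentConfig(t *testing.T) {
	// Legacy oapi DeploymentConfig: the prior error is dropped and a mapping is synthesized.
	dcGK := schema.GroupKind{Kind: "DeploymentConfig"}
	mappings, err := overrideMappingsForOapiDeploymentConfig(nil, fmt.Errorf("no mapping"), dcGK)
	if err != nil || len(mappings) != 1 || mappings[0].Resource != "deploymentconfigs" {
		t.Fatalf("unexpected override result: %#v, %v", mappings, err)
	}

	// Any other kind: mappings and error are returned unchanged.
	rsGK := schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}
	orig := []*apimeta.RESTMapping{{Resource: "replicasets", GroupVersionKind: rsGK.WithVersion("v1")}}
	passthrough, err := overrideMappingsForOapiDeploymentConfig(orig, nil, rsGK)
	if err != nil || len(passthrough) != 1 || passthrough[0].Resource != "replicasets" {
		t.Fatalf("expected passthrough, got: %#v, %v", passthrough, err)
	}
}
```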