diff --git a/pkg/oc/cli/cli.go b/pkg/oc/cli/cli.go index 30b91e643267..67cf8339db86 100644 --- a/pkg/oc/cli/cli.go +++ b/pkg/oc/cli/cli.go @@ -12,6 +12,7 @@ import ( kubecmd "k8s.io/kubernetes/pkg/kubectl/cmd" ktemplates "k8s.io/kubernetes/pkg/kubectl/cmd/templates" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/resource" "github.com/openshift/origin/pkg/cmd/flagtypes" "github.com/openshift/origin/pkg/cmd/templates" @@ -315,6 +316,9 @@ func CommandFor(basename string) *cobra.Command { case "kubectl": cmd = kubecmd.NewKubectlCommand(kcmdutil.NewFactory(nil), in, out, errout) default: + // we only need this change for `oc`. `kubectl` should behave as close to `kubectl` as we can + resource.OAPIToGroupified = OAPIToGroupified + kcmdutil.OAPIToGroupifiedGVK = OAPIToGroupifiedGVK cmd = NewCommandCLI("oc", "oc", in, out, errout) } diff --git a/pkg/oc/cli/cmd/version.go b/pkg/oc/cli/cmd/version.go index 94026e72a56a..c920bc49ae31 100644 --- a/pkg/oc/cli/cmd/version.go +++ b/pkg/oc/cli/cmd/version.go @@ -75,7 +75,7 @@ func (o *VersionOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, out o.Clients = f.ClientSet var err error o.ClientConfig, err = f.ClientConfig() - if err != nil && !clientcmd.IsConfigurationMissing(err) { + if err != nil && !kclientcmd.IsEmptyConfig(err) { return err } diff --git a/pkg/oc/cli/groupification.go b/pkg/oc/cli/groupification.go new file mode 100644 index 000000000000..58f13b040d72 --- /dev/null +++ b/pkg/oc/cli/groupification.go @@ -0,0 +1,190 @@ +package cli + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + appsv1 "github.com/openshift/api/apps/v1" + authorizationv1 "github.com/openshift/api/authorization/v1" + buildv1 "github.com/openshift/api/build/v1" + imagev1 "github.com/openshift/api/image/v1" + networkv1 
"github.com/openshift/api/network/v1" + oauthv1 "github.com/openshift/api/oauth/v1" + projectv1 "github.com/openshift/api/project/v1" + quotav1 "github.com/openshift/api/quota/v1" + routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" + templatev1 "github.com/openshift/api/template/v1" + userv1 "github.com/openshift/api/user/v1" + "github.com/openshift/origin/pkg/apps/apis/apps" + "github.com/openshift/origin/pkg/authorization/apis/authorization" + "github.com/openshift/origin/pkg/build/apis/build" + "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/network/apis/network" + "github.com/openshift/origin/pkg/oauth/apis/oauth" + "github.com/openshift/origin/pkg/project/apis/project" + "github.com/openshift/origin/pkg/quota/apis/quota" + "github.com/openshift/origin/pkg/route/apis/route" + "github.com/openshift/origin/pkg/security/apis/security" + "github.com/openshift/origin/pkg/template/apis/template" + "github.com/openshift/origin/pkg/user/apis/user" +) + +func OAPIToGroupifiedGVK(gvk *schema.GroupVersionKind) { + if len(gvk.Group) > 0 { + return + } + + newGroup, ok := oapiKindsToGroup[gvk.Kind] + if !ok { + return + } + gvk.Group = newGroup +} + +func OAPIToGroupified(uncast runtime.Object, gvk *schema.GroupVersionKind) { + if len(gvk.Group) > 0 { + return + } + + switch obj := uncast.(type) { + case *unstructured.Unstructured: + newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) + if len(newGroup) > 0 { + gvk.Group = newGroup + } + case *unstructured.UnstructuredList: + newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) + if len(newGroup) > 0 { + gvk.Group = newGroup + 
} + + case *apps.DeploymentConfig, *appsv1.DeploymentConfig, *apps.DeploymentConfigList, *appsv1.DeploymentConfigList, + *apps.DeploymentConfigRollback, *appsv1.DeploymentConfigRollback: + gvk.Group = apps.GroupName + + case *authorization.ClusterRoleBinding, *authorizationv1.ClusterRoleBinding, *authorization.ClusterRoleBindingList, *authorizationv1.ClusterRoleBindingList, + *authorization.ClusterRole, *authorizationv1.ClusterRole, *authorization.ClusterRoleList, *authorizationv1.ClusterRoleList, + *authorization.Role, *authorizationv1.Role, *authorization.RoleList, *authorizationv1.RoleList, + *authorization.RoleBinding, *authorizationv1.RoleBinding, *authorization.RoleBindingList, *authorizationv1.RoleBindingList, + *authorization.RoleBindingRestriction, *authorizationv1.RoleBindingRestriction, *authorization.RoleBindingRestrictionList, *authorizationv1.RoleBindingRestrictionList: + gvk.Group = authorization.GroupName + + case *build.BuildConfig, *buildv1.BuildConfig, *build.BuildConfigList, *buildv1.BuildConfigList, + *build.Build, *buildv1.Build, *build.BuildList, *buildv1.BuildList: + gvk.Group = build.GroupName + + case *image.Image, *imagev1.Image, *image.ImageList, *imagev1.ImageList, + *image.ImageSignature, *imagev1.ImageSignature, + *image.ImageStreamImage, *imagev1.ImageStreamImage, + *image.ImageStreamImport, *imagev1.ImageStreamImport, + *image.ImageStreamMapping, *imagev1.ImageStreamMapping, + *image.ImageStream, *imagev1.ImageStream, *image.ImageStreamList, *imagev1.ImageStreamList, + *image.ImageStreamTag, *imagev1.ImageStreamTag: + gvk.Group = image.GroupName + + case *network.ClusterNetwork, *networkv1.ClusterNetwork, *network.ClusterNetworkList, *networkv1.ClusterNetworkList, + *network.NetNamespace, *networkv1.NetNamespace, *network.NetNamespaceList, *networkv1.NetNamespaceList, + *network.HostSubnet, *networkv1.HostSubnet, *network.HostSubnetList, *networkv1.HostSubnetList, + *network.EgressNetworkPolicy, *networkv1.EgressNetworkPolicy, 
*network.EgressNetworkPolicyList, *networkv1.EgressNetworkPolicyList: + gvk.Group = network.GroupName + + case *project.Project, *projectv1.Project, *project.ProjectList, *projectv1.ProjectList, + *project.ProjectRequest, *projectv1.ProjectRequest: + gvk.Group = project.GroupName + + case *quota.ClusterResourceQuota, *quotav1.ClusterResourceQuota, *quota.ClusterResourceQuotaList, *quotav1.ClusterResourceQuotaList: + gvk.Group = quota.GroupName + + case *oauth.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeToken, *oauth.OAuthAuthorizeTokenList, *oauthv1.OAuthAuthorizeTokenList, + *oauth.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorization, *oauth.OAuthClientAuthorizationList, *oauthv1.OAuthClientAuthorizationList, + *oauth.OAuthClient, *oauthv1.OAuthClient, *oauth.OAuthClientList, *oauthv1.OAuthClientList, + *oauth.OAuthAccessToken, *oauthv1.OAuthAccessToken, *oauth.OAuthAccessTokenList, *oauthv1.OAuthAccessTokenList: + gvk.Group = oauth.GroupName + + case *route.Route, *routev1.Route, *route.RouteList, *routev1.RouteList: + gvk.Group = route.GroupName + + case *security.SecurityContextConstraints, *securityv1.SecurityContextConstraints, *security.SecurityContextConstraintsList, *securityv1.SecurityContextConstraintsList: + gvk.Group = security.GroupName + + case *template.Template, *templatev1.Template, *template.TemplateList, *templatev1.TemplateList: + gvk.Group = template.GroupName + + case *user.Group, *userv1.Group, *user.GroupList, *userv1.GroupList, + *user.Identity, *userv1.Identity, *user.IdentityList, *userv1.IdentityList, + *user.UserIdentityMapping, *userv1.UserIdentityMapping, + *user.User, *userv1.User, *user.UserList, *userv1.UserList: + gvk.Group = user.GroupName + + } +} + +var oapiKindsToGroup = map[string]string{ + "DeploymentConfigRollback": "apps.openshift.io", + "DeploymentConfig": "apps.openshift.io", "DeploymentConfigList": "apps.openshift.io", + "ClusterRoleBinding": "authorization.openshift.io", "ClusterRoleBindingList": 
"authorization.openshift.io", + "ClusterRole": "authorization.openshift.io", "ClusterRoleList": "authorization.openshift.io", + "RoleBindingRestriction": "authorization.openshift.io", "RoleBindingRestrictionList": "authorization.openshift.io", + "RoleBinding": "authorization.openshift.io", "RoleBindingList": "authorization.openshift.io", + "Role": "authorization.openshift.io", "RoleList": "authorization.openshift.io", + "BuildConfig": "build.openshift.io", "BuildConfigList": "build.openshift.io", + "Build": "build.openshift.io", "BuildList": "build.openshift.io", + "Image": "image.openshift.io", "ImageList": "image.openshift.io", + "ImageSignature": "image.openshift.io", + "ImageStreamImage": "image.openshift.io", + "ImageStreamImport": "image.openshift.io", + "ImageStreamMapping": "image.openshift.io", + "ImageStream": "image.openshift.io", "ImageStreamList": "image.openshift.io", + "ImageStreamTag": "image.openshift.io", "ImageStreamTagList": "image.openshift.io", + "ClusterNetwork": "network.openshift.io", "ClusterNetworkList": "network.openshift.io", + "EgressNetworkPolicy": "network.openshift.io", "EgressNetworkPolicyList": "network.openshift.io", + "HostSubnet": "network.openshift.io", "HostSubnetList": "network.openshift.io", + "NetNamespace": "network.openshift.io", "NetNamespaceList": "network.openshift.io", + "OAuthAccessToken": "oauth.openshift.io", "OAuthAccessTokenList": "oauth.openshift.io", + "OAuthAuthorizeToken": "oauth.openshift.io", "OAuthAuthorizeTokenList": "oauth.openshift.io", + "OAuthClientAuthorization": "oauth.openshift.io", "OAuthClientAuthorizationList": "oauth.openshift.io", + "OAuthClient": "oauth.openshift.io", "OAuthClientList": "oauth.openshift.io", + "Project": "project.openshift.io", "ProjectList": "project.openshift.io", + "ProjectRequest": "project.openshift.io", + "ClusterResourceQuota": "quota.openshift.io", "ClusterResourceQuotaList": "quota.openshift.io", + "Route": "route.openshift.io", "RouteList": "route.openshift.io", + 
"SecurityContextConstraint": "security.openshift.io", "SecurityContextConstraintList": "security.openshift.io", + "Template": "template.openshift.io", "TemplateList": "template.openshift.io", + "Group": "user.openshift.io", "GroupList": "user.openshift.io", + "Identity": "user.openshift.io", "IdentityList": "user.openshift.io", + "UserIdentityMapping": "user.openshift.io", + "User": "user.openshift.io", "UserList": "user.openshift.io", +} + +func fixOAPIGroupKindInTopLevelUnstructured(obj map[string]interface{}) string { + kind, ok := obj["kind"] + if !ok { + return "" + } + kindStr, ok := kind.(string) + if !ok { + return "" + } + newGroup, ok := oapiKindsToGroup[kindStr] + if !ok { + return "" + } + + apiVersion, ok := obj["apiVersion"] + if !ok { + return newGroup + } + apiVersionStr, ok := apiVersion.(string) + if !ok { + return newGroup + } + + if apiVersionStr != "v1" { + return newGroup + } + obj["apiVersion"] = newGroup + "/v1" + + return newGroup +} diff --git a/pkg/oc/cli/util/clientcmd/factory_client_access.go b/pkg/oc/cli/util/clientcmd/factory_client_access.go index ebf8c6174f8a..90dbd0686b08 100644 --- a/pkg/oc/cli/util/clientcmd/factory_client_access.go +++ b/pkg/oc/cli/util/clientcmd/factory_client_access.go @@ -2,12 +2,10 @@ package clientcmd import ( "errors" - "net/http" "path/filepath" "regexp" "strconv" "strings" - "time" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -21,13 +19,11 @@ import ( restclient "k8s.io/client-go/rest" kclientcmd "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/client-go/util/homedir" kapi "k8s.io/kubernetes/pkg/apis/core" kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/kubectl/util/transport" appsapiv1 "github.com/openshift/api/apps/v1" appsapi 
"github.com/openshift/origin/pkg/apps/apis/apps" @@ -37,7 +33,6 @@ import ( ) type ring0Factory struct { - clientConfig kclientcmd.ClientConfig imageResolutionOptions FlagBinder kubeClientAccessFactory kcmdutil.ClientAccessFactory } @@ -53,70 +48,14 @@ func NewClientAccessFactory(optionalClientConfig kclientcmd.ClientConfig) Client kclientcmd.UseOpenShiftKubeConfigValues = true kclientcmd.ErrEmptyConfig = kclientcmd.NewErrConfigurationMissing() - flags := pflag.NewFlagSet("", pflag.ContinueOnError) - clientConfig := optionalClientConfig - if optionalClientConfig == nil { - clientConfig = kcmdutil.DefaultClientConfig(flags) - } factory := &ring0Factory{ - clientConfig: clientConfig, imageResolutionOptions: &imageResolutionOptions{}, } - factory.kubeClientAccessFactory = kcmdutil.NewClientAccessFactoryFromDiscovery( - flags, - clientConfig, - &discoveryFactory{clientConfig: clientConfig}, - ) + factory.kubeClientAccessFactory = kcmdutil.NewClientAccessFactory(optionalClientConfig) return factory } -type discoveryFactory struct { - clientConfig kclientcmd.ClientConfig - cacheDir string -} - -func (f *discoveryFactory) BindFlags(flags *pflag.FlagSet) { - defaultCacheDir := filepath.Join(homedir.HomeDir(), ".kube", "http-cache") - flags.StringVar(&f.cacheDir, kcmdutil.FlagHTTPCacheDir, defaultCacheDir, "Default HTTP cache directory") -} - -func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - // Output using whatever version was negotiated in the client cache. The - // version we decode with may not be the same as what the server requires. - cfg, err := f.clientConfig.ClientConfig() - if err != nil { - return nil, err - } - // given 25 groups with one-ish version each, discovery needs to make 50 requests - // double it just so we don't end up here again for a while. This config is only used for discovery. 
- cfg.Burst = 100 - - if f.cacheDir != "" { - wt := cfg.WrapTransport - cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } - return transport.NewCacheRoundTripper(f.cacheDir, rt) - } - } - - // at this point we've negotiated and can get the client - kubeClient, err := kclientset.NewForConfig(cfg) - if err != nil { - return nil, err - - } - - cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), cfg.Host) - return kcmdutil.NewCachedDiscoveryClient(newLegacyDiscoveryClient(kubeClient.Discovery().RESTClient()), cacheDir, time.Duration(10*time.Minute)), nil -} - -func (f *ring0Factory) RawConfig() (clientcmdapi.Config, error) { - return f.kubeClientAccessFactory.RawConfig() -} - func (f *ring0Factory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) { return f.kubeClientAccessFactory.DiscoveryClient() } @@ -133,8 +72,12 @@ func (f *ring0Factory) ClientConfig() (*restclient.Config, error) { return f.kubeClientAccessFactory.ClientConfig() } +func (f *ring0Factory) RawConfig() (clientcmdapi.Config, error) { + return f.kubeClientAccessFactory.RawConfig() +} + func (f *ring0Factory) BareClientConfig() (*restclient.Config, error) { - return f.clientConfig.ClientConfig() + return f.kubeClientAccessFactory.BareClientConfig() } func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { diff --git a/pkg/oc/cli/util/clientcmd/legacy_discovery.go b/pkg/oc/cli/util/clientcmd/legacy_discovery.go deleted file mode 100644 index cab093f560da..000000000000 --- a/pkg/oc/cli/util/clientcmd/legacy_discovery.go +++ /dev/null @@ -1,83 +0,0 @@ -package clientcmd - -import ( - "net/url" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - restclient "k8s.io/client-go/rest" -) - -// legacyDiscoveryClient implements the functions that discovery server-supported API 
groups, -// versions and resources. -type legacyDiscoveryClient struct { - *discovery.DiscoveryClient -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -// This can return an error *and* a partial result -func (d *legacyDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { - parentList, err := d.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return parentList, err - } - - if groupVersion != "v1" { - return parentList, nil - } - - // we request v1, we must combine the parent list with the list from /oapi - - url := url.URL{} - url.Path = "/oapi/" + groupVersion - originResources := &metav1.APIResourceList{} - err = d.RESTClient().Get().AbsPath(url.String()).Do().Into(originResources) - if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. - if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - return parentList, nil - } - return parentList, err - } - - parentList.APIResources = append(parentList.APIResources, originResources.APIResources...) - return parentList, nil -} - -// ServerResources returns the supported resources for all groups and versions. 
-// This can return an error *and* a partial result -func (d *legacyDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - - result := []*metav1.APIResourceList{} - failedGroups := make(map[schema.GroupVersion]error) - - for _, apiGroup := range apiGroups.Groups { - for _, version := range apiGroup.Versions { - gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - failedGroups[gv] = err - continue - } - - result = append(result, resources) - } - } - - if len(failedGroups) == 0 { - return result, nil - } - - return result, &discovery.ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// newLegacyDiscoveryClient creates a new DiscoveryClient for the given RESTClient. -func newLegacyDiscoveryClient(c restclient.Interface) *legacyDiscoveryClient { - return &legacyDiscoveryClient{discovery.NewDiscoveryClient(c)} -} diff --git a/test/cmd/authentication.sh b/test/cmd/authentication.sh index 1ef7776aa633..58d0172557f2 100755 --- a/test/cmd/authentication.sh +++ b/test/cmd/authentication.sh @@ -81,8 +81,8 @@ os::cmd::expect_success_and_text "curl -k -XPOST -H 'Content-Type: application/j os::cmd::expect_success_and_text "curl -k -XPOST -H 'Content-Type: application/json' -H 'Authorization: Bearer ${accesstoken}' '${API_SCHEME}://${API_HOST}:${API_PORT}/apis/authorization.openshift.io/v1/subjectaccessreviews' -d '{\"namespace\":\"${project}\",\"verb\":\"create\",\"resource\":\"pods\"}'" '"kind": "SubjectAccessReviewResponse"' os::cmd::expect_success_and_text "oc policy can-i create pods --token='${accesstoken}' -n '${project}' --ignore-scopes" 'yes' os::cmd::expect_success_and_text "oc policy can-i create pods --token='${accesstoken}' -n '${project}'" 'no' -os::cmd::expect_success_and_text "oc policy can-i create subjectaccessreviews --token='${accesstoken}' -n 
'${project}'" 'no' -os::cmd::expect_success_and_text "oc policy can-i create subjectaccessreviews --token='${accesstoken}' -n '${project}' --ignore-scopes" 'yes' +os::cmd::expect_success_and_text "oc policy can-i create subjectaccessreviews.authorization.openshift.io --token='${accesstoken}' -n '${project}'" 'no' +os::cmd::expect_success_and_text "oc policy can-i create subjectaccessreviews.authorization.openshift.io --token='${accesstoken}' -n '${project}' --ignore-scopes" 'yes' os::cmd::expect_success_and_text "oc policy can-i create pods --token='${accesstoken}' -n '${project}' --scopes='role:admin:*'" 'yes' os::cmd::expect_success_and_text "oc policy can-i --list --token='${accesstoken}' -n '${project}' --scopes='role:admin:*'" 'get.*pods' os::cmd::expect_success_and_not_text "oc policy can-i --list --token='${accesstoken}' -n '${project}'" 'get.*pods' diff --git a/test/cmd/deployments.sh b/test/cmd/deployments.sh index 8fa8ea9f27cc..10002ab475cc 100755 --- a/test/cmd/deployments.sh +++ b/test/cmd/deployments.sh @@ -159,7 +159,7 @@ os::test::junit::declare_suite_start "cmd/deployments/setdeploymenthook" arg="-f test/integration/testdata/test-deployment-config.yaml" os::cmd::expect_failure_and_text "oc set deployment-hook" "error: one or more deployment configs" os::cmd::expect_failure_and_text "oc set deployment-hook ${arg}" "error: you must specify one of --pre, --mid, or --post" -os::cmd::expect_failure_and_text "oc set deployment-hook ${arg} -o yaml --pre -- mycmd" 'deploymentconfigs "test-deployment-config" not found' +os::cmd::expect_failure_and_text "oc set deployment-hook ${arg} -o yaml --pre -- mycmd" 'deploymentconfigs.apps.openshift.io "test-deployment-config" not found' os::cmd::expect_success_and_text "oc set deployment-hook ${arg} --local -o yaml --post -- mycmd" 'mycmd' os::cmd::expect_success_and_not_text "oc set deployment-hook ${arg} --local -o yaml --post -- mycmd | oc set deployment-hook -f - --local -o yaml --post --remove" 'mycmd' 
os::cmd::expect_success "oc create ${arg}" diff --git a/test/cmd/login.sh b/test/cmd/login.sh index daac42f5eda6..c69a2dc78199 100755 --- a/test/cmd/login.sh +++ b/test/cmd/login.sh @@ -27,7 +27,7 @@ cp "${KUBECONFIG}" "${login_kubeconfig}" unset KUBECONFIG unset KUBERNETES_MASTER # test client not configured -os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get services" 'Missing or incomplete configuration info. Please login' +os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get services --loglevel=8" 'Missing or incomplete configuration info. Please login' unused_port="33333" # setting env bypasses the not configured message os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST KUBERNETES_MASTER=http://${API_HOST}:${unused_port} oc get services" 'did you specify the right host or port' diff --git a/test/cmd/migrate.sh b/test/cmd/migrate.sh index 847936fd08a5..2261d46ed2b1 100755 --- a/test/cmd/migrate.sh +++ b/test/cmd/migrate.sh @@ -33,9 +33,9 @@ os::test::junit::declare_suite_end os::test::junit::declare_suite_start "cmd/migrate/storage_oauthclientauthorizations" # Create valid OAuth client -os::cmd::expect_success_and_text 'oc create -f test/testdata/oauth/client.yaml' 'oauthclient "test-oauth-client" created' +os::cmd::expect_success_and_text 'oc create -f test/testdata/oauth/client.yaml' 'oauthclient.oauth.openshift.io "test-oauth-client" created' # Create OAuth client authorization for client -os::cmd::expect_success_and_text 'oc create -f test/testdata/oauth/clientauthorization.yaml' 'oauthclientauthorization "user1:test-oauth-client" created' +os::cmd::expect_success_and_text 'oc create -f test/testdata/oauth/clientauthorization.yaml' 'oauthclientauthorization.oauth.openshift.io "user1:test-oauth-client" created' # Delete client os::cmd::expect_success_and_text 'oc delete oauthclient test-oauth-client' 'oauthclient.oauth.openshift.io "test-oauth-client" deleted' # Assert that migration/update still works 
even though the client authorization is no longer valid diff --git a/test/cmd/router.sh b/test/cmd/router.sh index e36dd4ffc14d..31b058a0f407 100755 --- a/test/cmd/router.sh +++ b/test/cmd/router.sh @@ -70,7 +70,7 @@ os::cmd::expect_success_and_text 'oc get dc/router -o yaml' 'readinessProbe' os::cmd::expect_success_and_text "oc delete svc/router" 'service "router" deleted' os::cmd::expect_success_and_text "oc delete dc/router" 'deploymentconfig.apps.openshift.io "router" deleted' # create a router and check for success with a warning about the existing clusterrolebinding -os::cmd::expect_success_and_text "oc adm router" 'warning: clusterrolebindings "router-router-role" already exists' +os::cmd::expect_success_and_text "oc adm router" 'warning: clusterrolebindings.authorization.openshift.io "router-router-role" already exists' # only when using hostnetwork should we force the probes to use localhost os::cmd::expect_success_and_not_text "oc adm router -o yaml --host-network=false" 'host: localhost' diff --git a/test/cmd/set-liveness-probe.sh b/test/cmd/set-liveness-probe.sh index c63693371b9e..23aef85744e5 100755 --- a/test/cmd/set-liveness-probe.sh +++ b/test/cmd/set-liveness-probe.sh @@ -12,7 +12,7 @@ trap os::test::junit::reconcile_output EXIT os::test::junit::declare_suite_start "cmd/set-probe-liveness" # This test setting a liveness probe, without warning about replication controllers whose deployment depends on deployment configs -os::cmd::expect_success_and_text 'oc create -f pkg/oc/graph/genericgraph/test/simple-deployment.yaml' 'deploymentconfig "simple-deployment" created' +os::cmd::expect_success_and_text 'oc create -f pkg/oc/graph/genericgraph/test/simple-deployment.yaml' 'deploymentconfig.apps.openshift.io "simple-deployment" created' os::cmd::expect_success_and_text 'oc status --suggest' 'dc/simple-deployment has no liveness probe' # test --local flag diff --git a/test/integration/etcd_storage_path_test.go b/test/integration/etcd_storage_path_test.go 
index 4337a7818d57..45569190aa7f 100644 --- a/test/integration/etcd_storage_path_test.go +++ b/test/integration/etcd_storage_path_test.go @@ -1067,8 +1067,12 @@ func TestEtcd3StoragePath(t *testing.T) { mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { - t.Errorf("unexpected error getting mapping for %s from %s with GVK %s: %v", kind, pkgPath, gvk, err) - continue + t.Logf("unexpected error getting mapping for %s from %s with GVK %s: %v", kind, pkgPath, gvk, err) + mapping, err = legacyscheme.Registry.RESTMapper().RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + t.Errorf("unexpected error getting mapping for %s from %s with GVK %s: %v", kind, pkgPath, gvk, err) + continue + } } gvResource := gvk.GroupVersion().WithResource(mapping.Resource) diff --git a/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml b/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml index c7446d729034..03e71ebd37c8 100644 --- a/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml +++ b/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml @@ -7,6 +7,7 @@ metadata: rules: - apiGroups: - '' # Allow mutation of Origin policy so we can proxy to RBAC + - 'authorization.openshift.io' # Allow mutation of groupified openshift authorization resources so we can proxy to RBAC - 'rbac.authorization.k8s.io' # Allow mutation of RBAC so we can test escalation attributeRestrictions: null resources: diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go index 024d35a0a61f..7573c38784d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go @@ -604,6 +604,7 @@ func createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command, generator kube return nil, err } groupVersionKind := groupVersionKinds[0] + cmdutil.FixOAPIGroupifiedGVK(&groupVersionKind) if len(overrides) > 0 { codec = 
runtime.NewCodec(cmdutil.InternalVersionJSONEncoder(), cmdutil.InternalVersionDecoder()) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go index 5b553222dd89..ba9b8d00275c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go @@ -82,6 +82,9 @@ func NewClientAccessFactory(optionalClientConfig clientcmd.ClientConfig) ClientA return NewClientAccessFactoryFromDiscovery(flags, clientConfig, &discoveryFactory{clientConfig: clientConfig}) } +func NewClientAccessFactoryFromFlags(flags *pflag.FlagSet, clientConfig clientcmd.ClientConfig) ClientAccessFactory { + return NewClientAccessFactoryFromDiscovery(flags, clientConfig, &discoveryFactory{clientConfig: clientConfig}) +} // NewClientAccessFactoryFromDiscovery allows an external caller to substitute a different discoveryFactory // Which allows for the client cache to be built in ring0, but still rely on a custom discovery client @@ -111,6 +114,11 @@ func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface return nil, err } + // The more groups you have, the more discovery requests you need to make. + // given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests + // double it just so we don't end up here again for a while. This config is only used for discovery. 
+ cfg.Burst = 100 + if f.cacheDir != "" { wt := cfg.WrapTransport cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/patch_oapi_gvk.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/patch_oapi_gvk.go new file mode 100644 index 000000000000..faa7cc6e8253 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/patch_oapi_gvk.go @@ -0,0 +1,13 @@ +package util + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var OAPIToGroupifiedGVK func(gvk *schema.GroupVersionKind) + +func FixOAPIGroupifiedGVK(gvk *schema.GroupVersionKind) { + if OAPIToGroupifiedGVK != nil { + OAPIToGroupifiedGVK(gvk) + } +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/patch_restmapper_kind.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/patch_restmapper_kind.go new file mode 100644 index 000000000000..233e2b4ba132 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/patch_restmapper_kind.go @@ -0,0 +1,91 @@ +package discovery + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var oapiResources = map[string]bool{ + "appliedclusterresourcequotas": true, + "appliedclusterresourcequota": true, + "buildconfigs": true, + "buildconfig": true, + "bc": true, + "builds": true, + "build": true, + "clusternetworks": true, + "clusternetwork": true, + "clusterresourcequotas": true, + "clusterresourcequota": true, + "clusterquota": true, + "clusterrolebindings": true, + "clusterrolebinding": true, + "clusterroles": true, + "clusterrole": true, + "deploymentconfigrollbacks": true, + "deploymentconfigrollback": true, + "deploymentconfigs": true, + "deploymentconfig": true, + "dc": true, + "egressnetworkpolicies": true, + "egressnetworkpolicy": true, + "groups": true, + "group": true, + "hostsubnets": true, + "hostsubnet": true, + "identities": true, + "identity": true, + "images": true, + "image": true, + 
"imagesignatures": true, + "imagesignature": true, + "imagestreamimages": true, + "imagestreamimage": true, + "isimage": true, + "imagestreamimports": true, + "imagestreamimport": true, + "imagestreammappings": true, + "imagestreammapping": true, + "imagestreams": true, + "imagestream": true, + "is": true, + "imagestreamtags": true, + "imagestreamtag": true, + "istag": true, + "netnamespaces": true, + "netnamespace": true, + "oauthaccesstokens": true, + "oauthaccesstoken": true, + "oauthauthorizetokens": true, + "oauthauthorizetoken": true, + "oauthclientauthorizations": true, + "oauthclientauthorization": true, + "oauthclients": true, + "oauthclient": true, + "processedtemplates": true, + "processedtemplate": true, + "projects": true, + "project": true, + "rolebindingrestrictions": true, + "rolebindingrestriction": true, + "rolebindings": true, + "rolebinding": true, + "roles": true, + "role": true, + "routes": true, + "route": true, + "securitycontextconstraints": true, + "securitycontextconstraint": true, + "scc": true, + "templates": true, + "template": true, + "useridentitymappings": true, + "useridentitymapping": true, + "users": true, + "user": true, +} + +func isOAPIResource(resource schema.GroupVersionResource) bool { + return oapiResources[strings.ToLower(resource.Resource)] +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go index f90e1bb0b048..a377b650cbcd 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go @@ -112,6 +112,13 @@ func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.V versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior 
versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + + if isOAPIResource(plural) { + oapiGV := schema.GroupVersion{Version: "v1"} + versionMapper.AddSpecific(oapiGV.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(oapiGV.WithKind(resource.Kind+"List"), scope) + } } // TODO why is this type not in discovery (at least for "v1") versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot)