diff --git a/pkg/cmd/server/bootstrappolicy/dead.go b/pkg/cmd/server/bootstrappolicy/dead.go
index 3bc3a87252d1..1ec351ca8ade 100644
--- a/pkg/cmd/server/bootstrappolicy/dead.go
+++ b/pkg/cmd/server/bootstrappolicy/dead.go
@@ -9,6 +9,8 @@ import (
 var (
 	deadClusterRoles = []rbac.ClusterRole{}
+
+	deadClusterRoleBindings = []rbac.ClusterRoleBinding{}
 )
 
 func addDeadClusterRole(name string) {
@@ -25,12 +27,33 @@ func addDeadClusterRole(name string) {
 	)
 }
 
+func addDeadClusterRoleBinding(name, roleName string) {
+	for _, existing := range deadClusterRoleBindings {
+		if name == existing.Name {
+			glog.Fatalf("%q was already registered", name)
+		}
+	}
+
+	deadClusterRoleBindings = append(deadClusterRoleBindings,
+		rbac.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{Name: name},
+			RoleRef:    rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: roleName},
+		},
+	)
+}
+
 // GetDeadClusterRoles returns cluster roles which should no longer have any permissions.
 // These are enumerated so that a reconcile that tightens permissions will properly remove them.
 func GetDeadClusterRoles() []rbac.ClusterRole {
 	return deadClusterRoles
 }
 
+// GetDeadClusterRoleBindings returns cluster role bindings which should no longer have any subjects.
+// These are enumerated so that a reconcile that tightens permissions will properly remove them.
+func GetDeadClusterRoleBindings() []rbac.ClusterRoleBinding {
+	return deadClusterRoleBindings
+}
+
 func init() {
 	// these were replaced by kube controller roles
 	addDeadClusterRole("system:replication-controller")
@@ -50,4 +73,7 @@ func init() {
 	addDeadClusterRole("system:build-controller")
 	addDeadClusterRole("system:deploymentconfig-controller")
 	addDeadClusterRole("system:deployment-controller")
+
+	// this was replaced by the node authorizer
+	addDeadClusterRoleBinding("system:nodes", "system:node")
 }
diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go
index 97b1637c26d9..90f0b6c560e7 100644
--- a/pkg/cmd/server/bootstrappolicy/policy.go
+++ b/pkg/cmd/server/bootstrappolicy/policy.go
@@ -696,13 +696,13 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
 		Rules: []rbac.PolicyRule{
 			// Needed to check API access. These creates are non-mutating
 			rbac.NewRule("create").Groups(kAuthnGroup).Resources("tokenreviews").RuleOrDie(),
-			rbac.NewRule("create").Groups(authzGroup, legacyAuthzGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
 			rbac.NewRule("create").Groups(kAuthzGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
 			// Needed to build serviceLister, to populate env vars for services
 			rbac.NewRule(read...).Groups(kapiGroup).Resources("services").RuleOrDie(),
 			// Nodes can register themselves
-			// TODO: restrict to creating a node with the same name they announce
+			// Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
 			rbac.NewRule("create", "get", "list", "watch").Groups(kapiGroup).Resources("nodes").RuleOrDie(),
+			rbac.NewRule("update", "patch", "delete").Groups(kapiGroup).Resources("nodes").RuleOrDie(),
 			// TODO: restrict to the bound node once supported
 			rbac.NewRule("update", "patch").Groups(kapiGroup).Resources("nodes/status").RuleOrDie(),
@@ -731,7 +731,7 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
 			// Needed for glusterfs volumes
 			rbac.NewRule("get").Groups(kapiGroup).Resources("endpoints").RuleOrDie(),
 			// Nodes are allowed to request CSRs (specifically, request serving certs)
-			rbac.NewRule("get", "create").Groups(certificates.GroupName).Resources("certificatesigningrequests").RuleOrDie(),
+			rbac.NewRule("get", "create", "list", "watch").Groups(certificates.GroupName).Resources("certificatesigningrequests").RuleOrDie(),
 		},
 	},
@@ -966,9 +966,6 @@ func GetOpenshiftBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
 		newOriginClusterBinding(StatusCheckerRoleBindingName, StatusCheckerRoleName).
 			Groups(AuthenticatedGroup, UnauthenticatedGroup).
 			BindingOrDie(),
-		newOriginClusterBinding(NodeRoleBindingName, NodeRoleName).
-			Groups(NodesGroup).
-			BindingOrDie(),
 		newOriginClusterBinding(NodeProxierRoleBindingName, NodeProxierRoleName).
 			// Allow node identities to run node proxies
 			Groups(NodesGroup).
@@ -1009,6 +1006,10 @@ func GetOpenshiftBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
 func GetBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
 	openshiftClusterRoleBindings := GetOpenshiftBootstrapClusterRoleBindings()
 
+	// dead cluster role bindings need to be checked for conflicts (in case something new comes up)
+	// so add them to this list.
+	openshiftClusterRoleBindings = append(openshiftClusterRoleBindings, GetDeadClusterRoleBindings()...)
+
 	kubeClusterRoleBindings := bootstrappolicy.ClusterRoleBindings()
 	kubeControllerClusterRoleBindings := bootstrappolicy.ControllerRoleBindings()
 	openshiftControllerClusterRoleBindings := ControllerRoleBindings()
diff --git a/pkg/cmd/server/bootstrappolicy/policy_test.go b/pkg/cmd/server/bootstrappolicy/policy_test.go
index 90316a2666ef..145b801f78f7 100644
--- a/pkg/cmd/server/bootstrappolicy/policy_test.go
+++ b/pkg/cmd/server/bootstrappolicy/policy_test.go
@@ -14,6 +14,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/rbac"
 	"k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 	rulevalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
+	kbootstrappolicy "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
 
 	"github.com/openshift/origin/pkg/api/v1"
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
@@ -109,6 +110,7 @@ func TestCovers(t *testing.T) {
 	var clusterAdmin *rbac.ClusterRole
 	var storageAdmin *rbac.ClusterRole
 	var imageBuilder *rbac.ClusterRole
+	var nodeRole *rbac.ClusterRole
 
 	for i := range allRoles {
 		role := allRoles[i]
@@ -135,6 +137,8 @@ func TestCovers(t *testing.T) {
 			storageAdmin = &role
 		case bootstrappolicy.ImageBuilderRoleName:
 			imageBuilder = &role
+		case bootstrappolicy.NodeRoleName:
+			nodeRole = &role
 		}
 	}
 
@@ -176,4 +180,19 @@ func TestCovers(t *testing.T) {
 	if covers, miss := rulevalidation.Covers(systemMaster.Rules, clusterAdmin.Rules); !covers {
 		t.Errorf("failed to cover: %#v", miss)
 	}
+
+	// Make sure our node role covers upstream node rules
+	if covers, miss := rulevalidation.Covers(nodeRole.Rules, kbootstrappolicy.NodeRules()); !covers {
+		t.Errorf("upstream node role has extra permissions:")
+		for _, r := range miss {
+			t.Logf("\t%s", r.CompactString())
+		}
+	}
+	// Make sure our node role doesn't have any extra permissions
+	if covers, miss := rulevalidation.Covers(kbootstrappolicy.NodeRules(), nodeRole.Rules); !covers {
+		t.Errorf("openshift node role has extra permissions:")
+		for _, r := range miss {
+			t.Logf("\t%s", r.CompactString())
+		}
+	}
 }
diff --git a/pkg/cmd/server/origin/admission/register.go b/pkg/cmd/server/origin/admission/register.go
index f5566de30b14..3fe0384709e3 100644
--- a/pkg/cmd/server/origin/admission/register.go
+++ b/pkg/cmd/server/origin/admission/register.go
@@ -28,6 +28,7 @@ import (
 	securityadmission "github.com/openshift/origin/pkg/security/admission"
 	serviceadmit "github.com/openshift/origin/pkg/service/admission"
 
+	"k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
 	storageclassdefaultadmission "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault"
 
 	imagepolicyapi "github.com/openshift/origin/pkg/image/admission/imagepolicy/api"
@@ -83,6 +84,7 @@ var (
 		serviceadmit.RestrictedEndpointsPluginName,
 		"LimitRanger",
 		"ServiceAccount",
+		noderestriction.PluginName,
 		"SecurityContextConstraint",
 		"SCCExecRestrictions",
 		"PersistentVolumeLabel",
@@ -111,7 +113,6 @@ var (
 		// these are new, reassess post-rebase
 		"Initializers",
 		"GenericAdmissionWebhook",
-		"NodeRestriction",
 		"PodTolerationRestriction",
 	)
 )
diff --git a/pkg/cmd/server/origin/master_config.go b/pkg/cmd/server/origin/master_config.go
index 58f5947e3d0d..ac840fcc1da4 100644
--- a/pkg/cmd/server/origin/master_config.go
+++ b/pkg/cmd/server/origin/master_config.go
@@ -43,10 +43,12 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
 	kapi "k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
 	kclientsetexternal "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	kclientsetinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	kinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
 	kinternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
 	rbacinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion"
 	rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion"
 	sacontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -54,9 +56,12 @@ import (
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
 	rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
 	"k8s.io/kubernetes/pkg/serviceaccount"
+	noderestriction "k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
 	saadmit "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	storageclassdefaultadmission "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault"
+	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
 	rbacauthorizer "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+	kbootstrappolicy "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
 
 	"github.com/openshift/origin/pkg/auth/authenticator/request/paramtoken"
 	authnregistry "github.com/openshift/origin/pkg/auth/oauth/registry"
@@ -233,6 +238,8 @@ func BuildMasterConfig(options configapi.MasterConfig, informers InformerAccess)
 		kubeAuthorizer,
 		kubeSubjectLocator,
 		informers.GetInternalKubeInformers().Rbac().InternalVersion().ClusterRoles().Lister(),
+		informers.GetInternalKubeInformers().Core().InternalVersion().Pods(),
+		informers.GetInternalKubeInformers().Core().InternalVersion().PersistentVolumes(),
 		options.ProjectConfig.ProjectRequestMessage,
 	)
 
@@ -404,6 +411,7 @@ var (
 		"PodPreset",
 		"LimitRanger",
 		"ServiceAccount",
+		noderestriction.PluginName,
 		"SecurityContextConstraint",
 		storageclassdefaultadmission.PluginName,
 		"AlwaysPullImages",
@@ -415,7 +423,6 @@ var (
 		"DefaultTolerationSeconds",
 		"Initializers",
 		"GenericAdmissionWebhook",
-		"NodeRestriction",
 		"PodTolerationRestriction",
 		// NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.
 		// DO NOT ADD ANY PLUGINS AFTER THIS LINE!
@@ -448,6 +455,7 @@ var (
 		"PodPreset",
 		"LimitRanger",
 		"ServiceAccount",
+		noderestriction.PluginName,
 		"SecurityContextConstraint",
 		storageclassdefaultadmission.PluginName,
 		"AlwaysPullImages",
@@ -459,7 +467,6 @@ var (
 		"DefaultTolerationSeconds",
 		"Initializers",
 		"GenericAdmissionWebhook",
-		"NodeRestriction",
 		"PodTolerationRestriction",
 		// NOTE: ResourceQuota and ClusterResourceQuota must be the last 2 plugins.
 		// DO NOT ADD ANY PLUGINS AFTER THIS LINE!
@@ -786,14 +793,27 @@ func buildKubeAuth(r rbacinformers.Interface) (kauthorizer.Authorizer, rbacregis
 	return kubeAuthorizer, ruleResolver, kubeSubjectLocator
 }
 
-func newAuthorizer(kubeAuthorizer kauthorizer.Authorizer, kubeSubjectLocator rbacauthorizer.SubjectLocator, clusterRoleGetter rbaclisters.ClusterRoleLister, projectRequestDenyMessage string) (kauthorizer.Authorizer, authorizer.SubjectLocator) {
+func newAuthorizer(
+	kubeAuthorizer kauthorizer.Authorizer,
+	kubeSubjectLocator rbacauthorizer.SubjectLocator,
+	clusterRoleGetter rbaclisters.ClusterRoleLister,
+	podInformer coreinformers.PodInformer,
+	pvInformer coreinformers.PersistentVolumeInformer,
+	projectRequestDenyMessage string,
+) (kauthorizer.Authorizer, authorizer.SubjectLocator) {
 	messageMaker := authorizer.NewForbiddenMessageResolver(projectRequestDenyMessage)
 	roleBasedAuthorizer := authorizer.NewAuthorizer(kubeAuthorizer, messageMaker)
 	subjectLocator := authorizer.NewSubjectLocator(kubeSubjectLocator)
+
 	scopeLimitedAuthorizer := scope.NewAuthorizer(roleBasedAuthorizer, clusterRoleGetter, messageMaker)
 
+	graph := node.NewGraph()
+	node.AddGraphEventHandlers(graph, podInformer, pvInformer)
+	nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), kbootstrappolicy.NodeRules())
+
 	authorizer := authorizerunion.New(
 		authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup), // authorizes system:masters to do anything, just like upstream
+		nodeAuthorizer,
 		scopeLimitedAuthorizer)
 
 	return authorizer, subjectLocator
diff --git a/test/integration/node_authorizer_test.go b/test/integration/node_authorizer_test.go
new file mode 100644
index 000000000000..0caa3607d7a8
--- /dev/null
+++ b/test/integration/node_authorizer_test.go
@@ -0,0 +1,321 @@
+package integration
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/rest"
+	"k8s.io/kubernetes/pkg/api"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+
+	"github.com/openshift/origin/pkg/cmd/server/admin"
+	"github.com/openshift/origin/pkg/cmd/server/crypto"
+	testutil "github.com/openshift/origin/test/util"
+	testserver "github.com/openshift/origin/test/util/server"
+)
+
+func TestNodeAuthorizer(t *testing.T) {
+	masterConfig, err := testserver.DefaultMasterOptions()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer testserver.CleanupMasterEtcd(t, masterConfig)
+
+	// we care about pods getting rejected for referencing secrets at all, not because the pod's service account doesn't reference them
+	masterConfig.ServiceAccountConfig.LimitSecretReferences = false
+
+	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	superuserClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clientConfig := rest.AnonymousClientConfig(clusterAdminClientConfig)
+
+	caDir := filepath.Dir(masterConfig.ServingInfo.ClientCA)
+	signer := &admin.SignerCertOptions{
+		CertFile:   filepath.Join(caDir, "ca.crt"),
+		KeyFile:    filepath.Join(caDir, "ca.key"),
+		SerialFile: filepath.Join(caDir, "ca.serial.txt"),
+	}
+
+	certDir, err := ioutil.TempDir("", "nodeauthorizer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(certDir)
+
+	nodeanonClient := makeNodeClientset(t, signer, certDir, "unknown", clientConfig)
+	node1Client := makeNodeClientset(t, signer, certDir, "system:node:node1", clientConfig)
+	node2Client := makeNodeClientset(t, signer, certDir, "system:node:node2", clientConfig)
+
+	// Prep namespace
+	if _, err := superuserClient.Core().Namespaces().Create(&api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}); err != nil {
+		t.Fatal(err)
+	}
+	wait.Poll(time.Second, time.Minute, func() (bool, error) {
+		sa, err := superuserClient.Core().ServiceAccounts("ns").Get("default", metav1.GetOptions{})
+		if err != nil || len(sa.Secrets) == 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	// Create objects
+	if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := superuserClient.Core().Secrets("ns").Create(&api.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := superuserClient.Core().ConfigMaps("ns").Create(&api.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := superuserClient.Core().PersistentVolumeClaims("ns").Create(&api.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: "mypvc"},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
+			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceStorage: resource.MustParse("1")}},
+		},
+	}); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := superuserClient.Core().PersistentVolumes().Create(&api.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{Name: "mypv"},
+		Spec: api.PersistentVolumeSpec{
+			AccessModes:            []api.PersistentVolumeAccessMode{api.ReadOnlyMany},
+			Capacity:               api.ResourceList{api.ResourceStorage: resource.MustParse("1")},
+			ClaimRef:               &api.ObjectReference{Namespace: "ns", Name: "mypvc"},
+			PersistentVolumeSource: api.PersistentVolumeSource{AzureFile: &api.AzureFileVolumeSource{ShareName: "default", SecretName: "mypvsecret"}},
+		},
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	getSecret := func(client clientset.Interface) error {
+		_, err := client.Core().Secrets("ns").Get("mysecret", metav1.GetOptions{})
+		return err
+	}
+	getPVSecret := func(client clientset.Interface) error {
+		_, err := client.Core().Secrets("ns").Get("mypvsecret", metav1.GetOptions{})
+		return err
+	}
+	getConfigMap := func(client clientset.Interface) error {
+		_, err := client.Core().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{})
+		return err
+	}
+	getPVC := func(client clientset.Interface) error {
+		_, err := client.Core().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{})
+		return err
+	}
+	getPV := func(client clientset.Interface) error {
+		_, err := client.Core().PersistentVolumes().Get("mypv", metav1.GetOptions{})
+		return err
+	}
+
+	createNode2NormalPod := func(client clientset.Interface) error {
+		_, err := client.Core().Pods("ns").Create(&api.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
+			Spec: api.PodSpec{
+				NodeName:   "node2",
+				Containers: []api.Container{{Name: "image", Image: "busybox"}},
+				Volumes: []api.Volume{
+					{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "mysecret"}}},
+					{Name: "cm", VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: "myconfigmap"}}}},
+					{Name: "pvc", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: "mypvc"}}},
+				},
+			},
+		})
+		return err
+	}
+	updateNode2NormalPodStatus := func(client clientset.Interface) error {
+		startTime := metav1.NewTime(time.Now())
+		_, err := client.Core().Pods("ns").UpdateStatus(&api.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"},
+			Status:     api.PodStatus{StartTime: &startTime},
+		})
+		return err
+	}
+	deleteNode2NormalPod := func(client clientset.Interface) error {
+		zero := int64(0)
+		return client.Core().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+	}
+
+	createNode2MirrorPod := func(client clientset.Interface) error {
+		_, err := client.Core().Pods("ns").Create(&api.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        "node2mirrorpod",
+				Annotations: map[string]string{api.MirrorPodAnnotationKey: "true"},
+			},
+			Spec: api.PodSpec{
+				NodeName:   "node2",
+				Containers: []api.Container{{Name: "image", Image: "busybox"}},
+			},
+		})
+		return err
+	}
+	deleteNode2MirrorPod := func(client clientset.Interface) error {
+		zero := int64(0)
+		return client.Core().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero})
+	}
+
+	createNode2 := func(client clientset.Interface) error {
+		_, err := client.Core().Nodes().Create(&api.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}})
+		return err
+	}
+	updateNode2Status := func(client clientset.Interface) error {
+		_, err := client.Core().Nodes().UpdateStatus(&api.Node{
+			ObjectMeta: metav1.ObjectMeta{Name: "node2"},
+			Status:     api.NodeStatus{},
+		})
+		return err
+	}
+	deleteNode2 := func(client clientset.Interface) error {
+		return client.Core().Nodes().Delete("node2", nil)
+	}
+
+	// nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig)
+	// node1Client := clientsetForToken(tokenNode1, clientConfig)
+	// node2Client := clientsetForToken(tokenNode2, clientConfig)
+
+	// all node requests from node1 and unknown node fail
+	expectForbidden(t, getSecret(nodeanonClient))
+	expectForbidden(t, getPVSecret(nodeanonClient))
+	expectForbidden(t, getConfigMap(nodeanonClient))
+	expectForbidden(t, getPVC(nodeanonClient))
+	expectForbidden(t, getPV(nodeanonClient))
+	expectForbidden(t, createNode2NormalPod(nodeanonClient))
+	expectForbidden(t, createNode2MirrorPod(nodeanonClient))
+	expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
+	expectForbidden(t, createNode2(nodeanonClient))
+	expectForbidden(t, updateNode2Status(nodeanonClient))
+	expectForbidden(t, deleteNode2(nodeanonClient))
+
+	expectForbidden(t, getSecret(node1Client))
+	expectForbidden(t, getPVSecret(node1Client))
+	expectForbidden(t, getConfigMap(node1Client))
+	expectForbidden(t, getPVC(node1Client))
+	expectForbidden(t, getPV(node1Client))
+	expectForbidden(t, createNode2NormalPod(node1Client))
+	expectForbidden(t, createNode2MirrorPod(node1Client))
+	expectForbidden(t, deleteNode2MirrorPod(node1Client))
+	expectForbidden(t, createNode2(node1Client))
+	expectForbidden(t, updateNode2Status(node1Client))
+	expectForbidden(t, deleteNode2(node1Client))
+
+	// related object requests from node2 fail
+	expectForbidden(t, getSecret(node2Client))
+	expectForbidden(t, getPVSecret(node2Client))
+	expectForbidden(t, getConfigMap(node2Client))
+	expectForbidden(t, getPVC(node2Client))
+	expectForbidden(t, getPV(node2Client))
+	expectForbidden(t, createNode2NormalPod(node2Client))
+	// mirror pod and self node lifecycle is allowed
+	expectAllowed(t, createNode2MirrorPod(node2Client))
+	expectAllowed(t, deleteNode2MirrorPod(node2Client))
+	expectAllowed(t, createNode2(node2Client))
+	expectAllowed(t, updateNode2Status(node2Client))
+	expectAllowed(t, deleteNode2(node2Client))
+
+	// create a pod as an admin to add object references
+	expectAllowed(t, createNode2NormalPod(superuserClient))
+
+	// unidentifiable node and node1 are still forbidden
+	expectForbidden(t, getSecret(nodeanonClient))
+	expectForbidden(t, getPVSecret(nodeanonClient))
+	expectForbidden(t, getConfigMap(nodeanonClient))
+	expectForbidden(t, getPVC(nodeanonClient))
+	expectForbidden(t, getPV(nodeanonClient))
+	expectForbidden(t, createNode2NormalPod(nodeanonClient))
+	expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient))
+	expectForbidden(t, deleteNode2NormalPod(nodeanonClient))
+	expectForbidden(t, createNode2MirrorPod(nodeanonClient))
+	expectForbidden(t, deleteNode2MirrorPod(nodeanonClient))
+
+	expectForbidden(t, getSecret(node1Client))
+	expectForbidden(t, getPVSecret(node1Client))
+	expectForbidden(t, getConfigMap(node1Client))
+	expectForbidden(t, getPVC(node1Client))
+	expectForbidden(t, getPV(node1Client))
+	expectForbidden(t, createNode2NormalPod(node1Client))
+	expectForbidden(t, updateNode2NormalPodStatus(node1Client))
+	expectForbidden(t, deleteNode2NormalPod(node1Client))
+	expectForbidden(t, createNode2MirrorPod(node1Client))
+	expectForbidden(t, deleteNode2MirrorPod(node1Client))
+
+	// node2 can get referenced objects now
+	expectAllowed(t, getSecret(node2Client))
+	expectAllowed(t, getPVSecret(node2Client))
+	expectAllowed(t, getConfigMap(node2Client))
+	expectAllowed(t, getPVC(node2Client))
+	expectAllowed(t, getPV(node2Client))
+	expectForbidden(t, createNode2NormalPod(node2Client))
+	expectAllowed(t, updateNode2NormalPodStatus(node2Client))
+	expectAllowed(t, deleteNode2NormalPod(node2Client))
+	expectAllowed(t, createNode2MirrorPod(node2Client))
+	expectAllowed(t, deleteNode2MirrorPod(node2Client))
+}
+
+func makeNodeClientset(t *testing.T, signer *admin.SignerCertOptions, certDir string, username string, anonymousConfig *rest.Config) clientset.Interface {
+	clientCertOptions := &admin.CreateClientCertOptions{
+		SignerCertOptions: signer,
+		CertFile:          admin.DefaultCertFilename(certDir, username),
+		KeyFile:           admin.DefaultKeyFilename(certDir, username),
+		ExpireDays:        crypto.DefaultCertificateLifetimeInDays,
+		User:              username,
+		Groups:            []string{"system:nodes"},
+		Overwrite:         true,
+	}
+	if err := clientCertOptions.Validate(nil); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := clientCertOptions.CreateClientCert(); err != nil {
+		t.Fatal(err)
+	}
+
+	configCopy := *anonymousConfig
+	configCopy.TLSClientConfig.CertFile = clientCertOptions.CertFile
+	configCopy.TLSClientConfig.KeyFile = clientCertOptions.KeyFile
+
+	c, err := clientset.NewForConfig(&configCopy)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return c
+}
+
+func expectForbidden(t *testing.T, err error) {
+	if !errors.IsForbidden(err) {
+		_, file, line, _ := runtime.Caller(1)
+		t.Errorf("%s:%d: Expected forbidden error, got %v", filepath.Base(file), line, err)
+	}
+}
+
+func expectNotFound(t *testing.T, err error) {
+	if !errors.IsNotFound(err) {
+		_, file, line, _ := runtime.Caller(1)
+		t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err)
+	}
+}
+
+func expectAllowed(t *testing.T, err error) {
+	if err != nil {
+		_, file, line, _ := runtime.Caller(1)
+		t.Errorf("%s:%d: Expected no error, got %v", filepath.Base(file), line, err)
+	}
+}
diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
index 3fd9079170df..bc7b22cbd1ef 100644
--- a/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
+++ b/test/testdata/bootstrappolicy/bootstrap_cluster_role_bindings.yaml
@@ -132,19 +132,6 @@ items:
   - apiGroup: rbac.authorization.k8s.io
     kind: Group
     name: system:unauthenticated
-- apiVersion: rbac.authorization.k8s.io/v1beta1
-  kind: ClusterRoleBinding
-  metadata:
-    creationTimestamp: null
-    name: system:nodes
-  roleRef:
-    apiGroup: rbac.authorization.k8s.io
-    kind: ClusterRole
-    name: system:node
-  subjects:
-  - apiGroup: rbac.authorization.k8s.io
-    kind: Group
-    name: system:nodes
 - apiVersion: rbac.authorization.k8s.io/v1beta1
   kind: ClusterRoleBinding
   metadata:
@@ -271,6 +258,16 @@ items:
   - apiGroup: rbac.authorization.k8s.io
     kind: Group
     name: system:unauthenticated
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+  kind: ClusterRoleBinding
+  metadata:
+    creationTimestamp: null
+    name: system:nodes
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: ClusterRole
+    name: system:node
+  subjects: []
 - apiVersion: rbac.authorization.k8s.io/v1beta1
   kind: ClusterRoleBinding
   metadata:
@@ -1056,9 +1053,6 @@ items:
     apiGroup: rbac.authorization.k8s.io
     kind: ClusterRole
    name: system:node
-  subjects:
-  - apiGroup: rbac.authorization.k8s.io
-    kind: Group
-    name: system:nodes
+  subjects: []
 kind: List
 metadata: {}
diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
index b2b3edfe24d4..87a4f629dc3e 100644
--- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
+++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml
@@ -2160,14 +2160,6 @@ items:
     - tokenreviews
     verbs:
     - create
-  - apiGroups:
-    - ""
-    - authorization.openshift.io
-    resources:
-    - localsubjectaccessreviews
-    - subjectaccessreviews
-    verbs:
-    - create
   - apiGroups:
     - authorization.k8s.io
     resources:
@@ -2192,6 +2184,14 @@ items:
     - get
     - list
     - watch
+  - apiGroups:
+    - ""
+    resources:
+    - nodes
+    verbs:
+    - delete
+    - patch
+    - update
   - apiGroups:
     - ""
     resources:
@@ -2256,6 +2256,8 @@ items:
     verbs:
     - create
     - get
+    - list
+    - watch
 - apiVersion: rbac.authorization.k8s.io/v1beta1
   kind: ClusterRole
   metadata:
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
index 1697ad680d96..4c9cd35296e9 100644
--- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
@@ -405,10 +405,12 @@ func ClusterRoleBindings() []rbac.ClusterRoleBinding {
 		rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
 		rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
 
-		// This default system:nodes binding is deprecated in 1.7 with the availability of the Node authorizer.
-		// If an admin wants to grant the system:node role (which cannot partition Node API access), they will need to create their own clusterrolebinding.
-		// TODO: Remove the subjects from this binding in 1.8 (leave the empty binding for tightening reconciliation), and remove AddClusterRoleBindingFilter()
-		rbac.NewClusterBinding(systemNodeRoleName).Groups(user.NodesGroup).BindingOrDie(),
+		// This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer.
+		// This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject.
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName},
+			RoleRef:    rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
+		},
 	}
 
 	addClusterRoleBindingLabel(rolebindings)
diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml
index 5b666247fc8a..87e8d85066c5 100644
--- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml
+++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-role-bindings.yaml
@@ -121,10 +121,7 @@ items:
     apiGroup: rbac.authorization.k8s.io
     kind: ClusterRole
     name: system:node
-  subjects:
-  - apiGroup: rbac.authorization.k8s.io
-    kind: Group
-    name: system:nodes
+  subjects: []
 - apiVersion: rbac.authorization.k8s.io/v1beta1
   kind: ClusterRoleBinding
   metadata:
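
Note on the node identities exercised by the integration test above: makeNodeClientset mints client certificates for the given user name with the system:nodes group, because the node authorizer wired up in newAuthorizer only engages for users that the default node identifier recognizes; everything else falls through to the scoped RBAC authorizer, where the now-empty system:nodes binding grants nothing. The following is a minimal illustrative sketch of that mapping, not part of this change; it assumes the 1.7-era k8s.io/kubernetes/pkg/auth/nodeidentifier package referenced in master_config.go and the k8s.io/apiserver authentication user package.

package example

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
)

func main() {
	id := nodeidentifier.NewDefaultNodeIdentifier()

	// Matches the certificate minted for "system:node:node2": the system:nodes
	// group plus a system:node:<name> user name yields the node name "node2".
	name, isNode := id.NodeIdentity(&user.DefaultInfo{
		Name:   "system:node:node2",
		Groups: []string{"system:nodes"},
	})
	fmt.Println(name, isNode) // "node2" true

	// The "unknown" certificate carries the group but not the user name prefix,
	// so it is not treated as a node at all, which is why nodeanonClient is
	// forbidden for every request in the test.
	name, isNode = id.NodeIdentity(&user.DefaultInfo{
		Name:   "unknown",
		Groups: []string{"system:nodes"},
	})
	fmt.Println(name, isNode) // "" false
}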