From f20c0a40887d6c0065de599ce8de8159d4ce6122 Mon Sep 17 00:00:00 2001
From: David Eads
Date: Tue, 21 Aug 2018 17:09:45 -0400
Subject: [PATCH] switch test-cmd tests use mostly kubeconfig

---
 hack/test-cmd.sh                 |  2 +-
 test/cmd/login.sh                | 11 ++++++-----
 test/cmd/policy.sh               |  8 ++++----
 test/cmd/status.sh               |  4 ++--
 test/extended/alternate_certs.sh | 20 ++++++++++----------
 5 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh
index 4fb91277712e..4ae787daf3a5 100755
--- a/hack/test-cmd.sh
+++ b/hack/test-cmd.sh
@@ -104,7 +104,7 @@ for test in "${tests[@]}"; do
 done
 
 os::log::debug "Metrics information logged to ${LOG_DIR}/metrics.log"
-oc get --raw /metrics --config="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
+oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
 
 if [[ -n "${failed:-}" ]]; then
     exit 1
diff --git a/test/cmd/login.sh b/test/cmd/login.sh
index 6f69e54aacce..fd7c6378b5ac 100755
--- a/test/cmd/login.sh
+++ b/test/cmd/login.sh
@@ -44,10 +44,10 @@ if [[ "${API_SCHEME}" == "https" ]]; then
 fi
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. Contact your system administrator to request a project"
 # make sure standard login prompt is printed once self-provisioner status is restored
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 # make sure `oc login` fails with unauthorized error
 os::cmd::expect_failure_and_text 'oc login <<< \n' 'Login failed \(401 Unauthorized\)'
@@ -87,7 +87,7 @@ os::cmd::expect_failure_and_text 'oc get pods' '"system:anonymous" cannot list p
 
 # make sure we report an error if the config file we pass is not writable
 # Does not work inside of a container, determine why and reenable
-# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--config=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
+# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--kubeconfig=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
 echo "login warnings: ok"
 
 # login and create serviceaccount and test login and logout with a service account token
@@ -107,11 +107,12 @@ os::cmd::expect_success 'oc project project-foo'
 os::cmd::expect_success_and_text 'oc config view' "current-context.+project-foo/${API_HOST}:${API_PORT}/test-user"
 os::cmd::expect_success_and_text 'oc whoami' 'test-user'
 os::cmd::expect_success_and_text "oc whoami --config='${login_kubeconfig}'" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
 os::cmd::expect_success_and_text 'oc whoami -t' '.'
 os::cmd::expect_success_and_text 'oc whoami -c' '.'
 
-# test config files from the --config flag
-os::cmd::expect_success "oc get services --config='${login_kubeconfig}'"
+# test config files from the --kubeconfig flag
+os::cmd::expect_success "oc get services --kubeconfig='${login_kubeconfig}'"
 # test config files from env vars
 os::cmd::expect_success "KUBECONFIG='${login_kubeconfig}' oc get services"
 os::test::junit::declare_suite_end
diff --git a/test/cmd/policy.sh b/test/cmd/policy.sh
index 0e4bccd6066a..96f814a65321 100755
--- a/test/cmd/policy.sh
+++ b/test/cmd/policy.sh
@@ -251,28 +251,28 @@ os::cmd::expect_success 'oc adm policy add-cluster-role-to-user alternate-cluste
 # switch to test user to be sure that default project admin policy works properly
 new_kubeconfig="${workingdir}/tempconfig"
 os::cmd::expect_success "oc config view --raw > $new_kubeconfig"
-os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --config=${new_kubeconfig}"
+os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --kubeconfig=${new_kubeconfig}"
 
 # alternate-cluster-admin should default to having star rights, so he should be able to update his role to that
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
 
 # alternate-cluster-admin can restrict himself to less groups (no star)
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/cluster_admin_without_apigroups.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
 
 # alternate-cluster-admin should NOT have the power add back star now (anything other than star is considered less so this mimics testing against no groups)
 os::cmd::try_until_failure "oc policy who-can update hpa.autoscaling | grep -q alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
+os::cmd::expect_failure_and_text "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
 
 # This test validates cluster level policy for serviceaccounts
 # ensure service account cannot list pods at the namespace level
diff --git a/test/cmd/status.sh b/test/cmd/status.sh
index 296880ac8716..dc688bda244a 100755
--- a/test/cmd/status.sh
+++ b/test/cmd/status.sh
@@ -27,7 +27,7 @@ os::cmd::try_until_text "oc get projects -o jsonpath='{.items}'" "^\[\]$"
 os::cmd::expect_success 'oc logout'
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 
 # login as 'test-user'
 os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything"
@@ -37,7 +37,7 @@ os::cmd::expect_success_and_text 'oc status' "You don't have any projects. Conta
 os::cmd::expect_success_and_text 'oc status --all-namespaces' "Showing all projects on server"
 # make sure standard login prompt is printed once self-provisioner status is restored
 os::cmd::expect_success "oc logout"
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 
 # make sure `oc status` re-uses the correct "no projects" message from `oc login`
diff --git a/test/extended/alternate_certs.sh b/test/extended/alternate_certs.sh
index d0640e427c40..8a39ec24abe3 100755
--- a/test/extended/alternate_certs.sh
+++ b/test/extended/alternate_certs.sh
@@ -52,24 +52,24 @@ OPENSHIFT_ON_PANIC=crash openshift start master \
 OS_PID=$!
 
 # Wait for the server to be up
-os::cmd::try_until_success "oc whoami --config=master/admin.kubeconfig"
+os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig"
 
 # Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both
 os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca'
 os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer'
 
 # Verify kubeconfigs have connectivity to hosts serving with custom and generated certs
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
 
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
 
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
 
 os::test::junit::declare_suite_end