switch test-cmd tests use mostly kubeconfig

deads2k committed Aug 21, 2018
1 parent 2804dd4 commit 16a6227
Showing 7 changed files with 31 additions and 30 deletions.
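
Every hunk below makes the same mechanical substitution: the oc-specific --config flag is replaced by --kubeconfig, the spelling kubectl uses to point a client at an explicit config file. As a minimal sketch of the equivalence being adopted (the path is illustrative):

  # both select an explicit client config instead of ~/.kube/config
  oc get pods --kubeconfig=/path/to/admin.kubeconfig   # standard spelling, kept
  KUBECONFIG=/path/to/admin.kubeconfig oc get pods     # environment variable, unchanged
  oc get pods --config=/path/to/admin.kubeconfig       # oc-only spelling, being retired
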
hack/test-cmd.sh (2 changes: 1 addition & 1 deletion)
@@ -104,7 +104,7 @@ for test in "${tests[@]}"; do
done

os::log::debug "Metrics information logged to ${LOG_DIR}/metrics.log"
-oc get --raw /metrics --config="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
+oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"

if [[ -n "${failed:-}" ]]; then
exit 1
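
The only change here is the metrics capture line. For reference, oc get --raw fetches an arbitrary API path using whatever credentials the selected config carries; a rough standalone equivalent (ADMIN_KUBECONFIG is a placeholder):

  # dump the API server's Prometheus metrics using explicit admin credentials
  oc get --raw /metrics --kubeconfig="$ADMIN_KUBECONFIG" > metrics.log
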
test/cmd/config.sh (8 changes: 4 additions & 4 deletions)
@@ -34,15 +34,15 @@ os::cmd::expect_success_and_not_text 'oc get bc' 'does not exist'
# need some level of default (both upstream and here) to get the pretty auth message because you fail on namespace first.
os::cmd::expect_failure_and_text 'KUBERNETES_MASTER=anything env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --user="test"' 'auth info "test" does not exist'

-os::cmd::expect_failure_and_text 'oc get bc --config=missing' 'missing: no such file or directory'
+os::cmd::expect_failure_and_text 'oc get bc --kubeconfig=missing' 'missing: no such file or directory'

# define temp location for new config
NEW_CONFIG_LOC="${BASETMPDIR}/new-config.yaml"

# make sure non-existing --cluster and --user can still be set
os::cmd::expect_success_and_text "oc config set-context new-context-name --cluster=missing-cluster --user=missing-user --namespace=default --config='${NEW_CONFIG_LOC}'" 'Context "new-context-name" '
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST -u KUBECONFIG -u KUBERNETES_MASTER oc get buildconfigs --config='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --config='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
os::cmd::expect_success_and_text "oc config set-context new-context-name --cluster=missing-cluster --user=missing-user --namespace=default --kubeconfig='${NEW_CONFIG_LOC}'" 'Context "new-context-name" '
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST -u KUBECONFIG -u KUBERNETES_MASTER oc get buildconfigs --kubeconfig='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --kubeconfig='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
)
echo "config error handling: ok"
os::test::junit::declare_suite_end
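
These assertions exercise oc's client-config error paths. A rough standalone restatement of the three behaviors being pinned down (NEW_CONFIG_LOC stands in for any scratch path):

  # a nonexistent config file must fail fast, not fall back to defaults
  oc get bc --kubeconfig=missing                      # "missing: no such file or directory"
  # set-context may reference cluster/user stanzas that do not exist yet...
  oc config set-context new-context-name --cluster=missing-cluster \
      --user=missing-user --namespace=default --kubeconfig="$NEW_CONFIG_LOC"
  # ...but using that config without real credentials must still fail
  oc get buildconfigs --kubeconfig="$NEW_CONFIG_LOC"  # "Missing or incomplete configuration info"
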
test/cmd/login.sh (13 changes: 7 additions & 6 deletions)
@@ -44,10 +44,10 @@ if [[ "${API_SCHEME}" == "https" ]]; then
fi

# remove self-provisioner role from user and test login prompt before creating any projects
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. Contact your system administrator to request a project"
# make sure standard login prompt is printed once self-provisioner status is restored
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
# make sure `oc login` fails with unauthorized error
os::cmd::expect_failure_and_text 'oc login <<< \n' 'Login failed \(401 Unauthorized\)'
@@ -87,7 +87,7 @@ os::cmd::expect_failure_and_text 'oc get pods' '"system:anonymous" cannot list p

# make sure we report an error if the config file we pass is not writable
# Does not work inside of a container, determine why and reenable
-# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--config=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
+# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--kubeconfig=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
echo "login warnings: ok"

# login and create serviceaccount and test login and logout with a service account token
@@ -106,12 +106,13 @@ os::cmd::expect_success 'oc get projects'
os::cmd::expect_success 'oc project project-foo'
os::cmd::expect_success_and_text 'oc config view' "current-context.+project-foo/${API_HOST}:${API_PORT}/test-user"
os::cmd::expect_success_and_text 'oc whoami' 'test-user'
os::cmd::expect_success_and_text "oc whoami --config='${login_kubeconfig}'" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
os::cmd::expect_success_and_text 'oc whoami -t' '.'
os::cmd::expect_success_and_text 'oc whoami -c' '.'

-# test config files from the --config flag
-os::cmd::expect_success "oc get services --config='${login_kubeconfig}'"
+# test config files from the --kubeconfig flag
+os::cmd::expect_success "oc get services --kubeconfig='${login_kubeconfig}'"
# test config files from env vars
os::cmd::expect_success "KUBECONFIG='${login_kubeconfig}' oc get services"
os::test::junit::declare_suite_end
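
The tail of this file pins down the two ways a config file can be selected, and that an explicit file bypasses the session created by oc login. Condensed, with login_kubeconfig as the suite's admin config:

  oc whoami                                          # test-user, from the login session
  oc whoami --kubeconfig="$login_kubeconfig"         # system:admin, the flag overrides the session
  KUBECONFIG="$login_kubeconfig" oc get services     # environment variable, used when no flag is given
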
test/cmd/policy.sh (8 changes: 4 additions & 4 deletions)
@@ -251,28 +251,28 @@ os::cmd::expect_success 'oc adm policy add-cluster-role-to-user alternate-cluste
# switch to test user to be sure that default project admin policy works properly
new_kubeconfig="${workingdir}/tempconfig"
os::cmd::expect_success "oc config view --raw > $new_kubeconfig"
os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --config=${new_kubeconfig}"
os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --kubeconfig=${new_kubeconfig}"

# alternate-cluster-admin should default to having star rights, so he should be able to update his role to that
os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"

# alternate-cluster-admin can restrict himself to less groups (no star)
os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
cp ${OS_ROOT}/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml ${workingdir}
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/cluster_admin_without_apigroups.yaml
os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"

# alternate-cluster-admin should NOT have the power add back star now (anything other than star is considered less so this mimics testing against no groups)
os::cmd::try_until_failure "oc policy who-can update hpa.autoscaling | grep -q alternate-cluster-admin-user"
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
os::cmd::expect_failure_and_text "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"

# This test validates cluster level policy for serviceaccounts
# ensure service account cannot list pods at the namespace level
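
Beyond the flag rename, these hunks show the suite's pattern for a full-object oc replace of a cluster role: the template pins metadata.resourceVersion, so the live value is captured and stamped into a working copy before the write; a stale value would be rejected with a conflict. The steps, as used above:

  # capture the current resourceVersion of the live object
  resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
  # substitute it into a working copy of the manifest (RESOURCE_VERSION is the template placeholder)
  os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" "${workingdir}/alternate_cluster_admin.yaml"
  # replace as the less-privileged user, via the freshly written kubeconfig
  oc replace --kubeconfig="${new_kubeconfig}" clusterrole/alternate-cluster-admin \
      -f "${workingdir}/alternate_cluster_admin.yaml"
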
test/cmd/status.sh (4 changes: 2 additions & 2 deletions)
@@ -27,7 +27,7 @@ os::cmd::try_until_text "oc get projects -o jsonpath='{.items}'" "^\[\]$"
os::cmd::expect_success 'oc logout'

# remove self-provisioner role from user and test login prompt before creating any projects
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"

# login as 'test-user'
os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything"
@@ -37,7 +37,7 @@ os::cmd::expect_success_and_text 'oc status' "You don't have any projects. Conta
os::cmd::expect_success_and_text 'oc status --all-namespaces' "Showing all projects on server"
# make sure standard login prompt is printed once self-provisioner status is restored
os::cmd::expect_success "oc logout"
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything" "You don't have any projects. You can try to create a new project, by running"

# make sure `oc status` re-uses the correct "no projects" message from `oc login`
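
Both hunks here are the same admin-credential pattern as in login.sh: an explicitly specified admin kubeconfig toggles the self-provisioner role so the prompts seen by an unprivileged user can be asserted. Condensed:

  # drop self-provisioning for all authenticated users, acting as the file's admin identity
  oc adm policy remove-cluster-role-from-group self-provisioner \
      system:authenticated:oauth --kubeconfig="$login_kubeconfig"
  # ... log in as a plain user and assert the "contact your administrator" message ...
  # then restore the role the same way
  oc adm policy add-cluster-role-to-group self-provisioner \
      system:authenticated:oauth --kubeconfig="$login_kubeconfig"
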
test/cmd/volumes.sh (6 changes: 3 additions & 3 deletions)
@@ -21,7 +21,7 @@ os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vo
os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vol2 --type=emptydir -m /opt'
os::cmd::expect_failure_and_text "oc set volume dc/test-deployment-config --add --name=vol1 --type=secret --secret-name='\$ecret' -m /data" 'overwrite to replace'
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol10 --secret-name='my-secret' -m /data-2"
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol11 --configmap-name='my-configmap' -m /data-21"
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol11 --kubeconfigmap-name='my-configmap' -m /data-21"
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.containers[0].volumeMounts}' '/data-21'
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.volumes[4].configMap}' 'my-configmap'
os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vol1 --type=emptyDir -m /data --overwrite'
@@ -48,7 +48,7 @@ os::cmd::expect_success 'oc set volume dc/test-deployment-config --remove --conf

os::cmd::expect_failure "oc set volume dc/test-deployment-config --add -t 'secret' --secret-name='asdf' --default-mode '888'"

os::cmd::expect_success "oc set volume dc/test-deployment-config --add -t 'configmap' --configmap-name='asdf' --default-mode '123'"
os::cmd::expect_success "oc set volume dc/test-deployment-config --add -t 'configmap' --kubeconfigmap-name='asdf' --default-mode '123'"
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.volumes[0]}' '83'
os::cmd::expect_success 'oc set volume dc/test-deployment-config --remove --confirm'

@@ -87,7 +87,7 @@ spec:

os::cmd::expect_success_and_text 'oc get dc simple-dc' 'simple-dc'
os::cmd::expect_success 'oc create cm cmvol'
-os::cmd::expect_success 'oc set volume dc/simple-dc --add --name=cmvolume --type=configmap --configmap-name=cmvol'
+os::cmd::expect_success 'oc set volume dc/simple-dc --add --name=cmvolume --type=configmap --kubeconfigmap-name=cmvol'
os::cmd::expect_success_and_text 'oc set volume dc/simple-dc' 'configMap/cmvol as cmvolume'

# command alias
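
Note that the three additions in this file look like collateral damage from the rename rather than intentional changes: oc set volume has no --kubeconfigmap-name flag, and the flag on the removed lines, --configmap-name, is the valid spelling, so a blanket s/--config/--kubeconfig/ substitution appears to have over-matched. The commands were presumably meant to stay as, for example:

  # --configmap-name (not --kubeconfigmap-name) names the config map to mount
  oc set volume dc/simple-dc --add --name=cmvolume --type=configmap --configmap-name=cmvol
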
test/extended/alternate_certs.sh (20 changes: 10 additions & 10 deletions)
@@ -52,24 +52,24 @@ OPENSHIFT_ON_PANIC=crash openshift start master \
OS_PID=$!

# Wait for the server to be up
-os::cmd::try_until_success "oc whoami --config=master/admin.kubeconfig"
+os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig"

# Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both
os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca'
os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer'

# Verify kubeconfigs have connectivity to hosts serving with custom and generated certs
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'

os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig" 'system:openshift-master'
os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'

os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig" 'system:node:mynode'
os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'

os::test::junit::declare_suite_end

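
The pattern repeated through this file: each generated kubeconfig is verified against its default server and then against explicit --server overrides for both addresses the master serves on, proving the bundled ca-bundle.crt covers the custom and the generated serving certificates alike. Condensed for one identity:

  # the same identity must resolve no matter which serving certificate is hit
  oc whoami --kubeconfig=master/admin.kubeconfig                                            # default server
  oc whoami --kubeconfig=master/admin.kubeconfig --server="https://localhost:${API_PORT}"   # custom-CA cert
  oc whoami --kubeconfig=master/admin.kubeconfig --server="https://127.0.0.1:${API_PORT}"   # generated-CA cert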
