accept --kubeconfig like kubectl #20721

Merged · 3 commits · Aug 23, 2018
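This change makes oc accept the kubectl-style --kubeconfig flag for pointing at an explicit client config file, and switches the test suite over from oc's historical --config spelling. A minimal before/after sketch (the path is illustrative; one login.sh test below keeps the old spelling alongside the new one, which suggests both are accepted):

# Historical oc spelling:
oc whoami --config=/path/to/admin.kubeconfig

# kubectl-style spelling accepted after this change:
oc whoami --kubeconfig=/path/to/admin.kubeconfig

# The KUBECONFIG environment variable works as well:
KUBECONFIG=/path/to/admin.kubeconfig oc whoami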
282 changes: 278 additions & 4 deletions contrib/completions/bash/oc

Large diffs are not rendered by default.

282 changes: 278 additions & 4 deletions contrib/completions/zsh/oc

Large diffs are not rendered by default.
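The bash and zsh completion files are generated artifacts that now advertise the new flag; a hedged example of loading the bash version from a checkout of this repository:

# From the repository root, load the regenerated completions
# into the current bash session:
source contrib/completions/bash/oc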

2 changes: 1 addition & 1 deletion hack/test-cmd.sh
@@ -104,7 +104,7 @@ for test in "${tests[@]}"; do
 done
 
 os::log::debug "Metrics information logged to ${LOG_DIR}/metrics.log"
-oc get --raw /metrics --config="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
+oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
 
 if [[ -n "${failed:-}" ]]; then
 exit 1
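For context, oc get --raw issues a GET against the given API path and prints the raw response; a hedged sketch of inspecting the same endpoint interactively (same path and variable as the test):

# Dump the first lines of the master's /metrics endpoint using the
# admin credentials; the output is Prometheus text format.
oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig" | head -n 20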
11 changes: 6 additions & 5 deletions test/cmd/login.sh
@@ -44,10 +44,10 @@ if [[ "${API_SCHEME}" == "https" ]]; then
 fi
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. Contact your system administrator to request a project"
 # make sure standard login prompt is printed once self-provisioner status is restored
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 # make sure `oc login` fails with unauthorized error
 os::cmd::expect_failure_and_text 'oc login <<< \n' 'Login failed \(401 Unauthorized\)'
@@ -87,7 +87,7 @@ os::cmd::expect_failure_and_text 'oc get pods' '"system:anonymous" cannot list p
 
 # make sure we report an error if the config file we pass is not writable
 # Does not work inside of a container, determine why and reenable
-# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--config=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
+# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--kubeconfig=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
 echo "login warnings: ok"
 
 # login and create serviceaccount and test login and logout with a service account token
@@ -107,11 +107,12 @@ os::cmd::expect_success 'oc project project-foo'
 os::cmd::expect_success_and_text 'oc config view' "current-context.+project-foo/${API_HOST}:${API_PORT}/test-user"
 os::cmd::expect_success_and_text 'oc whoami' 'test-user'
 os::cmd::expect_success_and_text "oc whoami --config='${login_kubeconfig}'" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
 os::cmd::expect_success_and_text 'oc whoami -t' '.'
 os::cmd::expect_success_and_text 'oc whoami -c' '.'
 
-# test config files from the --config flag
-os::cmd::expect_success "oc get services --config='${login_kubeconfig}'"
+# test config files from the --kubeconfig flag
+os::cmd::expect_success "oc get services --kubeconfig='${login_kubeconfig}'"
 # test config files from env vars
 os::cmd::expect_success "KUBECONFIG='${login_kubeconfig}' oc get services"
 os::test::junit::declare_suite_end
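The tail of this file exercises both ways of supplying an explicit config; a hedged sketch of the equivalence being tested (login_kubeconfig is the variable from the script):

# These two invocations should resolve to the same config file:
oc get services --kubeconfig="${login_kubeconfig}"
KUBECONFIG="${login_kubeconfig}" oc get services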
8 changes: 4 additions & 4 deletions test/cmd/policy.sh
@@ -251,28 +251,28 @@ os::cmd::expect_success 'oc adm policy add-cluster-role-to-user alternate-cluste
 # switch to test user to be sure that default project admin policy works properly
 new_kubeconfig="${workingdir}/tempconfig"
 os::cmd::expect_success "oc config view --raw > $new_kubeconfig"
-os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --config=${new_kubeconfig}"
+os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --kubeconfig=${new_kubeconfig}"
 
 # alternate-cluster-admin should default to having star rights, so he should be able to update his role to that
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
 
 # alternate-cluster-admin can restrict himself to less groups (no star)
 os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/cluster_admin_without_apigroups.yaml
-os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
+os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
 
 # alternate-cluster-admin should NOT have the power add back star now (anything other than star is considered less so this mimics testing against no groups)
 os::cmd::try_until_failure "oc policy who-can update hpa.autoscaling | grep -q alternate-cluster-admin-user"
 resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
 cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
 os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
-os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
+os::cmd::expect_failure_and_text "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
 
 # This test validates cluster level policy for serviceaccounts
 # ensure service account cannot list pods at the namespace level
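Each replace in this test is pinned to the object's current resourceVersion so that a stale write is rejected (optimistic concurrency); a standalone sketch of the pattern, with role-template.yaml standing in for the bootstrappolicy fixtures:

# Read the live object's resourceVersion, stamp it into the manifest,
# then replace; the server rejects the write if the version has moved on.
rv=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath='{.metadata.resourceVersion}')
sed "s/RESOURCE_VERSION/${rv}/g" role-template.yaml > role.yaml
oc replace --kubeconfig="${new_kubeconfig}" clusterrole/alternate-cluster-admin -f role.yaml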
4 changes: 2 additions & 2 deletions test/cmd/status.sh
@@ -27,7 +27,7 @@ os::cmd::try_until_text "oc get projects -o jsonpath='{.items}'" "^\[\]$"
 os::cmd::expect_success 'oc logout'
 
 # remove self-provisioner role from user and test login prompt before creating any projects
-os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 
 # login as 'test-user'
 os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything"
@@ -37,7 +37,7 @@ os::cmd::expect_success_and_text 'oc status' "You don't have any projects. Conta
 os::cmd::expect_success_and_text 'oc status --all-namespaces' "Showing all projects on server"
 # make sure standard login prompt is printed once self-provisioner status is restored
 os::cmd::expect_success "oc logout"
-os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
+os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
 os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
 
 # make sure `oc status` re-uses the correct "no projects" message from `oc login`
20 changes: 10 additions & 10 deletions test/extended/alternate_certs.sh
@@ -52,24 +52,24 @@ OPENSHIFT_ON_PANIC=crash openshift start master \
 OS_PID=$!
 
 # Wait for the server to be up
-os::cmd::try_until_success "oc whoami --config=master/admin.kubeconfig"
+os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig"
 
 # Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both
 os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca'
 os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer'
 
 # Verify kubeconfigs have connectivity to hosts serving with custom and generated certs
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
-os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
 
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
-os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
 
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
-os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
+os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
 
 os::test::junit::declare_suite_end
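These checks rely on command-line flags taking precedence over the corresponding kubeconfig fields, so one set of credentials can be replayed against each serving address; a hedged sketch:

# The kubeconfig supplies the user credentials and CA data, while
# --server overrides the target address stored in the same file:
oc whoami --kubeconfig=master/admin.kubeconfig --server="https://localhost:${API_PORT}"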

Some generated files are not rendered by default.