- step by step
- OpenID Connect (OIDC)
- Configure Access to Multiple Clusters
- get info
- kubeadm-cfg.yml
- have fun
references:
- Create a kubeconfig file (创建 kubeconfig 文件)
- Create a k8s context (创建 k8s context)
- Configure Access to Multiple Clusters
- Organizing Cluster Access Using kubeconfig Files
- kubectl config
- Kubectl Config Set-Context | Tutorial and Best Practices
- Checklist: pros and cons of using multiple Kubernetes clusters, and how to distribute workloads between them
- kubectl config view
step by step
defining clusters
$ kubectl config set-cluster my-cluster --server=https://127.0.0.1:8087
modify server
$ kubectl config set-cluster NAME --server=https://10.69.114.92:6443
# or update the cluster referenced by the current context
# ( note: `current-context` returns a context name, not a cluster name )
$ kubectl config set-cluster "$(kubectl config view --minify -o jsonpath='{.clusters[0].name}')" --server=https://10.69.114.92:6443
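to verify the server was updated, it can be read back via jsonpath ( using the `my-cluster` entry defined above ):

$ kubectl config view -o jsonpath='{.clusters[?(@.name == "my-cluster")].cluster.server}'
https://127.0.0.1:8087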
defining users
using token
$ kubectl config set-credentials my-user --token=Py93bt12mT
using basic authentication
$ kubectl config set-credentials my-user --username=redhat-username --password=redhat-password
using certificates
$ kubectl config set-credentials my-user --client-certificate=redhat-certificate.crt --client-key=redhat-key.key
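to make the kubeconfig portable, the certificate and key can also be embedded as base64 data instead of referenced by path; a small sketch reusing the hypothetical file names above:

# --embed-certs=true stores the file contents in the kubeconfig itself
$ kubectl config set-credentials my-user \
          --client-certificate=redhat-certificate.crt \
          --client-key=redhat-key.key \
          --embed-certs=true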
defining contexts
$ kubectl config set-context my-context --cluster=my-cluster --user=my-user
by namespace
$ kubectl config set-context my-context --cluster=my-cluster --user=my-user --namespace=redhat-dev
set up a default namespace for the current context
$ kubectl config set-context --current --namespace=<my_namespace>
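to double-check which namespace the current context now defaults to ( a quick sanity check; output depends on your config ):

$ kubectl config view --minify -o jsonpath='{..namespace}'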
use contexts
$ kubectl config use-context my-context
verify
$ kubectl config get-contexts
CURRENT   NAME           CLUSTER          AUTHINFO    NAMESPACE
*         my-context     172.0.7.2:6443   my-user     redhat-dev
          my-context-2   172.1.8.0:6443   my-user-2

$ kubectl config current-context
my-context
OpenID Connect (OIDC)
[!NOTE|label:references:]
- Option 1 - OIDC Authenticator
$ kubectl config set-credentials USER_NAME \
          --auth-provider=oidc \
          --auth-provider-arg=idp-issuer-url=( issuer url ) \
          --auth-provider-arg=client-id=( your client id ) \
          --auth-provider-arg=client-secret=( your client secret ) \
          --auth-provider-arg=refresh-token=( your refresh token ) \
          --auth-provider-arg=idp-certificate-authority=( path to your ca certificate ) \
          --auth-provider-arg=id-token=( your id_token )
echo "-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
" \ > ca-kubernetes-staging.pem
## set cluster
kubectl config set-cluster kubernetes-staging \
--server=https://127.0.7.2:6443 \
--certificate-authority=ca-kubernetes-staging.pem \
--embed-certs
## set credential
kubectl config set-credentials marslo@kubernetes-staging \
--auth-provider=oidc \
--auth-provider-arg='idp-issuer-url=https://dex-k8s.domain.com/' \
--auth-provider-arg='client-id=dex-k8s-authenticator' \
--auth-provider-arg='client-secret=Z**********************0' \
--auth-provider-arg='refresh-token=C**********************************************************************n' \
--auth-provider-arg='id-token=e**********************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************A'
## set context
kubectl config set-context kubernetes-staging --cluster=kubernetes-staging --user=marslo@kubernetes-staging
## use context
kubectl config use-context kubernetes-staging
verify
# get id-token
$ kubectl config view -o jsonpath='{.users[?(@.name == "marslo@kubernetes-staging")].user.auth-provider.config.id-token}'

# get the password for the `e2e` user
$ kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'
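[!TIP]
the in-tree `oidc` auth provider is legacy in newer kubectl releases, which favor exec credential plugins. below is a sketch ( an assumed alternative, not part of the setup above ) using the third-party int128/kubelogin plugin against the same hypothetical Dex endpoint:

# install the plugin ( assumes krew is available )
$ kubectl krew install oidc-login

# register an exec credential plugin instead of --auth-provider=oidc
$ kubectl config set-credentials marslo@kubernetes-staging \
          --exec-api-version=client.authentication.k8s.io/v1beta1 \
          --exec-command=kubectl \
          --exec-arg=oidc-login \
          --exec-arg=get-token \
          --exec-arg=--oidc-issuer-url=https://dex-k8s.domain.com/ \
          --exec-arg=--oidc-client-id=dex-k8s-authenticator \
          --exec-arg=--oidc-client-secret=Z**********************0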
Configure Access to Multiple Clusters
[!TIP]
a configuration file describes `clusters`, `users`, and `contexts`
set clusters
# cluster development
$ kubectl config --kubeconfig=config-demo \
          set-cluster development \
          --server=https://1.2.3.4 \
          --certificate-authority=fake-ca-file

# cluster test
$ kubectl config --kubeconfig=config-demo \
          set-cluster test \
          --server=https://5.6.7.8 \
          --insecure-skip-tls-verify
set user
[!NOTE]
- to delete a user, run `kubectl --kubeconfig=config-demo config unset users.<name>`
- to remove a cluster, run `kubectl --kubeconfig=config-demo config unset clusters.<name>`
- to remove a context, run `kubectl --kubeconfig=config-demo config unset contexts.<name>`
# with client certificate
$ kubectl config --kubeconfig=config-demo \
          set-credentials developer \
          --client-certificate=fake-cert-file \
          --client-key=fake-key-seefile

# with basic authentication
$ kubectl config --kubeconfig=config-demo \
          set-credentials experimenter \
          --username=exp \
          --password=some-password
add context
# user developer, namespace frontend
$ kubectl config --kubeconfig=config-demo \
          set-context dev-frontend \
          --cluster=development \
          --namespace=frontend \
          --user=developer

# user developer, namespace storage
$ kubectl config --kubeconfig=config-demo \
          set-context dev-storage \
          --cluster=development \
          --namespace=storage \
          --user=developer

# user experimenter
$ kubectl config --kubeconfig=config-demo \
          set-context exp-test \
          --cluster=test \
          --namespace=default \
          --user=experimenter
result
$ kubectl config --kubeconfig=config-demo get-contexts
CURRENT   NAME           CLUSTER       AUTHINFO       NAMESPACE
          dev-frontend   development   developer      frontend
          dev-storage    development   developer      storage
          exp-test       test          experimenter   default

$ kubectl config --kubeconfig=config-demo view
apiVersion: v1
clusters:
- cluster:
    certificate-authority: fake-ca-file
    server: https://1.2.3.4
  name: development
- cluster:
    insecure-skip-tls-verify: true
    server: https://5.6.7.8
  name: test
contexts:
- context:
    cluster: development
    namespace: frontend
    user: developer
  name: dev-frontend
- context:
    cluster: development
    namespace: storage
    user: developer
  name: dev-storage
- context:
    cluster: test
    namespace: default
    user: experimenter
  name: exp-test
current-context: ""
kind: Config
preferences: {}
users:
- name: developer
  user:
    client-certificate: fake-cert-file
    client-key: fake-key-seefile
- name: experimenter
  user:
    password: some-password
    username: exp
use context
$ kubectl config --kubeconfig=config-demo use-context dev-frontend
Switched to context "dev-frontend".

$ kubectl config --kubeconfig=config-demo get-contexts
CURRENT   NAME           CLUSTER       AUTHINFO       NAMESPACE
*         dev-frontend   development   developer      frontend
          dev-storage    development   developer      storage
          exp-test       test          experimenter   default

$ kubectl config --kubeconfig=config-demo view --minify
apiVersion: v1
clusters:
- cluster:
    certificate-authority: fake-ca-file
    server: https://1.2.3.4
  name: development
contexts:
- context:
    cluster: development
    namespace: frontend
    user: developer
  name: dev-frontend
current-context: dev-frontend
kind: Config
preferences: {}
users:
- name: developer
  user:
    client-certificate: fake-cert-file
    client-key: fake-key-seefile
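the minified view pairs well with `--flatten` ( which inlines referenced certificate files ) to export the current context as a single self-contained kubeconfig; a small sketch ( the output file name is arbitrary ):

$ kubectl config --kubeconfig=config-demo view --minify --flatten > dev-frontend.kubeconfig
$ kubectl --kubeconfig=dev-frontend.kubeconfig config current-context
dev-frontend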
Create a second configuration file
set the KUBECONFIG environment variable
save the current value of KUBECONFIG before changing it
- linux
  $ export KUBECONFIG_SAVED="$KUBECONFIG"
- windows
  > $Env:KUBECONFIG_SAVED=$ENV:KUBECONFIG

temporarily append two paths to your KUBECONFIG environment variable
- linux
  $ export KUBECONFIG="${KUBECONFIG}:config-demo:config-demo-2"
- windows
  > $Env:KUBECONFIG=("config-demo;config-demo-2")

append $HOME/.kube/config to your KUBECONFIG environment variable
- linux
  $ export KUBECONFIG="${KUBECONFIG}:${HOME}/.kube/config"
- windows
  > $Env:KUBECONFIG="$Env:KUBECONFIG;$HOME\.kube\config"

Clean up
restore KUBECONFIG to its saved value
- linux
  $ export KUBECONFIG="$KUBECONFIG_SAVED"
- windows
  > $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED
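with several paths on KUBECONFIG, `kubectl config view` shows the merged result, and `--flatten` collapses it into one portable file; a quick sketch with the demo files above ( the name merged-kubeconfig is arbitrary ):

$ KUBECONFIG=config-demo:config-demo-2 kubectl config view --flatten > merged-kubeconfig
$ kubectl --kubeconfig=merged-kubeconfig config get-contexts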
with Proxy
$ kubectl config set-cluster <my-cluster-name> --proxy-url=<my-proxy-url>
# e.g.
$ kubectl config set-cluster development --proxy-url=http://sample.proxy.com:3128
result
apiVersion: v1
kind: Config
clusters:
- cluster:
    proxy-url: http://proxy.example.org:3128
    server: https://k8s.example.org/k8s/clusters/c-xxyyzz
  name: development
users:
- name: developer
contexts:
- context:
  name: development
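the proxy setting can be read back like any other cluster field; a quick check against the development cluster above:

$ kubectl config view -o jsonpath='{.clusters[?(@.name == "development")].cluster.proxy-url}'
http://proxy.example.org:3128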
get info
basic view
get contexts list
$ kubectl config --kubeconfig=config-demo get-contexts
CURRENT   NAME           CLUSTER       AUTHINFO       NAMESPACE
*         dev-frontend   development   developer      frontend
          dev-storage    development   developer      storage
          exp-test       test          experimenter   default
get current context
$ kubectl config --kubeconfig=config-demo current-context
dev-frontend
get clusters
$ kubectl config --kubeconfig=config-demo get-clusters
NAME
development
test
get users
$ kubectl config --kubeconfig=config-demo get-users
NAME
developer
experimenter
server IP
by cluster name
# get all cluster names
$ kubectl config --kubeconfig=config-demo view -o jsonpath="{.clusters[*].name}"
development test
$ kubectl config --kubeconfig=config-demo view \
-o jsonpath='{.clusters[?(@.name == "development")].cluster.server}'
https://1.2.3.4
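to list every cluster name together with its server in one command, jsonpath's range syntax can iterate over .clusters ( against the same config-demo file ):

$ kubectl config --kubeconfig=config-demo view \
          -o jsonpath='{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}'
development	https://1.2.3.4
test	https://5.6.7.8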
current in-use via --minify
[!NOTE]
`--minify` : remove all information not used by current-context from the output ( per `kubectl config view --help`; default false )
$ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
https://1.2.3.4
# or
$ kubectl config view --minify -o jsonpath="{.clusters[].cluster.server}"
https://1.2.3.4
# more info
$ kubectl config view --minify -o jsonpath="{.clusters[*].name}"
development
# or
$ kubectl config view --minify -o jsonpath="{.clusters[].name}"
development
current in-use via current-context
# current-context returns the context name; resolve its cluster first, then query that cluster's server
$ kubectl config --kubeconfig=config-demo current-context
dev-frontend
$ cluster=$(kubectl config --kubeconfig=config-demo view \
            -o jsonpath="{.contexts[?(@.name == \"$(kubectl config --kubeconfig=config-demo current-context)\")].context.cluster}")
$ kubectl config --kubeconfig=config-demo view \
          -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}"
https://1.2.3.4
get user
$ kubectl config --kubeconfig=config-demo view -o jsonpath='{.users[*].name}'
developer experimenter
get password
$ kubectl config --kubeconfig=config-demo view -o jsonpath='{.users[?(@.name == "experimenter")]}'
{"name":"experimenter","user":{"password":"some-password","username":"exp"}}
$ kubectl config --kubeconfig=config-demo view -o jsonpath='{.users[?(@.name == "experimenter")].user.password}'
some-password
get key
$ kubectl config --kubeconfig=config-demo view -o jsonpath='{.users[?(@.name == "developer")]}'
{"name":"developer","user":{"client-certificate":"fake-cert-file","client-key":"fake-key-seefile"}}
# for kubeconfigs with embedded credentials ( client-certificate-data / client-key-data ),
# the fields are base64 encoded, and `view` redacts them unless --raw is given
$ kubectl config view --raw -o jsonpath='{.users[?(@.name == "developer")].user.client-certificate-data}' | base64 -d
$ kubectl config --kubeconfig=config-demo view -o jsonpath='{.users[?(@.name == "developer")].user.client-key}'
fake-key-seefile
# or, for an embedded key
$ kubectl config view --raw -o jsonpath='{.users[?(@.name == "developer")].user.client-key-data}' | base64 -d
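once the certificate data is decodable, one practical follow-up is checking its subject and expiry with openssl; a sketch assuming the developer user carries embedded client-certificate-data ( not the plain file paths used in config-demo ):

$ kubectl config view --raw \
          -o jsonpath='{.users[?(@.name == "developer")].user.client-certificate-data}' \
  | base64 -d \
  | openssl x509 -noout -subject -enddate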
kubeadm-cfg.yml
$ kubectl get cm kubeadm-config -n kube-system -o=jsonpath="{.data.ClusterConfiguration}"
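the same jsonpath can redirect the embedded ClusterConfiguration into the kubeadm-cfg.yml file this section is named after:

$ kubectl get cm kubeadm-config -n kube-system -o=jsonpath="{.data.ClusterConfiguration}" > kubeadm-cfg.yml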
have fun
- dump the context, cluster and user of a given context as a single JSON document:

  exec >/tmp/output && CONTEXT_NAME=kubernetes-admin@kubernetes \
  CONTEXT_CLUSTER=$(kubectl config view -o=jsonpath="{.contexts[?(@.name==\"${CONTEXT_NAME}\")].context.cluster}") \
  CONTEXT_USER=$(kubectl config view -o=jsonpath="{.contexts[?(@.name==\"${CONTEXT_NAME}\")].context.user}") && \
  echo "[" && \
  kubectl config view -o=json | jq -j --arg CONTEXT_NAME "$CONTEXT_NAME" '.contexts[] | select(.name==$CONTEXT_NAME)' && \
  echo "," && \
  kubectl config view -o=json | jq -j --arg CONTEXT_CLUSTER "$CONTEXT_CLUSTER" '.clusters[] | select(.name==$CONTEXT_CLUSTER)' && \
  echo "," && \
  kubectl config view -o=json | jq -j --arg CONTEXT_USER "$CONTEXT_USER" '.users[] | select(.name==$CONTEXT_USER)' && \
  echo -e "\n]\n" && \
  exec >/dev/tty && \
  cat /tmp/output | jq && \
  rm -rf /tmp/output
- or
$ kubectl config view -o json | jq '
    . as $o
  | ."current-context" as $current_context_name
  | $o.contexts[] | select(.name == $current_context_name)    as $context
  | $o.clusters[] | select(.name == $context.context.cluster) as $cluster
  | $o.users[]    | select(.name == $context.context.user)    as $user
  | { "current-context-name": $current_context_name, context: $context, cluster: $cluster, user: $user }
'