OpenShift & K8S Cheat Sheet

From Bitbull Wiki
Jump to navigation Jump to search

1 Kubernetes

1.1 Setup

1.1.1 oc

# Install the latest stable oc client into ~/bin.
# NOTE(review): the download URL was lost in the wiki export; using the
# official OpenShift client mirror for the stable release.
mkdir -p $HOME/bin ; cd $HOME/bin
curl -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz --output openshift-client-linux.tar.gz
tar vxfz openshift-client-linux.tar.gz
rm -f openshift-client-linux.tar.gz

1.1.2 kubectl-neat

# Install kubectl-neat (strips server-added clutter from exported manifests).
# NOTE(review): the download line was lost in the wiki export; fetching the
# latest release tarball from the upstream GitHub project.
mkdir -p $HOME/bin ; cd $HOME/bin
rm -f kubectl-neat
curl -L https://github.com/itaysk/kubectl-neat/releases/latest/download/kubectl-neat_linux_amd64.tar.gz --output kubectl-neat_linux_amd64.tar.gz
tar vxfz kubectl-neat_linux_amd64.tar.gz
# chmod must come AFTER extraction — the binary does not exist before it.
chmod 700 kubectl-neat
rm -f LICENSE kubectl-neat_linux_amd64.tar.gz

1.1.3 kubectl

# Install the latest stable kubectl into ~/bin.
mkdir -p $HOME/bin ; cd $HOME/bin
rm -f kubectl
# Official stable-release download (the URL was lost in the wiki export):
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod 700 kubectl
# Enable bash completion once, idempotently.
grep 'kubectl completion bash' $HOME/.bashrc || echo 'source <(kubectl completion bash)' >> $HOME/.bashrc

1.1.4 kustomize

cd $HOME/bin
# Official kustomize installer script (the URL was lost in the wiki export);
# it drops the kustomize binary into the current directory.
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash

1.1.5 helm

# Official Helm 3 installer script (the URL was lost in the wiki export).
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Enable bash completion once, idempotently.
grep 'helm completion bash' $HOME/.bashrc || echo 'source <(helm completion bash)' >> $HOME/.bashrc

1.2 Daily


1.2.1 SHELL

  • Authenticate Kubectl
# Point kubectl/oc/helm at a specific cluster config for this shell session.
export KUBECONFIG=<PATH-TO-CONFIG>/kubeconfig-dev.yaml

1.2.2 Use Helm

export KUBECONFIG=/etc/rancher/k3s/k3s.yaml # for K3S
helm search hub wordpress --max-col-width 120
# The repo URLs were lost in the wiki export; these are the official ones.
helm repo add stable https://charts.helm.sh/stable
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

1.2.3 Install APP with Helm

# Demo: install WordPress via the Bitnami Helm chart in its own namespace.
kubectl create namespace wordpress
kubectl config set-context --current --namespace=wordpress
helm install my-wordpress bitnami/wordpress
kubectl get all
# Drop the chart's service; it is re-exposed on port 8080 below.
kubectl delete service my-wordpress
kubectl get pods
kubectl exec my-wordpress-55dc589b4c-ldkxj -- printenv | grep HTTP
kubectl expose deployment/my-wordpress --port=8080 --name=my-wordpress
kubectl get service
# The --rule argument was mangled in the wiki export; kubectl create ingress
# needs NAME plus --rule="host/path=service:port".
kubectl create ingress ingress-www --rule="www.example.com/*=my-wordpress:8080"
# User: user
# Password: $(kubectl get secret --namespace wordpress my-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode)

helm delete my-wordpress
kubectl delete namespace wordpress

1.2.4 Export current Namespace Objects

# Dump every object of the listed kinds in the current namespace to one
# YAML file per object ("do"/"done" were lost in the wiki export).
# NOTE(review): --export was removed in kubectl 1.18; on newer clusters
# drop the flag and pipe through kubectl-neat instead.
for n in $(kubectl get -o=name pvc,configmap,ingress,service,secret,deployment,statefulset,hpa,job,cronjob | grep -v 'secret/default-token'); do
   kubectl get -o=yaml --export $n > $(dirname $n)_$(basename $n).yaml
done

1.2.5 Export Objects for all Namespaces

# Export objects of many kinds across all namespaces, one YAML file per
# object under <namespace>/. The wiki export dropped most of the loop
# plumbing (NAME column, "do", counter init, "fi"/"done"); reconstructed:
# the loop consumes the NAMESPACE KIND NAME columns word by word, using a
# counter i to track which column the current word belongs to.
i=0
for n in $(kubectl get -o=custom-columns=NAMESPACE:.metadata.namespace,KIND:.kind,NAME:.metadata.name pv,pvc,configmap,ingress,service,secret,deployment,statefulset,hpa,job,cronjob,serviceaccount --all-namespaces | grep -v 'secrets/default-token'); do
    if (( $i < 1 )); then
        namespace=$n
        # PersistentVolumes are cluster-scoped — when the kind shows up in
        # the namespace column, shift the parsing state by one word.
        if [[ "$namespace" == "PersistentVolume" ]]; then
            kind=$namespace
            namespace=""
            i=$((i+1))
        fi
        i=$((i+1))
    elif (( $i < 2 )); then
        kind=$n
        i=$((i+1))
    elif (( $i < 3 )); then
        name=$n
        i=0
        # Skip the header row of the table output.
        if [[ "$namespace" != "NAMESPACE" ]]; then
            mkdir -p $namespace
            yaml=$((kubectl get $kind -o=yaml $name -n $namespace ) 2>/dev/null)
            # NOTE(review): intent of the "type:" check is unclear —
            # presumably it filters service-account token secrets; kept as found.
            if [[ $kind != 'Secret' || $yaml != *"type:"* ]]; then
                echo "Saving ${namespace}/${kind}.${name}.yaml"
                kubectl get $kind -o=yaml $name -n $namespace > $namespace/$kind.$name.yaml
            fi
        fi
    fi
done

1.2.6 Export all Manifests in all Namespaces

# Print the full YAML manifests of every non-empty api-resource across all
# namespaces ("do" and "fi" were lost in the wiki export).
while read -r line; do
    # "items: []" means this resource kind currently has no objects.
    output=$(kubectl get "$line" --all-namespaces -o yaml 2>/dev/null | grep '^items:')
    if ! grep -q "\[\]" <<< $output; then
        echo -e "\n======== "$line" manifests ========\n"
        kubectl get "$line" --all-namespaces -o yaml
    fi
done < <(kubectl api-resources | awk '{print $1}' | grep -v '^NAME' | grep -v events | sort -u)

1.2.7 .bash_profile

  • openshift project/user prompt
# Load kubectl completion and show the active namespace in the prompt.
source <(kubectl completion bash)
export PS1='### \D{%d.%m.%Y_%H:%M} \u@\e[1;32m\h\e[m:\w \e[1;33m✯ $(kubectl config view -o jsonpath="{.contexts[].context.namespace}")\e[m \n# '
  • password gen
# Generate a random password; length from $1, default 16 characters.
# (The closing brace was lost in the wiki export.)
genpasswd() {
   local l=$1
   [ "$l" == "" ] && l=16
   tr -dc A-Za-z0-9_=., < /dev/urandom | head -c ${l} | xargs
}

1.3 Deploy

1.3.1 Create Secret

# Prompt for credentials — the "read username" line was lost in the wiki
# export, yet $username is used below; -r is safe, -s hides the password.
read -r username
read -r -s password

kubectl create secret generic db-user-pass --from-literal=username=$username --from-literal=password="$password"

# Read the stored password back:
kubectl get secret db-user-pass -o jsonpath='{.data.password}' | base64 --decode

# ----REF IN POD------ (container env entry referencing the secret; the
# valueFrom/secretKeyRef lines were truncated in the wiki export)
#   env:
#     - name: SECRET_USERNAME
#       valueFrom:
#         secretKeyRef:
#           name: db-user-pass
#           key: username

1.3.2 Create and change into Namespace

# Create a namespace and make it the default for subsequent kubectl calls.
kubectl create namespace mynamespace
kubectl config set-context --current --namespace=mynamespace

1.3.3 Deploy Docker Container and Expose Service

# Run the XFCE/VNC demo image as a deployment listening on port 6901.
kubectl create deployment xfce --image=christian773/xfce-vnc:latest --port=6901

1.3.4 Inject VARS into Deployment/Container

kubectl edit deployment nginx1
# In the editor, add an env list to the container spec (the "env:" key was
# lost in the wiki export; the fragment is YAML, not shell, so it is
# commented here):
#   - name: nginx
#     image: nginx:1.7.9
#     env:
#     - name: MY_VAT
#       value: MY_VALUE

1.3.5 Get Environment VARS of Pod

# Dump the environment variables of the pod's (first) container.
kubectl exec pod-name -- printenv

1.3.6 Expose Container Port as Service

# Expose the deployment internally as a ClusterIP service on port 8080.
kubectl expose deployment nginx-app --port=8080 --name=nginx-service

1.3.7 Create Ingress Rule for Service

# The --rule argument was mangled in the wiki export; kubectl create ingress
# needs NAME plus --rule="host/path=service:port".
kubectl create ingress ingress-www --rule="www.example.com/*=nginx-service:8080"

1.3.8 Docker Pull Secret

# Create a registry pull secret, link it to the default service account
# for image pulls, then use it as source secret for a new app.
oc create secret docker-registry docker --docker-username=username1 --docker-password=xcgsedfgsdfhgsdfg
oc secrets link default docker --for=pull
oc new-app --name wiki --image image.registry.tld/image:tag --source-secret=docker

1.4 Change/Update

1.4.1 Force Deployment to pull (latest)

# Changing any pod-template field (here: a timestamp env var) triggers a
# new rollout, which re-pulls :latest depending on imagePullPolicy.
oc get deployments
oc set env deployment/myapp UPDATE_TIMESTAMP="$(date)"
oc rollout status deployment/myapp
oc describe pod [POD_NAME]

1.5 Configure

1.5.1 Traefik Config


# Edit the Traefik deployment and its configmap (K3S default ingress).
kubectl edit deployments traefik -n kube-system
kubectl -n kube-system edit cm traefik

1.6 Debug

1.6.1 Run Container with custom Command

# Start an interactive throwaway busybox pod running a custom command.
kubectl run -i --tty busybox --image=busybox -- sh

1.6.2 Attach Container in Pod (log)

# Re-attach to the running busybox container (stdin + tty).
kubectl attach busybox -c busybox -i -t

1.6.3 Jump into Pods Shell

# Open an interactive shell inside the shell-demo pod.
kubectl exec --stdin --tty shell-demo -- /bin/bash

1.6.4 Get all Containers on all Namespaces

# List every container image in use, with usage counts.
# Fix: 'space:' is not a valid tr class — it must be '[[:space:]]';
# otherwise tr squeezes the literal characters s,p,a,c,e,: instead.
kubectl get pods --all-namespaces -o jsonpath="{..image}" |tr -s '[[:space:]]' '\n' |sort |uniq -c

1.6.5 Get all Containers grouped by Pods for all Namespaces

# List containers grouped by pod; the pod-name jsonpath expression was
# lost in the wiki export ({} -> {.metadata.name}).
kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |sort

1.6.6 Open Firewall for NodePort (non persistent)

# Open the firewall for every NodePort ("do"/"done" were lost in the wiki
# export). awk $3 yields e.g. "30080/TCP"; tr lowercases it into the
# "30080/tcp" form firewall-cmd expects. Non-persistent (no --permanent).
kubectl describe svc | grep NodePort: | awk '{print $3}' | tr 'A-Z' 'a-z' | grep -v '^$' | while read PORT; do
  echo PORT=$PORT
  firewall-cmd --zone=public --add-port=$PORT
done

2 OpenShift

2.1 Install

2.1.1 OC & KUBECTL (OKD)

# Install oc/kubectl from the latest OKD release. The release-page URL was
# lost in the wiki export; resolving the client tarball via the GitHub
# releases API instead — TODO confirm against the original wiki link.
mkdir -p $HOME/bin ; cd $HOME/bin
rm -f oc
wget "$(curl -Ls https://api.github.com/repos/okd-project/okd/releases/latest | grep browser_download_url | grep openshift-client-linux | cut -d\" -f4)"
tar vxfz openshift-client-linux-*.tar.gz
rm -rf openshift-client-linux-*.tar.gz
grep 'oc completion bash' $HOME/.bashrc || echo 'source <(oc completion bash)' >> $HOME/.bashrc
grep 'kubectl completion bash' $HOME/.bashrc || echo 'source <(kubectl completion bash)' >> $HOME/.bashrc

2.1.2 OC & KUBECTL (OpenShift)

# Install oc/kubectl for OpenShift. The download line for oc.tar.gz was
# lost in the wiki export; using the official client mirror — TODO confirm
# against the cluster's own console download link.
mkdir -p $HOME/bin ; cd $HOME/bin
rm -f oc kubectl
curl -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz --output oc.tar.gz
tar vxfz oc.tar.gz
rm -f oc.tar.gz
grep 'oc completion bash' $HOME/.bashrc || echo 'source <(oc completion bash)' >> $HOME/.bashrc
grep 'kubectl completion bash' $HOME/.bashrc || echo 'source <(kubectl completion bash)' >> $HOME/.bashrc

2.1.3 .bash_profile

  • openshift project/user prompt
# Prompt helpers showing the current OpenShift project/user; ps1e adds
# date/time. (Closing braces were lost in the wiki export.)
function ps1(){
   export PS1='[\u@\h($(oc whoami -c 2>/dev/null|cut -d/ -f3,1)) \W]\$ '
}
function ps1e(){
   export PS1='# [\d \t \u@\h($(oc whoami -c 2>/dev/null|cut -d/ -f3,1)) \W]\n$ '
}
  • password gen
# Generate a random password; length from $1, default 16 characters.
# (The closing brace was lost in the wiki export.)
genpasswd() {
        local l=$1
        [ "$l" == "" ] && l=16
        tr -dc A-Za-z0-9_=%.,: < /dev/urandom | head -c ${l} | xargs
}

2.2 Debug

2.2.1 etcd

# List etcd members from the etcd pod of the first Ready master node.
# The node label selector and the heredoc terminator were lost in the
# wiki export; masters carry the node-role.kubernetes.io/master= label.
M1=$(oc get nodes -l node-role.kubernetes.io/master= | grep Ready | head -1 | cut -d' ' -f1 )
oc rsh -n openshift-etcd etcd-$M1  <<EOF
etcdctl member list -w table
EOF

3 Administration

3.1 daily cmds

# Everyday cluster-overview commands.
oc get nodes -o wide
oc get all -o wide --all-namespaces
oc get ep -o wide
# Most recent events last:
oc get events --sort-by='.lastTimestamp'
oc get rolebindings --all-namespaces
oc get pv
oc get pvc
oc get projects
oc get users
oc get groups

3.2 inspect user/group permissions

# Who can do what — per namespace and cluster-wide.
oc get rolebinding -o wide -n gitea
oc get rolebinding -o wide --all-namespaces

3.3 inspect imagestreams

# Inspect builder imagestreams in the shared "openshift" namespace.
oc get is -n openshift
oc describe is php -n openshift
# NOTE(review): "oc export" was deprecated and removed in oc 4.x — use
# "oc get ... -o yaml" on newer clients.
oc export -n openshift isimage php@42c4a9072f

4 snippets

4.1 run as root (anyuid)

# Allow a workload to run as any UID via a dedicated service account
# bound to the anyuid SCC.
oc create serviceaccount sa-anyuid
oc adm policy add-scc-to-user anyuid -z sa-anyuid
# create new-app before to get a dc
oc patch dc/deployment-config-name --patch '{"spec":{"template":{"spec":{"serviceAccountName": "sa-anyuid"}}}}'

4.2 run as root (anyuid) for every pod in project

# Grants anyuid to the default service account — affects every pod in the project.
oc adm policy add-scc-to-user anyuid -z default

4.3 imagestream demo (build service)

  • get all the imagestreams
oc get is -n openshift
  • inspect nginx imagestream
oc describe is nginx -n openshift
  • setup new project
oc new-project is-demo
  • setup the dev environment
oc new-app --name=html-dev  nginx:1.10~
oc get all
oc logs -f builds/html-dev-1
oc get svc
oc expose svc/html-dev
oc get route
  • show this app to the qa team
oc get is
oc tag docker-registry.default.svc:5000/is-demo/html-dev:latest is-demo/html-qa:1.0
oc get is
oc new-app --name=html-qa --image-stream="is-demo/html-qa:1.0"
oc expose svc/html-qa
  • now go and make some changes to the git repo, then push it to github
  • now let's build the latest dev release
oc start-build html-dev
oc status
oc get pods
  • check dev application for latest changes
  • check if qa application remains in desired state
  • now commit the new dev branch to qa branch
oc get is
# Promote the current dev image to the qa 1.1 tag:
oc tag docker-registry.default.svc:5000/is-demo/html-dev html-qa:1.1
  • change the imagestream to newer release
oc edit dc/html-qa
oc get dc
oc get pods
  • check if qa application is reflecting latest changes from v1.1
  • now we rollback the qa release to v1.0
oc edit dc/html-qa
oc get dc
oc get pods
  • check if qa application is reflecting the rollbacked version v1.0

5 database backup

5.1 mariadb

  • install global backup template
oc create -f

5.2 mariadb database backup with oc

# Dump a MariaDB database from inside the pod. The $-signs must be escaped
# so the variables expand inside the container, not in the local shell —
# line 2 already did this; line 1 now matches.
oc rsh dc/mariadb /bin/sh -i -c "MYSQL_PWD=\$MYSQL_ROOT_PASSWORD /usr/bin/mysqldump -u root --skip-lock-tables \$MYSQL_DATABASE" > mysqldb.sql
oc rsh deployments/mariadb /bin/bash -i -c "MYSQL_PWD=\$MARIADB_ROOT_PASSWORD /usr/bin/mysqldump -u root --skip-lock-tables \$MYSQL_DATABASE" > mariadb.sql

5.3 postgresql dump with kubectl

# Quoting fix: the inner double quotes around xxx terminated the outer
# string early; single-quote the remote command instead.
kubectl exec zammad-postgresql-0 -- bash -c 'PGPASSWORD="xxx" pg_dump -Uzammad zammad_production' > zammad_production.sql

6 Maintenance

6.1 Databases

6.1.1 Mariadb login


6.2 CleanUp old docker images on nodes

Keeping up to three tag revisions 1, and keeping resources (images, image streams and pods) younger than sixty minutes:
# Dry run (no --confirm): keep 3 tag revisions and anything younger than 60m.
oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m
Pruning every image that exceeds defined limits:
# Dry run: prune images exceeding the project-defined size limits.
oc adm prune images --prune-over-size-limit
Copy-paste example:
# Same as the dry runs above, but actually deleting (--confirm).
oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
oc adm prune images --prune-over-size-limit --confirm