Difference between revisions of "Install AWX on K3S"

From Bitbull Wiki
Jump to navigation Jump to search
Line 48: Line 48:
 
kubectl config set-context --namespace=$NAMESPACE --current
 
kubectl config set-context --namespace=$NAMESPACE --current
  
cd ; mkdir git ; cd git
+
cd ; mkdir awx ; cd awx
git clone https://github.com/ansible/awx-operator.git -b 0.16.0
+
vim kustomization.yaml
cd awx-operator/
+
------
make deploy
+
---
# cd ~/git/awx-operator/config/manager && /usr/local/sbin/kustomize edit set image controller=quay.io/ansible/awx-operator:0.16.0
+
apiVersion: kustomize.config.k8s.io/v1beta1
# cd ~/git/awx-operator && /usr/local/sbin/kustomize build config/default | kubectl apply -f -
+
kind: Kustomization
 +
resources:
 +
  # Find the latest tag here: https://github.com/ansible/awx-operator/releases
 +
  - github.com/ansible/awx-operator/config/default?ref=0.20.0
 +
  - awx-bitbull.yml
  
kubectl get pods -n $NAMESPACE
+
# Set the image tags to match the git version from above
NAME                                              READY   STATUS    RESTARTS  AGE
+
images:
awx-operator-controller-manager-6795f9f5f5-xq4nt  2/2     Running  0         6m1s
+
   - name: quay.io/ansible/awx-operator
 +
     newTag: 0.20.0
  
cd ~/git/
+
# Specify a custom namespace in which to install AWX
vim awx-instance.yml
+
namespace: awx
 +
------
 +
vim awx-bitbull.yml
 
------
 
------
 
---
 
---
Line 66: Line 73:
 
kind: AWX
 
kind: AWX
 
metadata:
 
metadata:
   name: awx
+
   name: bitbull
 
spec:
 
spec:
   ingress_type: Ingress
+
   ingress_type: route
   route_tls_termination_mechanism: edge
+
   route_host: ansible.apps.bitbull.ch
   hostname: tower.domain.com
+
   route_tls_termination_mechanism: Edge
 
------
 
------
  
kubectl apply -f awx-instance.yml
+
kustomize build . | kubectl apply -f -
 
 
kubectl get all
 
kubectl logs -f deployment.apps/awx-operator-controller-manager -c awx-manager
 
kubectl get events -w
 
 
 
kubectl get deployment
 
NAME                              READY  UP-TO-DATE  AVAILABLE  AGE
 
awx-operator-controller-manager  1/1    1            1          30m
 
awx                              1/1    1            1          19m
 
  
kubectl get ingress
 
NAME          CLASS    HOSTS        ADDRESS        PORTS  AGE
 
awx-ingress  <none>  tower.domain.com  10.112.8.203  80      19m
 
 
kubectl get svc
 
NAME                                              TYPE        CLUSTER-IP      EXTERNAL-IP  PORT(S)    AGE
 
awx-operator-controller-manager-metrics-service  ClusterIP  10.43.225.102  <none>        8443/TCP  33m
 
awx-postgres                                      ClusterIP  None            <none>        5432/TCP  23m
 
awx-service                                      ClusterIP  10.43.155.232  <none>        80/TCP    22m
 
 
</pre>
 
</pre>
  
 
=Fetch the secret and test the login=
 
=Fetch the secret and test the login=
  kubectl get secret awx-admin-password -o jsonpath='{.data.password}' | base64 --decode
+
  kubectl get secret bitbull-admin-password -o jsonpath='{.data.password}' | base64 --decode
  
 
  firefox https://fqdn.domain.com
 
  firefox https://fqdn.domain.com
Line 209: Line 198:
 
</pre>
 
</pre>
  
[[Category:Ansible]]
+
 
[[Category:K3S]]
+
 
[[Category:OpenShift & K8S]]
+
 
  
 
==AWX CLI==
 
==AWX CLI==
Line 222: Line 211:
 
</pre>
 
</pre>
  
 +
[[Category:Ansible]]
 
[[Category:K3S]]
 
[[Category:K3S]]
 
[[Category:OpenShift & K8S]]
 
[[Category:OpenShift & K8S]]
[[Category:Ansible]]
 

Revision as of 19:26, 17 April 2022

Since version 18, AWX, the community edition of Ansible Tower, gets deployed by a Kubernetes operator.
This makes it easier to install and maintain the installation, but not all of us are familiar with Kubernetes and operators.
So I share a short step-by-step guide on how to set up Ansible AWX in a "semi-professional" way on a single-node K3s Kubernetes cluster.

1 VM Setup

1.1 VM requirements

Just set up a CentOS 8 minimal VM with the following requirements

  • OS: centos8 minimal
  • CPU: 2
  • MEM: 8GB (6 GB may work as well)
  • DISK: 40G (7GB used on a fresh setup)

1.2 Prepare OS

dnf -y upgrade
dnf -y install setroubleshoot-server curl lsof wget make
sed -i  '/swap/d' /etc/fstab
swapoff -a
firewall-cmd --permanent --zone=public --add-service=https
firewall-cmd --zone=public --add-masquerade --permanent
firewall-cmd --reload
reboot



2 Setup K3S

curl -sfL https://get.k3s.io | sh
cat /etc/systemd/system/k3s.service
systemctl status k3s
kubectl get nodes
# all pods in running state? fine!
kubectl get pods --all-namespaces
# install kustomize
cd /usr/local/sbin/
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"  | bash

3 Deploy AWX

export NAMESPACE=awx
kubectl create namespace $NAMESPACE
kubectl config set-context --namespace=$NAMESPACE --current

cd ; mkdir awx ; cd awx
vim kustomization.yaml
------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Find the latest tag here: https://github.com/ansible/awx-operator/releases
  - github.com/ansible/awx-operator/config/default?ref=0.20.0
  - awx-bitbull.yml

# Set the image tags to match the git version from above
images:
  - name: quay.io/ansible/awx-operator
    newTag: 0.20.0

# Specify a custom namespace in which to install AWX
namespace: awx
------
vim awx-bitbull.yml
------
---
apiVersion: awx.ansible.com/v1beta1
kind: AWX
metadata:
  name: bitbull
spec:
  ingress_type: route
  route_host: ansible.apps.bitbull.ch
  route_tls_termination_mechanism: Edge
------

kustomize build . | kubectl apply -f -

4 Fetch the secret and test the login

kubectl get secret bitbull-admin-password -o jsonpath='{.data.password}' | base64 --decode
firefox https://fqdn.domain.com
  • user: admin

5 Links

6 Debug Notes

6.1 Open Node Port for direct access

PORT=$(kubectl describe svc awx-service | grep NodePort: | awk '{print $3}' | tr 'A-Z' 'a-z')
echo PORT=$PORT
firewall-cmd --zone=public --add-port=$PORT

6.2 Disable SELinux

setenforce 0
> /var/log/audit/audit.log 
# do some bad things
sealert -a /var/log/audit/audit.log

6.3 Traefik Config

- https://levelup.gitconnected.com/a-guide-to-k3s-ingress-using-traefik-with-nodeport-6eb29add0b4b

kubectl -n kube-system edit cm traefik

6.4 Jump into container for debugging

# get pods
kubectl get pods
# get containers inside of pods
kubectl describe pod <pod-name>
kubectl exec --stdin --tty <pod-name> -c <container-name> -- /bin/bash

7 Notes

7.1 Backup

vim awx-backup.yml
---
apiVersion: awx.ansible.com/v1beta1
kind: AWXBackup
metadata:
  name: awxbackup-20220311
  namespace: awx
spec:
  deployment_name: awx
...
oc apply -f awx-backup.yml
oc get awxbackups awxbackup-20220311 -o yaml
apiVersion: awx.ansible.com/v1beta1
kind: AWXBackup
...
name: awxbackup-20220311
...
status:
  backupClaim: awx-backup-claim
  backupDirectory: /backups/tower-openshift-backup-2022-03-11-05:45:56
  conditions:
  - lastTransitionTime: "2022-03-11T05:45:07Z"
    reason: Successful
    status: "True"
    type: Running

Keep that exported manifest as well, for disaster-recovery (DR) reasons

oc get awxbackups awxbackup-20220311 -o yaml > awxbackup-20220311.yml
ll /srv/nfs/pv05/tower-openshift-backup-2022-03-11-05\:45\:56/
total 13072
-rw-r--r--. 1 1000680000 root      600 Mar 11 06:46 awx_object
-rw-r--r--. 1 1000680000 root      670 Mar 11 06:46 secrets.yml
-rw-------. 1 1000680000 root 13377441 Mar 11 06:46 tower.db

7.2 Restore

vim awx-restore.yml
---
apiVersion: awx.ansible.com/v1beta1
kind: AWXRestore
metadata:
  name: awxrestore-20220311
  namespace: awx
spec:
  deployment_name: awx
  backup_name: awxbackup-20220311
  backup_pvc_namespace: 'awx' # in my case, I restore into the same namespace
...
oc apply -f awx-restore.yml
oc get awxrestores -o yaml

7.3 okd 4.10 instance template

apiVersion: awx.ansible.com/v1beta1
kind: AWX
metadata:
  name: bitbull
spec:
  ingress_type: route
  route_host: awx.domain.com
  route_tls_termination_mechanism: edge



7.4 AWX CLI

pip3 install awxkit
export TOWER_HOST=https://awx.domain.com TOWER_USERNAME=admin TOWER_PASSWORD=xxx
awx login admin
awx export > export.json