Monday 27 June 2022

Creating and configuring a Kubernetes cluster with kubeadm: one main (control-plane) node and 2 worker nodes

 

#on the main node

sudo hostnamectl set-hostname main.example.com

exec bash


#on node 1

sudo hostnamectl set-hostname client-node-1.example.com

exec bash


#on node 2

sudo hostnamectl set-hostname client-node-2.example.com

exec bash
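
#optional, on all three machines: if these hostnames are not resolvable via DNS,
#add them to /etc/hosts (the private IPs below are the ones that appear later in
#this walkthrough; substitute your own)

cat <<EOF | sudo tee --append /etc/hosts
172.31.6.219   main.example.com
172.31.11.38   client-node-1.example.com
172.31.7.216   client-node-2.example.com
EOF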


#on the main node and on both worker nodes:

sudo kubeadm reset --force

sudo mkdir -p /etc/docker

sudo tee /etc/docker/daemon.json <<EOF

{

  "exec-opts": ["native.cgroupdriver=systemd"],

  "log-driver": "json-file",

  "log-opts": {

    "max-size": "100m"

  },

  "storage-driver": "overlay2"

}

EOF

sudo systemctl enable docker

sudo systemctl daemon-reload

sudo systemctl restart docker
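
#optional sanity check: Docker should now report the systemd cgroup driver

sudo docker info 2>/dev/null | grep -i "cgroup driver"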

sudo swapoff -a
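
#swapoff -a only lasts until the next reboot; to keep swap disabled permanently,
#comment out any swap entries in /etc/fstab, for example (check the result afterwards):

sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab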


#on the main node only

log=${HOME}/install-leader.log

pod_network_cidr=192.168.0.0/16

sudo kubeadm init --pod-network-cidr ${pod_network_cidr} --ignore-preflight-errors all 2>&1 | tee ${log}

<...>

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.6.219:6443 --token 7ubi5y.8t39cqgtvz9cgkvs \

    --discovery-token-ca-cert-hash sha256:65823bb628114490d8fe03a12f444d6d1d6eb243308e8af630259815356e39fd 


#Run on the main node to allow a non-root user to use kubectl

mkdir -p ${HOME}/.kube

sudo cp /etc/kubernetes/admin.conf ${HOME}/.kube/config

sudo chown -R $( id -u ):$( id -g ) ${HOME}/.kube/


echo 'source <(kubectl completion bash)' | tee --append ${HOME}/.bashrc

source ${HOME}/.bashrc
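
#quick check that kubectl can reach the API server with the copied admin config

kubectl cluster-info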


#On the main node, install the Weave Net CNI plugin to provide the pod network

kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')

serviceaccount/weave-net created

clusterrole.rbac.authorization.k8s.io/weave-net created

clusterrolebinding.rbac.authorization.k8s.io/weave-net created

role.rbac.authorization.k8s.io/weave-net created

rolebinding.rbac.authorization.k8s.io/weave-net created

daemonset.apps/weave-net created


kubectl get no

NAME               STATUS   ROLES    AGE   VERSION

main.example.com   Ready    master   27m   v1.19.4
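
#optional: the node only turns Ready once the CNI pods are up; the Weave
#DaemonSet pods can be checked with (the "name=weave-net" label comes from the
#Weave manifest applied above)

kubectl -n kube-system get po -l name=weave-net -o wide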


#On each worker node, run the kubeadm join command copied from the kubeadm init output

sudo kubeadm join 172.31.6.219:6443 --token 7ubi5y.8t39cqgtvz9cgkvs     --discovery-token-ca-cert-hash sha256:65823bb628114490d8fe03a12f444d6d1d6eb243308e8af630259815356e39fd

[preflight] Running pre-flight checks

[preflight] Reading configuration from the cluster...

[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Starting the kubelet

[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...


This node has joined the cluster:

* Certificate signing request was sent to apiserver and a response was received.

* The Kubelet was informed of the new secure connection details.


Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


#Alternatively, if the join command was not saved or the token has expired,
#run the following on the main node to create a new token and print the join command

sudo kubeadm token create --print-join-command
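
#existing tokens and their expiry dates can be inspected with

sudo kubeadm token list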


kubectl get no

NAME                        STATUS   ROLES    AGE     VERSION

client-node-1.example.com   Ready    <none>   4m6s    v1.19.4

client-node-2.example.com   Ready    <none>   3m33s   v1.19.4

main.example.com            Ready    master   33m     v1.19.4
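
#optional: the worker ROLES show up as <none>; a role label can be added so they
#display as "worker" (the label is purely cosmetic and the name below is just a
#common convention)

kubectl label node client-node-1.example.com node-role.kubernetes.io/worker=worker

kubectl label node client-node-2.example.com node-role.kubernetes.io/worker=worker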


kubectl get ns

NAME              STATUS   AGE

default           Active   35m

kube-node-lease   Active   35m

kube-public       Active   35m

kube-system       Active   35m


kubectl get po -A

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE

kube-system   coredns-f9fd979d6-bxtbx                    1/1     Running   0          35m

kube-system   coredns-f9fd979d6-d8qz8                    1/1     Running   0          35m

kube-system   etcd-main.example.com                      1/1     Running   0          36m

kube-system   kube-apiserver-main.example.com            1/1     Running   0          36m

kube-system   kube-controller-manager-main.example.com   1/1     Running   0          36m

kube-system   kube-proxy-9k5pz                           1/1     Running   0          35m

kube-system   kube-proxy-g2555                           1/1     Running   0          6m3s

kube-system   kube-proxy-rlvrd                           1/1     Running   0          6m36s

kube-system   kube-scheduler-main.example.com            1/1     Running   0          35m

kube-system   weave-net-fks2v                            2/2     Running   1          21m

kube-system   weave-net-phbf7                            2/2     Running   0          6m3s

kube-system   weave-net-xskw5                            2/2     Running   1          6m36s


kubectl get po -A -o wide

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE   IP             NODE                        NOMINATED NODE   READINESS GATES

kube-system   coredns-f9fd979d6-bxtbx                    1/1     Running   0          50m   10.32.0.2      main.example.com            <none>           <none>

kube-system   coredns-f9fd979d6-d8qz8                    1/1     Running   0          50m   10.32.0.3      main.example.com            <none>           <none>

kube-system   etcd-main.example.com                      1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-apiserver-main.example.com            1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-controller-manager-main.example.com   1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-proxy-9k5pz                           1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-proxy-g2555                           1/1     Running   0          20m   172.31.7.216   client-node-2.example.com   <none>           <none>

kube-system   kube-proxy-rlvrd                           1/1     Running   0          21m   172.31.11.38   client-node-1.example.com   <none>           <none>

kube-system   kube-scheduler-main.example.com            1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   weave-net-fks2v                            2/2     Running   1          36m   172.31.6.219   main.example.com            <none>           <none>

kube-system   weave-net-phbf7                            2/2     Running   0          20m   172.31.7.216   client-node-2.example.com   <none>           <none>

kube-system   weave-net-xskw5                            2/2     Running   1          21m   172.31.11.38   client-node-1.example.com   <none>           <none>
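
#a quick smoke test to confirm that pods get scheduled onto the worker nodes
#(the deployment name and nginx image are arbitrary choices for this check)

kubectl create deployment nginx-test --image=nginx

kubectl scale deployment nginx-test --replicas=2

kubectl get po -o wide

kubectl delete deployment nginx-test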

