Thursday 30 June 2022

Creating and Configuring a Deployment in Kubernetes using the OpenShift hello-openshift Image


Creating the Deployment


kubectl create deployment myapp1 --image=docker.io/openshift/hello-openshift 

deployment.apps/myapp1 created


kubectl get deployment

NAME     READY   UP-TO-DATE   AVAILABLE   AGE

myapp1   1/1     1            1           14s


kubectl get po

NAME                     READY   STATUS    RESTARTS       AGE

myapp1-bcb89d7dd-hvxgk   1/1     Running   0              101s


Accessing the Pod


kubectl expose deployment myapp1 --port=8080

service/myapp1 exposed


kubectl get svc

NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE

kubernetes        ClusterIP   10.96.0.1       <none>        443/TCP        5d6h

myapp1            ClusterIP   10.111.40.120   <none>        8080/TCP       33s


curl 10.111.40.120:8080

Hello OpenShift!
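
#a ClusterIP service is reachable only from inside the cluster; as a sketch, the deployment could also be exposed externally with a NodePort service (the name myapp1-ext is an arbitrary choice):

kubectl expose deployment myapp1 --type=NodePort --port=8080 --name=myapp1-ext

kubectl get svc myapp1-ext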


Pod Example with the OpenShift hello-openshift Image


 ls

openshift-pod.yaml  openshift1-pod.yaml


 cat openshift-pod.yaml 

apiVersion: v1

kind: Pod

metadata:

  name: mypod1

  labels:

    mycka: round-robin

spec:

  containers:

  - name: mycontainer

    image: openshift/hello-openshift

    ports:

    - containerPort: 8080


kubectl create -f openshift-pod.yaml

pod/mypod1 created


cat openshift1-pod.yaml 

apiVersion: v1

kind: Pod

metadata:

  name: mypod2

  labels:

    mycka: round-robin

spec:

  containers:

  - name: mycontainer

    image: openshift/hello-openshift

    ports:

    - containerPort: 8080


kubectl create -f openshift1-pod.yaml

pod/mypod2 created


kubectl get pods -o wide

NAME                    READY   STATUS    RESTARTS       AGE    IP          NODE                 NOMINATED NODE   READINESS GATES

mypod1                  1/1     Running   0              33s    10.44.0.3   node-1.example.com   <none>           <none>

mypod2                  1/1     Running   0              9s     10.44.0.4   node-1.example.com   <none>           <none>


curl 10.44.0.3:8080

Hello OpenShift!


curl 10.44.0.4:8080

Hello OpenShift!
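
#both pods carry the label mycka: round-robin from their manifests, so they can be listed together with a label selector:

kubectl get pods -l mycka=round-robin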


Configure Pods in Kubernetes Cluster | Label and Service Example with Apache


 ls

pod.yaml  pod1.yaml  service.yaml


Configuring and setting up the Apache Pods


cat pod.yaml 

apiVersion: v1

kind: Pod

metadata:

  name: apache2

  labels:

    mycka: mylabel

spec:

  containers:

  - name: mycontainer

    image: docker.io/httpd

    ports:

    - containerPort: 80


kubectl create -f pod.yaml 

pod/apache2 created


 cat pod1.yaml 

apiVersion: v1

kind: Pod

metadata:

  name: apache3

  labels:

    mycka: mylabel

spec:

  containers:

  - name: mycontainer

    image: docker.io/httpd

    ports:

    - containerPort: 80


kubectl create -f pod1.yaml 

pod/apache3 created


kubectl get po

NAME                    READY   STATUS    RESTARTS       AGE

apache2                 1/1     Running   0              70s

apache3                 1/1     Running   0              15s


Configuring and setting up the Service 


cat service.yaml 

kind: Service

apiVersion: v1

metadata:

  name: myservice

spec:

  selector: 

      mycka: mylabel

  ports:

    - protocol: TCP

      port: 8081

      targetPort: 80


 kubectl create -f service.yaml 

service/myservice created


 kubectl get svc

NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE

kubernetes        ClusterIP   10.96.0.1      <none>        443/TCP        5d5h

myservice         ClusterIP   10.98.192.24   <none>        8081/TCP       8s


Exec-ing into the Apache Pods to set custom index pages


kubectl exec -it apache2 bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@apache2:/usr/local/apache2# echo "Hello from pod1" > htdocs/index.html

root@apache2:/usr/local/apache2# cat htdocs/index.html

Hello from pod1

root@apache2:/usr/local/apache2# exit

exit


kubectl exec -it apache3 bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@apache3:/usr/local/apache2# echo "Hello from pod2" > htdocs/index.html

root@apache3:/usr/local/apache2# cat htdocs/index.html

Hello from pod2

root@apache3:/usr/local/apache2# exit

exit


#myservice selects both apache2 and apache3 via the mycka=mylabel label

kubectl get svc -o wide

NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE     SELECTOR

kubernetes        ClusterIP   10.96.0.1      <none>        443/TCP        5d5h    <none>

myservice         ClusterIP   10.98.192.24   <none>        8081/TCP       3m43s   mycka=mylabel


#the service round-robins requests across both pods

curl 10.98.192.24:8081

Hello from pod1


curl 10.98.192.24:8081

Hello from pod2
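
#the service is also reachable by name through cluster DNS; a quick sketch using a throwaway busybox pod:

kubectl run tmp --rm -it --image=busybox --restart=Never -- wget -qO- http://myservice:8081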


Wednesday 29 June 2022

Kubernetes Demo | Pod Deployment | Web application with MySQL


Ref: - https://www.youtube.com/watch?v=0j-iIW3_sbg&t=1291s


ls

mysqldatabase.yaml  mysqlservice.yml  webapplication.yaml  webservice.yml


cat webapplication.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: webapp1

  labels:

    app: webapp-sql

    tier: frontend

spec:

  replicas: 1

  selector:

    matchLabels:

      app: webapp-sql

      tier: frontend

  template:

    metadata:

      labels:

        app: webapp-sql

        tier: frontend

    spec:

      containers:

      - name: webapp1

        image: hshar/webapp

        ports:

        - containerPort: 8081


 cat mysqldatabase.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: sqldb

  labels:

    app: webapp-sql

    tier: backend

spec:

  replicas: 1

  selector:

    matchLabels:

      app: webapp-sql

      tier: backend

  template:

    metadata:

      labels:

        app: webapp-sql

        tier: backend

    spec:

      containers:

        - name: mysql

          image: hshar/mysql:5.5

          ports:

            - containerPort: 3306


cat webservice.yml

apiVersion: v1

kind: Service

metadata:

  name: webapp-sql

spec:

  selector:

    app: webapp-sql

    tier: frontend

  ports:

  - port: 80

  type: NodePort


 cat mysqlservice.yml

apiVersion: v1

kind: Service

metadata:

  name: webapp-sql1

spec:

  selector:

    app: webapp-sql

    tier: backend

  ports:

    - port: 3306

  clusterIP:   # left empty, so Kubernetes auto-assigns a ClusterIP (shown below)


kubectl apply -f webapplication.yaml

deployment.apps/webapp1 created


kubectl apply -f mysqldatabase.yaml

deployment.apps/sqldb created


kubectl get deployment

NAME      READY   UP-TO-DATE   AVAILABLE   AGE

sqldb     1/1     1            1           69s

webapp1   1/1     1            1           80s


kubectl apply -f webservice.yml

service/webapp-sql created


kubectl apply -f mysqlservice.yml

service/webapp-sql1 created


kubectl get service

NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE

kubernetes    ClusterIP   10.96.0.1       <none>        443/TCP        5d7h

webapp-sql    NodePort    10.98.200.159   <none>        80:32025/TCP   19s

webapp-sql1   ClusterIP   10.97.173.97    <none>        3306/TCP       7s


kubectl get po

NAME                       READY   STATUS    RESTARTS   AGE

sqldb-6777dc65bb-ttkwk     1/1     Running   0          2m

webapp1-6996dfc8d8-2kl4q   1/1     Running   0          2m11s


kubectl exec -it webapp1-6996dfc8d8-2kl4q bash                   

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@webapp1-6996dfc8d8-2kl4q:/# nano var/www/html/index.php

<...>

$servername = "webapp-sql1";

$username = "root";

$password = "edureka";

$dbname = "Product_Details";

<...>

root@webapp1-6996dfc8d8-2kl4q:/# exit

exit


kubectl exec -it sqldb-6777dc65bb-ttkwk bash

root@sqldb-6777dc65bb-ttkwk:/# mysql -u root -pedureka

<...>

mysql> CREATE DATABASE Product_Details;

Query OK, 1 row affected (0.00 sec)

mysql> USE Product_Details;

Database changed

mysql> CREATE TABLE products ( product_name VARCHAR(10), product_id VARCHAR(15) );

Query OK, 0 rows affected (0.01 sec)

mysql> exit

Bye

root@sqldb-6777dc65bb-ttkwk:/# exit

exit


kubectl get services

NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE

kubernetes    ClusterIP   10.96.0.1       <none>        443/TCP        5d7h

webapp-sql    NodePort    10.98.200.159   <none>        80:32025/TCP   9m38s

webapp-sql1   ClusterIP   10.97.173.97    <none>        3306/TCP       9m26s


kubectl get po

NAME                       READY   STATUS    RESTARTS   AGE

sqldb-6777dc65bb-ttkwk     1/1     Running   0          28m

webapp1-6996dfc8d8-2kl4q   1/1     Running   0          29m


kubectl describe po/webapp1-6996dfc8d8-2kl4q | grep Node:

Node:         minikube-m02/192.168.49.3


http://192.168.49.3:32025/index.php
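
#verify from the host using the node IP and NodePort shown above:

curl http://192.168.49.3:32025/index.php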


Monday 27 June 2022

Create Pods with a Namespace in Kubernetes


#default namespace

kubectl get ns

NAME                   STATUS   AGE

default                Active   4d7h

kube-node-lease        Active   4d7h

kube-public            Active   4d7h

kube-system            Active   4d7h

kubernetes-dashboard   Active   4d7h


#to create a namespace

kubectl create ns mynamespace

namespace/mynamespace created


kubectl get ns

NAME                   STATUS   AGE

default                Active   4d7h

kube-node-lease        Active   4d7h

kube-public            Active   4d7h

kube-system            Active   4d7h

kubernetes-dashboard   Active   4d7h

mynamespace            Active   7s


#create a pod with mynamespace

kubectl run nginx --image=nginx --namespace mynamespace

pod/nginx created


kubectl get pods --namespace mynamespace

NAME    READY   STATUS    RESTARTS   AGE

nginx   1/1     Running   0          13s


#add 2nd pod with mynamespace

kubectl run nginx2 --image=nginx --namespace mynamespace

pod/nginx2 created


kubectl get pods --namespace mynamespace

NAME     READY   STATUS    RESTARTS   AGE

nginx    1/1     Running   0          76s

nginx2   1/1     Running   0          17s


#delete mynamespace

kubectl delete ns mynamespace

namespace "mynamespace" deleted


#all resources in mynamespace were deleted along with the namespace

kubectl delete ns mynamespace

Error from server (NotFound): namespaces "mynamespace" not found
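
#to avoid passing --namespace to every command, the default namespace of the current context can be switched:

kubectl config set-context --current --namespace=mynamespace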


Use Labels to Add, Remove, and Filter Pods in Kubernetes


kubectl run nginx1 --image=nginx

pod/nginx1 created


#see Labels

kubectl describe pod nginx1

Name:         nginx1

Namespace:    default

Priority:     0

Node:         minikube-m02/192.168.49.3

Start Time:   Mon, 27 Jun 2022 17:35:39 -0700

Labels:       run=nginx1

Annotations:  <none>

Status:       Running

IP:           172.17.0.5

IPs:

  IP:  172.17.0.5

Containers:

  nginx1:

    Container ID:   docker://876631db265761b3dbe3170433210257470606f776dff93ffd6                                                                                                             aca3a3ec8b8f9

    Image:          nginx

    Image ID:       docker-pullable://nginx@sha256:10f14ffa93f8dedf1057897b745e5                                                                                                             ac72ac5655c299dade0aa434c71557697ea

    Port:           <none>

    Host Port:      <none>

    State:          Running

      Started:      Mon, 27 Jun 2022 17:35:48 -0700

    Ready:          True

    Restart Count:  0

    Environment:    <none>

    Mounts:

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-8ddm2 (ro)

Conditions:

  Type              Status

  Initialized       True

  Ready             True

  ContainersReady   True

  PodScheduled      True

Volumes:

  kube-api-access-8ddm2:

    Type:                    Projected (a volume that contains injected data fro                                                                                                             m multiple sources)

    TokenExpirationSeconds:  3607

    ConfigMapName:           kube-root-ca.crt

    ConfigMapOptional:       <nil>

    DownwardAPI:             true

QoS Class:                   BestEffort

Node-Selectors:              <none>

Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s

                             node.kubernetes.io/unreachable:NoExecute op=Exists                                                                                                              for 300s

Events:

  Type    Reason     Age   From               Message

  ----    ------     ----  ----               -------

  Normal  Scheduled  17s   default-scheduler  Successfully assigned default/ngin                                                                                                             x1 to minikube-m02

  Normal  Pulling    17s   kubelet            Pulling image "nginx"

  Normal  Pulled     8s    kubelet            Successfully pulled image "nginx"                                                                                                              in 8.887080516s

  Normal  Created    8s    kubelet            Created container nginx1

  Normal  Started    8s    kubelet            Started container nginx1


#add label to a pod

kubectl label pod nginx1 environment=development

pod/nginx1 labeled


kubectl describe pod nginx1 | grep Labels

Labels:       environment=development


kubectl run nginx2 --image=nginx

pod/nginx2 created


kubectl label pod nginx2 environment=production

pod/nginx2 labeled


kubectl describe pod nginx2 | grep Labels

Labels:       environment=production


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

nginx1                      1/1     Running     0          5m50s

nginx2                      1/1     Running     0          36s


kubectl get pods --show-labels

NAME                        READY   STATUS      RESTARTS   AGE     LABELS

nginx1                      1/1     Running     0          6m11s   environment=development,run=nginx1

nginx2                      1/1     Running     0          57s     environment=production,run=nginx2


kubectl label pod nginx2 projectcode=3344

pod/nginx2 labeled


kubectl get pods --show-labels

NAME                        READY   STATUS      RESTARTS   AGE     LABELS

nginx1                      1/1     Running     0          7m      environment=development,run=nginx1

nginx2                      1/1     Running     0          106s    environment=production,projectcode=3344,run=nginx2


#remove label

kubectl label pod nginx1 run-

pod/nginx1 unlabeled


kubectl get pods --show-labels

NAME                        READY   STATUS      RESTARTS   AGE     LABELS

nginx1                      1/1     Running     0          7m39s   environment=development

nginx2                      1/1     Running     0          2m25s   environment=production,projectcode=3344,run=nginx2


kubectl label pod nginx2 run-

pod/nginx2 unlabeled


kubectl get pods --show-labels

NAME                        READY   STATUS      RESTARTS   AGE     LABELS

nginx1                      1/1     Running     0          8m3s    environment=development

nginx2                      1/1     Running     0          2m49s   environment=production,projectcode=3344


kubectl get pods --show-labels | grep development

nginx1                      1/1     Running     0          64m     environment=development
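
#grep works, but kubectl also supports label selectors directly:

kubectl get pods -l environment=development

kubectl get pods -l 'environment in (development,production)' --show-labels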


Kubernetes Pod Deployment and Editing Replicas


kubectl create deploy myweb --image=nginx

deployment.apps/myweb created


kubectl get deploy

NAME    READY   UP-TO-DATE   AVAILABLE   AGE

myweb   1/1     1            1           8s


kubectl get deploy myweb

NAME    READY   UP-TO-DATE   AVAILABLE   AGE

myweb   1/1     1            1           21s


kubectl describe deploy myweb

Name:                   myweb

Namespace:              default

CreationTimestamp:      Mon, 27 Jun 2022 17:18:35 -0700

Labels:                 app=myweb

Annotations:            deployment.kubernetes.io/revision: 1

Selector:               app=myweb

Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavai                                                                                                             lable

StrategyType:           RollingUpdate

MinReadySeconds:        0

RollingUpdateStrategy:  25% max unavailable, 25% max surge

Pod Template:

  Labels:  app=myweb

  Containers:

   nginx:

    Image:        nginx

    Port:         <none>

    Host Port:    <none>

    Environment:  <none>

    Mounts:       <none>

  Volumes:        <none>

Conditions:

  Type           Status  Reason

  ----           ------  ------

  Available      True    MinimumReplicasAvailable

  Progressing    True    NewReplicaSetAvailable

OldReplicaSets:  <none>

NewReplicaSet:   myweb-58d88b7dfb (1/1 replicas created)

Events:

  Type    Reason             Age   From                   Message

  ----    ------             ----  ----                   -------

  Normal  ScalingReplicaSet  43s   deployment-controller  Scaled up replica set                                                                                                              myweb-58d88b7dfb to 1


#edit the number of replicas to 2 (change spec.replicas in the editor)

kubectl edit deploy myweb

deployment.apps/myweb edited


kubectl get deploy

NAME    READY   UP-TO-DATE   AVAILABLE   AGE

myweb   2/2     2            2           118s


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

myweb-58d88b7dfb-6lj6k      1/1     Running     0          24s

myweb-58d88b7dfb-bxcj6      1/1     Running     0          2m4s


#likewise, edit the replicas to 3

kubectl edit deploy myweb

deployment.apps/myweb edited


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

myweb-58d88b7dfb-6lj6k      1/1     Running     0          65s

myweb-58d88b7dfb-bxcj6      1/1     Running     0          2m45s

myweb-58d88b7dfb-wb7jj      1/1     Running     0          11s


kubectl describe deploy myweb

Name:                   myweb

Namespace:              default

CreationTimestamp:      Mon, 27 Jun 2022 17:18:35 -0700

Labels:                 app=myweb

Annotations:            deployment.kubernetes.io/revision: 1

Selector:               app=myweb

Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavai                                                                                                             lable

StrategyType:           RollingUpdate

MinReadySeconds:        0

RollingUpdateStrategy:  25% max unavailable, 25% max surge

Pod Template:

  Labels:  app=myweb

  Containers:

   nginx:

    Image:        nginx

    Port:         <none>

    Host Port:    <none>

    Environment:  <none>

    Mounts:       <none>

  Volumes:        <none>

Conditions:

  Type           Status  Reason

  ----           ------  ------

  Progressing    True    NewReplicaSetAvailable

  Available      True    MinimumReplicasAvailable

OldReplicaSets:  <none>

NewReplicaSet:   myweb-58d88b7dfb (3/3 replicas created)

Events:

  Type    Reason             Age    From                   Message

  ----    ------             ----   ----                   -------

  Normal  ScalingReplicaSet  3m15s  deployment-controller  Scaled up replica set                                                                                                         myweb-58d88b7dfb to 1

  Normal  ScalingReplicaSet  95s    deployment-controller  Scaled up replica set                                                                                                              myweb-58d88b7dfb to 2

  Normal  ScalingReplicaSet  41s    deployment-controller  Scaled up replica set                                                                                                              myweb-58d88b7dfb to 3


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

myweb-58d88b7dfb-6lj6k      1/1     Running     0          111s

myweb-58d88b7dfb-bxcj6      1/1     Running     0          3m31s

myweb-58d88b7dfb-wb7jj      1/1     Running     0          57s


#delete one pod; the deployment maintains the desired number of replicas

kubectl delete pod myweb-58d88b7dfb-6lj6k

pod "myweb-58d88b7dfb-6lj6k" deleted


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

myweb-58d88b7dfb-bxcj6      1/1     Running     0          4m1s

myweb-58d88b7dfb-k5dwk      1/1     Running     0          5s

myweb-58d88b7dfb-wb7jj      1/1     Running     0          87s
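
#as an alternative to kubectl edit, the replica count can be changed non-interactively:

kubectl scale deploy myweb --replicas=3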


Creating a Job in Kubernetes | Example to Print the Date


cat job.yaml

apiVersion: batch/v1

kind: Job

metadata:

   name: mydatejob

spec:

  template:

    metadata:

    spec:

      containers:

      - image: busybox

        name: mydatejob

        command:

        - date

      restartPolicy: Never


kubectl create -f job.yaml

job.batch/mydatejob created


kubectl get pods

NAME                        READY   STATUS      RESTARTS   AGE

mydatejob-pt5tg             0/1     Completed   0          15s


kubectl get jobs

NAME                  COMPLETIONS   DURATION   AGE

mydatejob             1/1           6s         35s


kubectl logs mydatejob-pt5tg

Sat Jun 25 00:41:54 UTC 2022


kubectl delete job mydatejob

job.batch "mydatejob" deleted
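
#a Job can also run its pod to completion several times; a minimal sketch, where the completions and parallelism values are illustrative:

apiVersion: batch/v1

kind: Job

metadata:

  name: mydatejob

spec:

  completions: 3    # run the pod to completion 3 times

  parallelism: 1    # at most one pod at a time

  template:

    spec:

      containers:

      - image: busybox

        name: mydatejob

        command:

        - date

      restartPolicy: Never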


Kubernetes Daemon Set Demo

 

kubectl get no

NAME           STATUS   ROLES           AGE   VERSION

minikube       Ready    control-plane   29h   v1.24.1

minikube-m02   Ready    <none>          29h   v1.24.1


#list the kube-system pods; kube-proxy and kindnet are DaemonSet pods running on every node

kubectl get po -A -o wide | grep kube-system

kube-system            coredns-6d4b75cb6d-fmhvp                     1/1     Running     0             29h     172.17.0.2     minikube       <none>           <none>

kube-system            etcd-minikube                                1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kindnet-47txl                                1/1     Running     0             29h     192.168.49.3   minikube-m02   <none>           <none>

kube-system            kindnet-mxbdn                                1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-apiserver-minikube                      1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-controller-manager-minikube             1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-proxy-dj4wg                             1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-proxy-g2bpt                             1/1     Running     0             29h     192.168.49.3   minikube-m02   <none>           <none>

kube-system            kube-scheduler-minikube                      1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            storage-provisioner                          1/1     Running     1 (29h ago)   29h     192.168.49.2   minikube       <none>           <none>


 cat daemonset.yaml

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: fluentd-elasticsearch

  namespace: kube-system

  labels:

    k8s-app: fluentd-logging

spec:

  selector:

    matchLabels:

      name: fluentd-elasticsearch

  template:

    metadata:

      labels:

        name: fluentd-elasticsearch

    spec:

      tolerations:

      # this toleration is to have the daemonset runnable on master nodes

      # remove it if your masters can't run pods

      - key: node-role.kubernetes.io/master

        operator: Exists

        effect: NoSchedule

      containers:

      - name: fluentd-elasticsearch

        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2

        resources:

          limits:

            memory: 200Mi

          requests:

            cpu: 100m

            memory: 200Mi

        volumeMounts:

        - name: varlog

          mountPath: /var/log

        - name: varlibdockercontainers

          mountPath: /var/lib/docker/containers

          readOnly: true

      terminationGracePeriodSeconds: 30

      volumes:

      - name: varlog

        hostPath:

          path: /var/log

      - name: varlibdockercontainers

        hostPath:

          path: /var/lib/docker/containers


kubectl apply -f daemonset.yaml

daemonset.apps/fluentd-elasticsearch created


kubectl get po -A -o wide | grep kube-system

kube-system            coredns-6d4b75cb6d-fmhvp                     1/1     Running     0             29h     172.17.0.2     minikube       <none>           <none>

kube-system            etcd-minikube                                1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            fluentd-elasticsearch-q28pk                  1/1     Running     0             53s     172.17.0.3     minikube       <none>           <none>

kube-system            fluentd-elasticsearch-sqlgj                  1/1     Running     0             53s     172.17.0.5     minikube-m02   <none>           <none>

kube-system            kindnet-47txl                                1/1     Running     0             29h     192.168.49.3   minikube-m02   <none>           <none>

kube-system            kindnet-mxbdn                                1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-apiserver-minikube                      1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-controller-manager-minikube             1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-proxy-dj4wg                             1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            kube-proxy-g2bpt                             1/1     Running     0             29h     192.168.49.3   minikube-m02   <none>           <none>

kube-system            kube-scheduler-minikube                      1/1     Running     0             29h     192.168.49.2   minikube       <none>           <none>

kube-system            storage-provisioner                          1/1     Running     1 (29h ago)   29h     192.168.49.2   minikube       <none>           <none>


#to verify the DaemonSet recreates a deleted pod (it lives in the kube-system namespace)

kubectl delete pod fluentd-elasticsearch-q28pk --namespace kube-system


CronJob Demo in Kubernetes


cat cron.yaml

apiVersion: batch/v1

kind: CronJob

metadata:

  name: my-cronjob

spec:

  jobTemplate:

    metadata:

      name: my-cronjob

    spec:

      template:

        metadata:

        spec:

          containers:

          - image: busybox

            name: my-cronjob

            command:

            - date

            resources: {}

          restartPolicy: OnFailure

  schedule: '*/1 * * * *'


kubectl apply -f cron.yaml

cronjob.batch/my-cronjob created


#see cron job

kubectl get cj

NAME         SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE

my-cronjob   */1 * * * *   False     0        <none>          10s


kubectl get job

NAME                  COMPLETIONS   DURATION   AGE

my-cronjob-27601880   1/1           9s         12s


#a new job is created every minute

kubectl get pod

NAME                        READY   STATUS      RESTARTS   AGE

my-cronjob-27601880-gxnqz   0/1     Completed   0          25s


kubectl logs my-cronjob-27601880-gxnqz

Fri Jun 24 23:20:05 UTC 2022


kubectl get pod -w

NAME                        READY   STATUS      RESTARTS   AGE

my-cronjob-27601880-gxnqz   0/1     Completed   0          76s

my-cronjob-27601881-hnsrd   0/1     Completed   0          16s


kubectl logs my-cronjob-27601881-hnsrd

Fri Jun 24 23:21:07 UTC 2022


kubectl get pod -w

NAME                        READY   STATUS      RESTARTS   AGE

my-cronjob-27601880-gxnqz   0/1     Completed   0          2m28s

my-cronjob-27601881-hnsrd   0/1     Completed   0          88s

my-cronjob-27601882-w7mng   0/1     Completed   0          28s


kubectl logs my-cronjob-27601882-w7mng

Fri Jun 24 23:22:08 UTC 2022
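
#by default Kubernetes keeps the last 3 successful jobs and the last failed one; a sketch of tuning this in the CronJob spec:

spec:

  successfulJobsHistoryLimit: 2

  failedJobsHistoryLimit: 1


#clean up

kubectl delete cj my-cronjob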


Demo ConfigMap in Kubernetes

 

cat configmap.yaml

apiVersion: v1

data:

  key1: SOMESUPERIMPORTANT

kind: ConfigMap

metadata:

  name: my-configmap


cat configmap-pod.yaml

apiVersion: v1

kind: Pod

metadata:

  name: pod-configmap

spec:

  containers:

    - name: test-container

      image: k8s.gcr.io/busybox

      command: [ "/bin/sh", "-c", "env" ]

      env:

        - name: KEY1_CONFIG_MAP

          valueFrom:

            configMapKeyRef:

              name: my-configmap

              key: key1

  restartPolicy: Never


kubectl apply -f configmap.yaml

configmap/my-configmap created


kubectl get cm

NAME               DATA   AGE

kube-root-ca.crt   1      28h

my-configmap       1      42s


kubectl get cm my-configmap

NAME           DATA   AGE

my-configmap   1      58s


kubectl get cm my-configmap -o yaml

apiVersion: v1

data:

  key1: SOMESUPERIMPORTANT

kind: ConfigMap

metadata:

  annotations:

    kubectl.kubernetes.io/last-applied-configuration: |

      {"apiVersion":"v1","data":{"key1":"SOMESUPERIMPORTANT"},"kind":"ConfigMap","metadata":{"annota                                                                                         tions":{},"name":"my-configmap","namespace":"default"}}

  creationTimestamp: "2022-06-24T23:04:38Z"

  name: my-configmap

  namespace: default

  resourceVersion: "36974"

  uid: 59c6bf6e-28f0-4de3-884b-52918aaa977e


#create pod

kubectl apply -f configmap-pod.yaml

pod/pod-configmap created


kubectl get po

NAME            READY   STATUS      RESTARTS   AGE

nginx           1/1     Running     0          28h

pod-configmap   0/1     Completed   0          7s


kubectl logs pod-configmap

KUBERNETES_PORT=tcp://10.96.0.1:443

KUBERNETES_SERVICE_PORT=443

HOSTNAME=pod-configmap

SHLVL=1

HOME=/root

KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1

PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

KUBERNETES_PORT_443_TCP_PORT=443

KUBERNETES_PORT_443_TCP_PROTO=tcp

KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443

KUBERNETES_SERVICE_PORT_HTTPS=443

KEY1_CONFIG_MAP=SOMESUPERIMPORTANT

PWD=/

KUBERNETES_SERVICE_HOST=10.96.0.1
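
#the same ConfigMap could instead be mounted as files; a minimal sketch, where the pod name and the mountPath /etc/config are arbitrary choices (key1 appears as the file /etc/config/key1):

apiVersion: v1

kind: Pod

metadata:

  name: pod-configmap-vol

spec:

  containers:

    - name: test-container

      image: k8s.gcr.io/busybox

      command: [ "/bin/sh", "-c", "cat /etc/config/key1" ]

      volumeMounts:

        - name: config-volume

          mountPath: /etc/config

  volumes:

    - name: config-volume

      configMap:

        name: my-configmap

  restartPolicy: Never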


Creating and configuring a Kubernetes Cluster with 2 nodes

 

#on the main node

sudo hostnamectl set-hostname main.example.com

exec bash


#on node 1

sudo hostnamectl set-hostname client-node-1.example.com

exec bash


#on node 2

sudo hostnamectl set-hostname client-node-2.example.com

exec bash


#on the main node and on all worker nodes:

sudo kubeadm reset --force

sudo mkdir /etc/docker

sudo tee /etc/docker/daemon.json <<EOF

{

  "exec-opts": ["native.cgroupdriver=systemd"],

  "log-driver": "json-file",

  "log-opts": {

    "max-size": "100m"

  },

  "storage-driver": "overlay2"

}

EOF

sudo systemctl enable docker

sudo systemctl daemon-reload

sudo systemctl restart docker

sudo swapoff -a


#on the main

log=${HOME}/install-leader.log

pod_network_cidr=192.168.0.0/16

sudo kubeadm init --pod-network-cidr ${pod_network_cidr} --ignore-preflight-errors all 2>&1 | tee ${log}

<...>

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.6.219:6443 --token 7ubi5y.8t39cqgtvz9cgkvs \

    --discovery-token-ca-cert-hash sha256:65823bb628114490d8fe03a12f444d6d1d6eb243308e8af630259815356e39fd 


#Run on the main node to allow a non-root user to use kubectl

mkdir -p ${HOME}/.kube

sudo cp /etc/kubernetes/admin.conf ${HOME}/.kube/config

sudo chown -R $( id -u ):$( id -g ) ${HOME}/.kube/


echo 'source <(kubectl completion bash)' | tee --append ${HOME}/.bashrc

source ${HOME}/.bashrc


#On the main node, add the Weave Net CNI plugin so pods can communicate across nodes

kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')

serviceaccount/weave-net created

clusterrole.rbac.authorization.k8s.io/weave-net created

clusterrolebinding.rbac.authorization.k8s.io/weave-net created

role.rbac.authorization.k8s.io/weave-net created

rolebinding.rbac.authorization.k8s.io/weave-net created

daemonset.apps/weave-net created


kubectl get no

NAME               STATUS   ROLES    AGE   VERSION

main.example.com   Ready    master   27m   v1.19.4


#run the kubeadm join command copied in the previous step to join each node to the cluster

sudo kubeadm join 172.31.6.219:6443 --token 7ubi5y.8t39cqgtvz9cgkvs     --discovery-token-ca-cert-hash sha256:65823bb628114490d8fe03a12f444d6d1d6eb243308e8af630259815356e39fd

[preflight] Running pre-flight checks

[preflight] Reading configuration from the cluster...

[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Starting the kubelet

[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...


This node has joined the cluster:

* Certificate signing request was sent to apiserver and a response was received.

* The Kubelet was informed of the new secure connection details.


Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


OR

#if the original join command was lost or the token has expired, create a new one on the main node

sudo kubeadm token create --print-join-command


kubectl get no

NAME                        STATUS   ROLES    AGE     VERSION

client-node-1.example.com   Ready    <none>   4m6s    v1.19.4

client-node-2.example.com   Ready    <none>   3m33s   v1.19.4

main.example.com            Ready    master   33m     v1.19.4
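
#the workers show ROLES <none>; they can optionally be labeled so kubectl displays a role (cosmetic only):

kubectl label node client-node-1.example.com node-role.kubernetes.io/worker=

kubectl label node client-node-2.example.com node-role.kubernetes.io/worker=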


kubectl get ns

NAME              STATUS   AGE

default           Active   35m

kube-node-lease   Active   35m

kube-public       Active   35m

kube-system       Active   35m


kubectl get po -A

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE

kube-system   coredns-f9fd979d6-bxtbx                    1/1     Running   0          35m

kube-system   coredns-f9fd979d6-d8qz8                    1/1     Running   0          35m

kube-system   etcd-main.example.com                      1/1     Running   0          36m

kube-system   kube-apiserver-main.example.com            1/1     Running   0          36m

kube-system   kube-controller-manager-main.example.com   1/1     Running   0          36m

kube-system   kube-proxy-9k5pz                           1/1     Running   0          35m

kube-system   kube-proxy-g2555                           1/1     Running   0          6m3s

kube-system   kube-proxy-rlvrd                           1/1     Running   0          6m36s

kube-system   kube-scheduler-main.example.com            1/1     Running   0          35m

kube-system   weave-net-fks2v                            2/2     Running   1          21m

kube-system   weave-net-phbf7                            2/2     Running   0          6m3s

kube-system   weave-net-xskw5                            2/2     Running   1          6m36s


 kubectl get po -A -o wide

NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE   IP             NODE                        NOMINATED NODE   READINESS GATES

kube-system   coredns-f9fd979d6-bxtbx                    1/1     Running   0          50m   10.32.0.2      main.example.com            <none>           <none>

kube-system   coredns-f9fd979d6-d8qz8                    1/1     Running   0          50m   10.32.0.3      main.example.com            <none>           <none>

kube-system   etcd-main.example.com                      1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-apiserver-main.example.com            1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-controller-manager-main.example.com   1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-proxy-9k5pz                           1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   kube-proxy-g2555                           1/1     Running   0          20m   172.31.7.216   client-node-2.example.com   <none>           <none>

kube-system   kube-proxy-rlvrd                           1/1     Running   0          21m   172.31.11.38   client-node-1.example.com   <none>           <none>

kube-system   kube-scheduler-main.example.com            1/1     Running   0          50m   172.31.6.219   main.example.com            <none>           <none>

kube-system   weave-net-fks2v                            2/2     Running   1          36m   172.31.6.219   main.example.com            <none>           <none>

kube-system   weave-net-phbf7                            2/2     Running   0          20m   172.31.7.216   client-node-2.example.com   <none>           <none>

kube-system   weave-net-xskw5                            2/2     Running   1          21m   172.31.11.38   client-node-1.example.com   <none>           <none>


Thursday 23 June 2022

Install minikube


Ref:- minikube.sigs.k8s.io


minikube is local Kubernetes, focusing on making it easy to learn and develop for Kubernetes.

requirements: a container or VM manager such as Docker, 2 CPUs, 2 GB of free memory, and 20 GB of free disk space


curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current

                                 Dload  Upload   Total   Spent    Left  Speed

100 71.4M  100 71.4M    0     0  31.3M      0  0:00:02  0:00:02 --:--:-- 31.3M


sudo install minikube-linux-amd64 /usr/local/bin/minikube


Install Docker Engine

docker --version

Docker version 20.10.16, build aa7e414


Post-installation steps for Linux after installing Docker


#Manage Docker as a non-root user

sudo usermod -aG docker $USER

newgrp docker


docker run hello-world

Hello from Docker!

<...>


minikube start

* minikube v1.26.0 on Ubuntu 20.04

* Automatically selected the docker driver. Other choices: ssh, none

* Using Docker driver with root privileges

* Starting control plane node minikube in cluster minikube

* Pulling base image ...

* Downloading Kubernetes v1.24.1 preload ...

    > preloaded-images-k8s-v18-v1...: 405.83 MiB / 405.83 MiB  100.00% 19.32 Mi

    > gcr.io/k8s-minikube/kicbase: 386.00 MiB / 386.00 MiB  100.00% 17.55 MiB p

    > gcr.io/k8s-minikube/kicbase: 0 B [_________________________] ?% ? p/s 15s

* Creating docker container (CPUs=2, Memory=2200MB) ...

* Preparing Kubernetes v1.24.1 on Docker 20.10.17 ...

  - Generating certificates and keys ...

  - Booting up control plane ...

  - Configuring RBAC rules ...

* Verifying Kubernetes components...

  - Using image gcr.io/k8s-minikube/storage-provisioner:v5

* Enabled addons: storage-provisioner, default-storageclass

* Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default


sudo docker ps

CONTAINER ID   IMAGE                                 COMMAND                  CREATED         STATUS         PORTS                                                                                                                                  NAMES

11ed4e911c9f   gcr.io/k8s-minikube/kicbase:v0.0.32   "/usr/local/bin/entr…"   3 minutes ago   Up 3 minutes   127.0.0.1:49157->22/tcp, 127.0.0.1:49156->2376/tcp, 127.0.0.1:49155->5000/tcp, 127.0.0.1:49154->8443/tcp, 127.0.0.1:49153->32443/tcp   minikube


kubectl get nodes

NAME       STATUS   ROLES           AGE     VERSION

minikube   Ready    control-plane   7m14s   v1.24.1


minikube node list

minikube        192.168.49.2


minikube node add --worker

* Adding node m02 to cluster minikube

! Cluster was created without any CNI, adding a node to it might cause broken networking.

* Starting worker node minikube-m02 in cluster minikube

* Pulling base image ...

* Creating docker container (CPUs=2, Memory=2200MB) ...

* Preparing Kubernetes v1.24.1 on Docker 20.10.17 ...

* Verifying Kubernetes components...

* Successfully added m02 to minikube!


kubectl get no

NAME           STATUS   ROLES           AGE   VERSION

minikube       Ready    control-plane   10m   v1.24.1

minikube-m02   Ready    <none>          80s   v1.24.1


minikube status

minikube

type: Control Plane

host: Running

kubelet: Running

apiserver: Running

kubeconfig: Configured


minikube-m02

type: Worker

host: Running

kubelet: Running


kubectl get pods

No resources found in default namespace.


kubectl get pods -A

NAMESPACE     NAME                               READY   STATUS    RESTARTS      AGE

kube-system   coredns-6d4b75cb6d-fmhvp           1/1     Running   0             12m

kube-system   etcd-minikube                      1/1     Running   0             12m

kube-system   kindnet-47txl                      1/1     Running   0             3m11s

kube-system   kindnet-mxbdn                      1/1     Running   0             3m11s

kube-system   kube-apiserver-minikube            1/1     Running   0             12m

kube-system   kube-controller-manager-minikube   1/1     Running   0             12m

kube-system   kube-proxy-dj4wg                   1/1     Running   0             12m

kube-system   kube-proxy-g2bpt                   1/1     Running   0             3m12s

kube-system   kube-scheduler-minikube            1/1     Running   0             12m

kube-system   storage-provisioner                1/1     Running   1 (11m ago)   12m


kubectl run nginx --image=nginx

pod/nginx created


kubectl get pod -w

NAME    READY   STATUS    RESTARTS   AGE

nginx   1/1     Running   0          22s


kubectl cluster-info

Kubernetes control plane is running at https://192.168.49.2:8443

CoreDNS is running at https://192.168.49.2:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy


To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


minikube dashboard

* Enabling dashboard ...

  - Using image kubernetesui/dashboard:v2.6.0

  - Using image kubernetesui/metrics-scraper:v1.0.8

* Verifying dashboard health ...

* Launching proxy ...

* Verifying proxy health ...

<...>


curl <URL>


Install kubectl binary with curl on Linux


Ref:- kubernetes.io

#Download the latest release with the command

curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current

                                 Dload  Upload   Total   Spent    Left  Speed

100   154  100   154    0     0   1132      0 --:--:-- --:--:-- --:--:--  1132

100 43.5M  100 43.5M    0     0  23.1M      0  0:00:01  0:00:01 --:--:-- 32.9M


#Download the kubectl checksum file

curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current

                                 Dload  Upload   Total   Spent    Left  Speed

100   154  100   154    0     0   1193      0 --:--:-- --:--:-- --:--:--  1193

100    64  100    64    0     0    156      0 --:--:-- --:--:-- --:--:--   284


#Validate the kubectl binary against the checksum file

echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check

kubectl: OK


#Install kubectl

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl


ls -la kubectl*

-rwxrwxr-x 1 devops devops 45711360 Jun 22 18:13 kubectl

-rw-rw-r-- 1 devops devops       64 Jun 22 18:15 kubectl.sha256


#alternatively, without root access, make the binary executable and move it into ~/.local/bin

chmod +x kubectl

mkdir -p ~/.local/bin

mv ./kubectl ~/.local/bin/kubectl


kubectl version --client

WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short.  Use --output=yaml|json to get the full version.

Client Version: version.Info{Major:"1", Minor:"24", GitVersion:"v1.24.2", GitCommit:"f66044f4361b9f1f96f0053dd46cb7dce5e990a8", GitTreeState:"clean", BuildDate:"2022-06-15T14:22:29Z", GoVersion:"go1.18.3", Compiler:"gc", Platform:"linux/amd64"}

Kustomize Version: v4.5.4


Friday 17 June 2022

Compose and Django | containerize the legacy system using Docker


Ref:- docs.docker.com/samples/django/

sudo docker --version

Docker version 20.10.11, build 761974f


docker-compose --version

docker-compose version 1.29.1, build c34c88b2


mkdir ComposeDjango && cd ComposeDjango

ls

Dockerfile  docker-compose.yml  requirements.txt


cat Dockerfile 

# syntax=docker/dockerfile:1

FROM python:3

ENV PYTHONDONTWRITEBYTECODE=1

ENV PYTHONUNBUFFERED=1

WORKDIR /code

COPY requirements.txt /code/

RUN pip install -r requirements.txt

COPY . /code/


cat requirements.txt 

Django>=3.0,<4.0

psycopg2>=2.8


cat docker-compose.yml 

version: "3.9"

   

services:

  db:

    image: postgres

    volumes:

      - ./data/db:/var/lib/postgresql/data

    environment:

      - POSTGRES_DB=postgres

      - POSTGRES_USER=postgres

      - POSTGRES_PASSWORD=postgres

  web:

    build: .

    command: python manage.py runserver 0.0.0.0:8000

    volumes:

      - .:/code

    ports:

      - "8000:8000"

    environment:

      - POSTGRES_NAME=postgres

      - POSTGRES_USER=postgres

      - POSTGRES_PASSWORD=postgres

    depends_on:

      - db


sudo docker-compose run web django-admin startproject composeexample .

<...>

ls -l

total 24

-rw-r--r-- 1 user user  189 Jun 17 12:25 Dockerfile

drwxr-xr-x 2 root     root     4096 Jun 17 12:32 composeexample

drwxr-xr-x 3 root     root     4096 Jun 17 12:32 data

-rw-r--r-- 1 user user  497 Jun 17 12:27 docker-compose.yml

-rwxr-xr-x 1 root     root      670 Jun 17 12:32 manage.py

-rw-r--r-- 1 user user   31 Jun 17 12:25 requirements.txt


sudo chown -R $USER:$USER composeexample manage.py


ls -l

total 24

-rw-r--r-- 1 user user  189 Jun 17 12:25 Dockerfile

drwxr-xr-x 2 user user 4096 Jun 17 12:32 composeexample

drwxr-xr-x 3 root     root     4096 Jun 17 12:32 data

-rw-r--r-- 1 user user  497 Jun 17 12:27 docker-compose.yml

-rwxr-xr-x 1 user user  670 Jun 17 12:32 manage.py

-rw-r--r-- 1 user user   31 Jun 17 12:25 requirements.txt


#to connect to the database, replace the DATABASES section in composeexample/settings.py

vim composeexample/settings.py

<...>

# Database

# https://docs.djangoproject.com/en/3.2/ref/settings/#databases


import os


[...]


DATABASES = {

    'default': {

        'ENGINE': 'django.db.backends.postgresql',

        'NAME': os.environ.get('POSTGRES_NAME'),

        'USER': os.environ.get('POSTGRES_USER'),

        'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),

        'HOST': 'db',

        'PORT': 5432,

    }

}

<...>
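
The HOST value 'db' is the Compose service name, which containers resolve over the default Compose network; the POSTGRES_* values are supplied by the environment section of the web service in docker-compose.yml.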


sudo docker-compose up --build

Building web

Sending build context to Docker daemon  43.91MB

Step 1/7 : FROM python:3

 ---> 6bb8bdb609b6

Step 2/7 : ENV PYTHONDONTWRITEBYTECODE=1

 ---> Using cache

 ---> 8bcb2efe0ebb

Step 3/7 : ENV PYTHONUNBUFFERED=1

 ---> Using cache

 ---> 90141c60eb79

Step 4/7 : WORKDIR /code

 ---> Using cache

 ---> 7615300b689c

Step 5/7 : COPY requirements.txt /code/

 ---> Using cache

 ---> 5b520c29e26f

Step 6/7 : RUN pip install -r requirements.txt

 ---> Using cache

 ---> fee2bac5b31a

Step 7/7 : COPY . /code/

 ---> 760f6e271496

Successfully built 760f6e271496

Successfully tagged composedjango_web:latest

Starting composedjango_db_1 ... done

Recreating composedjango_web_1 ... done

Attaching to composedjango_db_1, composedjango_web_1

db_1   | 

db_1   | PostgreSQL Database directory appears to contain a database; Skipping initialization

db_1   | 

db_1   | 2022-06-17 13:10:47.108 UTC [1] LOG:  starting PostgreSQL 14.3 (Debian 14.3-1.pgdg110+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit

db_1   | 2022-06-17 13:10:47.109 UTC [1] LOG:  listening on IPv4 address "0.0.0.0", port 5432

db_1   | 2022-06-17 13:10:47.109 UTC [1] LOG:  listening on IPv6 address "::", port 5432

db_1   | 2022-06-17 13:10:47.116 UTC [1] LOG:  listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"

db_1   | 2022-06-17 13:10:47.125 UTC [25] LOG:  database system was shut down at 2022-06-17 13:06:25 UTC

db_1   | 2022-06-17 13:10:47.131 UTC [1] LOG:  database system is ready to accept connections

web_1  | Watching for file changes with StatReloader

web_1  | Performing system checks...

web_1  | 

web_1  | System check identified no issues (0 silenced).

web_1  | 

web_1  | You have 18 unapplied migration(s). Your project may not work properly until you apply the migrations for app(s): admin, auth, contenttypes, sessions.

web_1  | Run 'python manage.py migrate' to apply them.

web_1  | June 17, 2022 - 13:10:49

web_1  | Django version 3.2.13, using settings 'composeexample.settings'

web_1  | Starting development server at http://0.0.0.0:8000/

web_1  | Quit the server with CONTROL-C.

web_1  | [17/Jun/2022 13:12:31] "GET / HTTP/1.1" 200 10697

web_1  | [17/Jun/2022 13:12:31] "GET /static/admin/css/fonts.css HTTP/1.1" 200 423


sudo docker ps

CONTAINER ID   IMAGE                  COMMAND                  CREATED          STATUS          PORTS                                       NAMES

63fea9598831   composedjango_web      "python manage.py ru…"   19 seconds ago   Up 18 seconds   0.0.0.0:8000->8000/tcp, :::8000->8000/tcp   composedjango_web_1

746d2b5aeb3d   postgres               "docker-entrypoint.s…"   38 minutes ago   Up 19 seconds   5432/tcp                                    composedjango_db_1
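
#the startup log reported 18 unapplied migrations; they can be applied inside the running web container:

sudo docker-compose exec web python manage.py migrate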


curl localhost:8000

#or open http://localhost:8000 in a browser
#to stop, press Ctrl+C, or run the following from another terminal

sudo docker-compose down

Stopping composedjango_web_1 ... done

Stopping composedjango_db_1  ... done

Removing composedjango_web_1                ... done

Removing composedjango_web_run_f90adcbc769e ... done

Removing composedjango_db_1                 ... done

Removing network composedjango_default


#exit log

<...>

composedjango_web_1 exited with code 0

db_1   | 2022-06-17 13:16:55.734 UTC [1] LOG:  received fast shutdown request

db_1   | 2022-06-17 13:16:55.737 UTC [1] LOG:  aborting any active transactions

db_1   | 2022-06-17 13:16:55.739 UTC [1] LOG:  background worker "logical replication launcher" (PID 31) exited with exit code 1

db_1   | 2022-06-17 13:16:55.740 UTC [26] LOG:  shutting down

db_1   | 2022-06-17 13:16:55.756 UTC [1] LOG:  database system is shut down

composedjango_db_1 exited with code 0


Wednesday 15 June 2022

Install WordPress with Docker Compose


docker-compose --version

docker-compose version 1.29.1, build c34c88b2


sudo docker --version

Docker version 20.10.11, build 761974f


cat docker-compose.yaml 

version: "3"

services:

  database:

    image: mysql

    restart: always

    environment:

      MYSQL_ROOT_PASSWORD: wppassword

      MYSQL_DATABASE: wpdb

      MYSQL_USER: wpuser

      MYSQL_PASSWORD: wppassword

    volumes:

      - mysql:/var/lib/mysql


  wordpress:

    depends_on:

      - database

    image: wordpress:latest

    restart: always

    ports:

      - "8000:80"

    environment:

      WORDPRESS_DB_HOST: database:3306

      WORDPRESS_DB_USER: wpuser

      WORDPRESS_DB_PASSWORD: wppassword

      WORDPRESS_DB_NAME: wpdb

    volumes:

      ["./:/var/www/html"]

volumes:

  mysql: {}


 sudo docker-compose up -d

Creating network "demo_default" with the default driver

Creating volume "demo_mysql" with default driver

Pulling database (mysql:)...

latest: Pulling from library/mysql

Digest: sha256:548da4c67fd8a71908f17c308b8ddb098acf5191d3d7694e56801c6a8b2072cc

Status: Downloaded newer image for mysql:latest

Pulling wordpress (wordpress:latest)...

<...>

Status: Downloaded newer image for wordpress:latest

Creating demo_database_1 ... done

Creating demo_wordpress_1 ... done


sudo docker ps

CONTAINER ID   IMAGE                  COMMAND                  CREATED              STATUS              PORTS                                   NAMES

a15ed83385d2   wordpress:latest       "docker-entrypoint.s…"   About a minute ago   Up About a minute   0.0.0.0:8000->80/tcp, :::8000->80/tcp   demo_wordpress_1

68e6f6493a25   mysql                  "docker-entrypoint.s…"   About a minute ago   Up About a minute   3306/tcp, 33060/tcp                     demo_database_1


curl http://localhost:8000/wp-admin/install.php


sudo docker images

REPOSITORY                                 TAG          IMAGE ID       CREATED         SIZE

wordpress                                  latest       764973ecc5df   5 days ago      609MB

mysql                                      latest       65b636d5542b   2 weeks ago     524MB


sudo docker-compose down --volumes

Stopping demo_wordpress_1 ... done

Stopping demo_database_1  ... done

Removing demo_wordpress_1 ... done

Removing demo_database_1  ... done

Removing network demo_default

Removing volume demo_mysql


sudo docker ps

sudo docker ps -a


sudo docker images

REPOSITORY                                 TAG          IMAGE ID       CREATED         SIZE

wordpress                                  latest       764973ecc5df   5 days ago      609MB

mysql                                      latest       65b636d5542b   2 weeks ago     524MB


sudo docker-compose up -d

Creating network "demo_default" with the default driver

Creating volume "demo_mysql" with default driver

Creating demo_database_1 ... done

Creating demo_wordpress_1 ... done


 sudo docker ps

CONTAINER ID   IMAGE                  COMMAND                  CREATED             STATUS             PORTS                                   NAMES

13e5d87ff13e   wordpress:latest       "docker-entrypoint.s…"   15 seconds ago      Up 14 seconds      0.0.0.0:8000->80/tcp, :::8000->80/tcp   demo_wordpress_1

499a91748065   mysql                  "docker-entrypoint.s…"   16 seconds ago      Up 15 seconds      3306/tcp, 33060/tcp                     demo_database_1


sudo docker-compose down --volumes

Stopping demo_wordpress_1 ... done

Stopping demo_database_1  ... done

Removing demo_wordpress_1 ... done

Removing demo_database_1  ... done

Removing network demo_default

Removing volume demo_mysql