kubectl get no
NAME           STATUS   ROLES           AGE   VERSION
minikube       Ready    control-plane   29h   v1.24.1
minikube-m02   Ready    <none>          29h   v1.24.1
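If you also want each node's internal IP (handy for matching against the pod IPs in the wide pod listings below), the same command takes the -o wide flag:
# show node internal IPs, OS image, and runtime details
kubectl get no -o wide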
# list the pods currently running in kube-system (before creating the DaemonSet)
kubectl get po -A -o wide | grep kube-system
kube-system   coredns-6d4b75cb6d-fmhvp           1/1   Running   0             29h   172.17.0.2     minikube       <none>   <none>
kube-system   etcd-minikube                      1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kindnet-47txl                      1/1   Running   0             29h   192.168.49.3   minikube-m02   <none>   <none>
kube-system   kindnet-mxbdn                      1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-apiserver-minikube            1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-controller-manager-minikube   1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-proxy-dj4wg                   1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-proxy-g2bpt                   1/1   Running   0             29h   192.168.49.3   minikube-m02   <none>   <none>
kube-system   kube-scheduler-minikube            1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   storage-provisioner                1/1   Running   1 (29h ago)   29h   192.168.49.2   minikube       <none>   <none>
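The grep above shows the pods; to list the DaemonSets that manage some of them (kube-proxy and kindnet are typically DaemonSet-managed in a multi-node minikube cluster), the daemonset resource can be queried directly:
# list existing DaemonSets across all namespaces
kubectl get ds -A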
cat daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
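Before creating anything, the manifest can optionally be checked against the API server with a server-side dry run:
# validate the DaemonSet manifest without persisting it
kubectl apply -f daemonset.yaml --dry-run=server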
kubectl apply -f daemonset.yaml
daemonset.apps/fluentd-elasticsearch created
kubectl get po -A -o wide | grep kube-system
kube-system   coredns-6d4b75cb6d-fmhvp           1/1   Running   0             29h   172.17.0.2     minikube       <none>   <none>
kube-system   etcd-minikube                      1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   fluentd-elasticsearch-q28pk        1/1   Running   0             53s   172.17.0.3     minikube       <none>   <none>
kube-system   fluentd-elasticsearch-sqlgj        1/1   Running   0             53s   172.17.0.5     minikube-m02   <none>   <none>
kube-system   kindnet-47txl                      1/1   Running   0             29h   192.168.49.3   minikube-m02   <none>   <none>
kube-system   kindnet-mxbdn                      1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-apiserver-minikube            1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-controller-manager-minikube   1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-proxy-dj4wg                   1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   kube-proxy-g2bpt                   1/1   Running   0             29h   192.168.49.3   minikube-m02   <none>   <none>
kube-system   kube-scheduler-minikube            1/1   Running   0             29h   192.168.49.2   minikube       <none>   <none>
kube-system   storage-provisioner                1/1   Running   1 (29h ago)   29h   192.168.49.2   minikube       <none>   <none>
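Besides grepping the pod list, the DaemonSet's own status reports the desired versus ready pod count (one pod per eligible node), which confirms it has landed on both nodes:
# check desired/current/ready pod counts for the DaemonSet
kubectl get ds fluentd-elasticsearch -n kube-system
# wait until every node is running a ready pod
kubectl rollout status ds/fluentd-elasticsearch -n kube-system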
# delete one of the DaemonSet pods to verify that a replacement is created automatically
kubectl delete pod fluentd-elasticsearch-q28pk --namespace kube-system
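A replacement pod should appear within seconds; it can be watched using the label from the pod template (name=fluentd-elasticsearch). The DaemonSet can also be removed afterwards if this was only a test:
# watch the DaemonSet create a replacement pod
kubectl get po -n kube-system -l name=fluentd-elasticsearch -w
# clean up when done
kubectl delete ds fluentd-elasticsearch -n kube-system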