CKA Certification Module 2: K8s Enterprise Operations and Hands-On Practice


Advanced Pod Practice: Pod Lifecycle, postStart and preStop Hooks

Overview of the Complete Pod Lifecycle

Container hooks; container probes; Pod restart policy; Pod termination process.

Init containers.

Init Container Best Practices

How do init containers differ from main containers?

Init containers have no readinessProbe…

[root@k8s-master01 pod-2]# cat init.yaml
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  initContainers:
  - name: init-myservice
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
  - name: init-mydb
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]
  containers:
  - name: myapp-container
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml 
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME        READY   STATUS    RESTARTS   AGE
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Init:0/2   0          0s
myapp-pod   0/1     Init:0/2   0          0s
myapp-pod   0/1     Init:0/2   0          1s
# If an init container cannot start, the main container will never come up
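
To see why an init container is stuck, the usual checks are describe and per-container logs; a sketch using the names from the manifest above (output omitted):

[root@k8s-master01 pod-2]# kubectl describe pod myapp-pod
[root@k8s-master01 pod-2]# kubectl logs myapp-pod -c init-myservice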

[root@k8s-master01 pod-2]# kubectl delete -f init.yaml 
pod "myapp-pod" deleted
[root@k8s-master01 pod-2]# cat init.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  initContainers:
  - name: init-myservice
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "sleep 2"]
  - name: init-mydb
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]
  containers:
  - name: myapp-container
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml 
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME        READY   STATUS    RESTARTS   AGE
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Init:0/2   0          0s
myapp-pod   0/1     Init:0/2   0          0s
myapp-pod   0/1     Init:0/2   0          2s
myapp-pod   0/1     Init:1/2   0          4s
myapp-pod   0/1     Init:1/2   0          5s

[root@k8s-master01 pod-2]# kubectl delete -f init.yaml 
pod "myapp-pod" deleted
[root@k8s-master01 pod-2]# cat init.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  initContainers:
  - name: init-myservice
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "sleep 2"]
  - name: init-mydb
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', "sleep 2"]
  containers:
  - name: myapp-container
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo The main container started successfully! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml 
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME        READY   STATUS    RESTARTS   AGE
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Pending   0          0s
myapp-pod   0/1     Init:0/2   0          0s
myapp-pod   0/1     Init:0/2   0          1s
myapp-pod   0/1     Init:0/2   0          1s
myapp-pod   0/1     Init:1/2   0          4s
myapp-pod   0/1     Init:1/2   0          5s
myapp-pod   0/1     PodInitializing   0          7s
myapp-pod   1/1     Running           0          8s
# All init containers must run to completion successfully before the main container can start
Init Containers in Production

The main container runs an nginx service; the init container generates its index.html file.

[root@k8s-master01 pod-2]# cat init-1.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: initnginx
spec:
  initContainers:
  - name: install
    image: docker.io/library/busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - wget
    - "-O"
    - "/work-dir/index.html"
    - "https://www.baidu.com"
    volumeMounts:
    - name: workdir
      mountPath: /work-dir
  containers:
  - name: nginx
    image: docker.io/xianchao/nginx:v1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
    volumeMounts:
    - name: workdir
      mountPath: /usr/share/nginx/html
  volumes:
  - name: workdir
    emptyDir: {}

[root@k8s-master01 pod-2]# kubectl apply -f init-1.yaml 
pod/initnginx created
[root@k8s-master01 pod-2]# kubectl get pods -owide
NAME        READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
initnginx   1/1     Running   0          14s   10.244.85.195   k8s-node01   <none>           <none>
[root@k8s-master01 pod-2]# curl 10.244.85.195
...
[root@k8s-master01 pod-2]# kubectl exec -it initnginx -c nginx -- /bin/bash
root@initnginx:/# ls /usr/share/nginx/html/           
index.html
Pod Lifecycle: Container Hooks

postStart and preStop

[root@k8s-master01 pod-2]# kubectl explain pod.spec.containers.lifecycle
# View help and syntax
[root@k8s-master01 pod-2]# kubectl delete -f init-1.yaml 
pod "initnginx" deleted
[root@k8s-master01 pod-2]# cat pre-start.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: life-demo
spec:
  containers:
  - name: lifecycle-demo-container
    image: docker.io/xianchao/nginx:v1
    imagePullPolicy: IfNotPresent
    lifecycle:
      postStart:
         exec:
           command: ["/bin/sh", "-c","echo 'lifecycle hookshandler' > /usr/share/nginx/html/test.html"]
      preStop:
         exec:
           command:
           - "/bin/sh"
           - "-c"
           - "nginx -s stop"

[root@k8s-master01 pod-2]# kubectl apply -f pre-start.yaml 
pod/life-demo created
[root@k8s-master01 pod-2]# kubectl get pods
NAME        READY   STATUS    RESTARTS   AGE
life-demo   1/1     Running   0          25s
[root@k8s-master01 pod-2]# kubectl exec -it life-demo -- /bin/bash
root@life-demo:/# cd /usr/share/nginx/html/
root@life-demo:/usr/share/nginx/html# cat test.html 
lifecycle hookshandler
# Test succeeded

[root@k8s-master01 pod-2]# kubectl delete -f pre-start.yaml 
pod "life-demo" deleted
# Clean up

Summary:

A Pod supports many user-defined behaviors across its lifecycle:

  1. Init containers perform initialization

  2. After the main container starts, a postStart hook can run

  3. Before the main container stops, a preStop hook can run

  4. While the main container runs, health checks can be performed: startupProbe, livenessProbe, readinessProbe
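
Putting these together, a minimal sketch of a Pod that uses all of them (image names and probe settings here are illustrative, not taken from the examples above):

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo-all
spec:
  initContainers:                  # 1. init containers run to completion first
  - name: init
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo init done']
  containers:
  - name: app
    image: docker.io/xianchao/nginx:v1
    imagePullPolicy: IfNotPresent
    lifecycle:
      postStart:                   # 2. runs right after the container starts
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/flag"]
      preStop:                     # 3. runs right before the container stops
        exec:
          command: ["/bin/sh", "-c", "nginx -s quit"]
    startupProbe:                  # 4. health checks while the container runs
      tcpSocket:
        port: 80
    livenessProbe:
      tcpSocket:
        port: 80
    readinessProbe:
      tcpSocket:
        port: 80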

Zero-Downtime Upgrades: Pod Health Checks

Startup probe: startupProbe

A quick test

[root@k8s-master01 ~]# mkdir pod-4
[root@k8s-master01 ~]# cd pod-4
[root@k8s-master01 pod-4]# cat check.yaml
apiVersion: v1
kind: Pod
metadata:
  name: check
  namespace: default
  labels:
    app: check
spec:
  containers:
  - name: check
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - /bin/sh
    - -c
    - sleep 10; exit
# Make the container exit after running for 10 seconds
[root@k8s-master01 pod-4]# kubectl apply -f check.yaml
pod/check created
[root@k8s-master01 pod-2]# kubectl get pods -w
NAME    READY   STATUS    RESTARTS   AGE
check   0/1     Pending   0          0s
check   0/1     Pending   0          0s
check   0/1     ContainerCreating   0          0s
check   0/1     ContainerCreating   0          1s
check   1/1     Running             0          2s
check   0/1     Completed           0          12s
check   1/1     Running             1 (2s ago)   13s
check   0/1     Completed           1 (12s ago)   23s
# Watch the pod status

[root@k8s-master01 pod-4]# kubectl delete -f check.yaml 
pod "check" deleted
# Clean up
Question: Kubernetes provides three kinds of probes for container health checking. Which three, and what does each do?

Pod Probe Properties

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.exec
# View help
[root@k8s-master01 pod-4]# cat startup-exec.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: startupprobe
spec:
  containers:
  - name: startup
    image: xianchao/tomcat-8.5-jre8:v1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
    startupProbe:
     exec:
       command:
       - "/bin/sh"
       - "-c"
       - "ps aux | grep tomcat"
     initialDelaySeconds: 20 #seconds after container start before the first probe
     periodSeconds: 20 #interval between probes
     timeoutSeconds: 10 #how long to wait for a probe response before timing out
     successThreshold: 1 #consecutive successes required to count as success
     failureThreshold: 3 #consecutive failures required to count as failure
[root@k8s-master01 pod-4]# kubectl apply -f startup-exec.yaml 
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME           READY   STATUS    RESTARTS   AGE
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     Running             0          1s
startupprobe   0/1     Running             0          15s
startupprobe   0/1     Running             0          20s
startupprobe   1/1     Running             0          20s
# Observe the probe timing

[root@k8s-master01 pod-4]# kubectl delete -f startup-exec.yaml 
pod "startupprobe" deleted
# Clean up

[root@k8s-master01 pod-4]# cat startup-exec.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: startupprobe
spec:
  containers:
  - name: startup
    image: xianchao/tomcat-8.5-jre8:v1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
    startupProbe:
     exec:
       command:
       - "/bin/sh"
       - "-c"
       - "aa ps aux | grep tomcat1" # 修改为一条不存在的命令测试
     initialDelaySeconds: 20 #容器启动后多久开始探测
     periodSeconds: 20 #执行探测的时间间隔
     timeoutSeconds: 10 #探针执行检测请求后,等待响应的超时时间
     successThreshold: 1 #成功多少次才算成功
     failureThreshold: 3 #失败多少次才算失败
[root@k8s-master01 pod-4]# kubectl apply -f startup-exec.yaml 
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME           READY   STATUS    RESTARTS   AGE
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     Running             0          2s
startupprobe   0/1     Running             1 (0s ago)   81s
startupprobe   0/1     Running             2 (1s ago)   2m41s
startupprobe   0/1     Running             3 (1s ago)   4m1s
# Watch the probe fail and restart

[root@k8s-master01 pod-4]# kubectl delete -f startup-exec.yaml 
pod "startupprobe" deleted
# Clean up
Question: how long after the first probe failure does the container restart?

Based on the test above: why 60 seconds? And why roughly 80 seconds? (See the breakdown below.)
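
A worked breakdown, using the timings from the manifest above:

initialDelaySeconds: 20 -> the first probe fires 20s after the container starts
periodSeconds: 20, failureThreshold: 3 -> three consecutive failures take 3 * 20s = 60s
first restart: roughly 20s + 60s = 80s after start (the watch above shows 81s)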

tcpSocket mode

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.tcpSocket
# View help
[root@k8s-master01 pod-4]# cat startup-tcpsocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: startupprobe
spec:
  containers:
  - name: startup
    image: xianchao/tomcat-8.5-jre8:v1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
    startupProbe:
     tcpSocket:
       port: 8080
     initialDelaySeconds: 20 #seconds after container start before the first probe
     periodSeconds: 20 #interval between probes
     timeoutSeconds: 10 #how long to wait for a probe response before timing out
     successThreshold: 1 #consecutive successes required to count as success
     failureThreshold: 3 #consecutive failures required to count as failure
[root@k8s-master01 pod-4]# kubectl apply -f startup-tcpsocket.yaml 
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME           READY   STATUS    RESTARTS   AGE
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     ContainerCreating   0          1s
startupprobe   0/1     Running             0          1s
startupprobe   0/1     Running             0          40s
startupprobe   1/1     Running             0          40s
# Test tcpSocket

[root@k8s-master01 pod-4]# kubectl delete -f startup-tcpsocket.yaml 
pod "startupprobe" deleted
# Clean up

httpGet mode

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.httpGet.
# View help
[root@k8s-master01 pod-4]# cat startup-httpget.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: startupprobe
spec:
  containers:
  - name: startup
    image: xianchao/tomcat-8.5-jre8:v1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
    startupProbe:
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 20 #seconds after container start before the first probe
      periodSeconds: 20 #interval between probes
      timeoutSeconds: 10 #how long to wait for a probe response before timing out
      successThreshold: 1 #consecutive successes required to count as success
      failureThreshold: 3 #consecutive failures required to count as failure

[root@k8s-master01 pod-4]# kubectl apply -f startup-httpget.yaml 
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME           READY   STATUS    RESTARTS   AGE
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     Pending   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     ContainerCreating   0          0s
startupprobe   0/1     Running             0          1s
startupprobe   0/1     Running             0          41s
startupprobe   1/1     Running             0          41s
# Test httpGet

[root@k8s-master01 pod-4]# kubectl delete -f startup-httpget.yaml 
pod "startupprobe" deleted
# Clean up

Note on startup probe intervals: timings may differ slightly between probe types; the impact is negligible.

Liveness probe: livenessProbe

exec mode

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.exec.
# View help
[root@k8s-master01 pod-4]# cat liveness-exec.yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec
  labels:
    app: liveness
spec:
  containers:
  - name: liveness
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    args:                       #create the file the probe will check
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      initialDelaySeconds: 10   #delay before the first probe
      periodSeconds: 5          #probe interval
      exec:
        command:
        - cat
        - /tmp/healthy
[root@k8s-master01 pod-4]# kubectl apply -f liveness-exec.yaml
pod/liveness-exec created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME            READY   STATUS    RESTARTS   AGE
liveness-exec   0/1     Pending   0          0s
liveness-exec   0/1     Pending   0          0s
liveness-exec   0/1     ContainerCreating   0          0s
liveness-exec   0/1     ContainerCreating   0          1s
liveness-exec   1/1     Running             0          2s
liveness-exec   1/1     Running             1 (1s ago)   76s
liveness-exec   1/1     Running             2 (0s ago)   2m31s
# Test exec

[root@k8s-master01 pod-4]# kubectl delete -f liveness-exec.yaml
pod "liveness-exec" deleted
# Clean up

httpGet mode

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.httpGet.
# View help
[root@k8s-master01 pod-4]# ctr -n k8s.io images import springboot.tar.gz 
[root@k8s-node01 images]# ctr -n k8s.io images import springboot.tar.gz 
[root@k8s-node02 images]# ctr -n k8s.io images import springboot.tar.gz
# Import the image on each node
[root@k8s-master01 pod-4]# cat liveness-http.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: liveness-http
  labels:
    test: liveness
spec:
  containers:
  - name: liveness
    image: mydlqclub/springboot-helloworld:0.0.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      initialDelaySeconds: 20   #delay before the first probe
      periodSeconds: 5          #probe interval
      timeoutSeconds: 10        #probe timeout
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f liveness-http.yaml 
pod/liveness-http created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME            READY   STATUS    RESTARTS   AGE
liveness-http   0/1     Pending   0          0s
liveness-http   0/1     Pending   0          0s
liveness-http   0/1     ContainerCreating   0          0s
liveness-http   0/1     ContainerCreating   0          0s
liveness-http   1/1     Running             0          1s
[root@k8s-master01 pod-4]# kubectl get pods -owide
NAME            READY   STATUS    RESTARTS   AGE     IP              NODE         NOMINATED NODE   READINESS GATES
liveness-http   1/1     Running   0          5m24s   10.244.58.209   k8s-node02   <none>           <none>
[root@k8s-master01 pod-4]# curl 10.244.58.209:8081/actuator/health
{"status":"UP"}
# Test httpGet

[root@k8s-master01 pod-4]# kubectl delete -f liveness-http.yaml 
pod "liveness-http" deleted
# Clean up

tcpSocket mode

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.tcpSocket.
# View help
[root@k8s-master01 pod-4]# cat liveness-tcp.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: liveness-tcp
  labels:
    app: liveness
spec:
  containers:
  - name: liveness
    image: docker.io/xianchao/nginx:v1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      initialDelaySeconds: 15
      periodSeconds: 20
      tcpSocket:
        port: 80
[root@k8s-master01 pod-4]# kubectl apply -f liveness-tcp.yaml 
pod/liveness-tcp created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME           READY   STATUS    RESTARTS   AGE
liveness-tcp   0/1     Pending   0          0s
liveness-tcp   0/1     Pending   0          0s
liveness-tcp   0/1     ContainerCreating   0          0s
liveness-tcp   0/1     ContainerCreating   0          0s
liveness-tcp   1/1     Running             0          2s
# Test tcpSocket

[root@k8s-master01 pod-4]# kubectl delete -f liveness-tcp.yaml 
pod "liveness-tcp" deleted
# Clean up
Readiness probe: readinessProbe

httpGet mode (note: the file is named readiness-exec.yaml, but the probe it defines uses httpGet)

[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.readinessProbe.
# View help
[root@k8s-master01 pod-4]# cat readiness-exec.yaml 
apiVersion: v1
kind: Service
metadata:
  name: springboot
  labels:
    app: springboot
spec:
  type: NodePort
  ports:
  - name: server
    port: 8080
    targetPort: 8080
    nodePort: 31180
  - name: management
    port: 8081
    targetPort: 8081
    nodePort: 31181
  selector:
    app: springboot
---
apiVersion: v1
kind: Pod
metadata:
  name: springboot
  labels:
    app: springboot
spec:
  containers:
  - name: springboot
    image: mydlqclub/springboot-helloworld:0.0.1
    imagePullPolicy: IfNotPresent
    ports:
    - name: server
      containerPort: 8080
    - name: management
      containerPort: 8081
    readinessProbe:
      initialDelaySeconds: 20   
      periodSeconds: 5          
      timeoutSeconds: 10   
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f readiness-exec.yaml 
service/springboot created
pod/springboot created
[root@k8s-master01 pod-4]# kubectl get pods -l app=springboot -w
NAME         READY   STATUS    RESTARTS   AGE
springboot   0/1     Pending   0          1s
springboot   0/1     Pending   0          3s
springboot   0/1     ContainerCreating   0          3s
springboot   0/1     ContainerCreating   0          6s
springboot   0/1     Running             0          9s
springboot   1/1     Running             0          49s
[root@k8s-master01 pod-4]# kubectl get svc -w
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   4d2h
springboot   NodePort    10.106.29.40   <none>        8080:31180/TCP,8081:31181/TCP   0s
# A pod is only associated with the service once it is ready, which is why readiness probes matter

Note: liveness and readiness probes have no ordering between them; they run in parallel.

Combining startup, readiness, and liveness probes
[root@k8s-master01 pod-4]# cat start-read-live.yaml 
apiVersion: v1
kind: Service
metadata:
  name: springboot-live
  labels:
    app: springboot
spec:
  type: NodePort
  ports:
  - name: server
    port: 8080
    targetPort: 8080
    nodePort: 31180
  - name: management
    port: 8081
    targetPort: 8081
    nodePort: 31181
  selector:
    app: springboot
---
apiVersion: v1
kind: Pod
metadata:
  name: springboot-live
  labels:
    app: springboot
spec:
  containers:
  - name: springboot
    image: mydlqclub/springboot-helloworld:0.0.1
    imagePullPolicy: IfNotPresent
    ports:
    - name: server
      containerPort: 8080
    - name: management
      containerPort: 8081
    readinessProbe:
      initialDelaySeconds: 20   
      periodSeconds: 5          
      timeoutSeconds: 10   
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
    livenessProbe:
      initialDelaySeconds: 20
      periodSeconds: 5
      timeoutSeconds: 10
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
    startupProbe:
      initialDelaySeconds: 20
      periodSeconds: 5
      timeoutSeconds: 10
      httpGet:
        scheme: HTTP
        port: 8081
        path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f start-read-live.yaml 
service/springboot-live created
pod/springboot-live created

[root@k8s-master01 pod-4]# kubectl exec -it springboot-live -- kill 1
# Run this once the container is up
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME              READY   STATUS    RESTARTS   AGE
springboot-live   0/1     Pending   0          0s
springboot-live   0/1     Pending   0          0s
springboot-live   0/1     ContainerCreating   0          0s
springboot-live   0/1     ContainerCreating   0          1s
springboot-live   0/1     Running             0          4s
springboot-live   0/1     Running             0          26s
# Note why there are two transitions here: we configured both liveness and readiness probes
springboot-live   1/1     Running             0          26s
springboot-live   0/1     Error               0          61s
springboot-live   0/1     Running             1 (3s ago)   63s
springboot-live   0/1     Running             1 (26s ago)   86s
# Same as above
springboot-live   1/1     Running             1 (26s ago)   86s

[root@k8s-master01 pod-4]# kubectl delete -f start-read-live.yaml 
service "springboot-live" deleted
pod "springboot-live" deleted
# Clean up

The three probes can roughly be understood as:

startupProbe checks whether the container has finished starting

livenessProbe checks whether the running container is still healthy (restarting it on failure)

readinessProbe checks whether the service inside the container is ready to receive traffic

K8s Controllers: ReplicaSet from Basics to Enterprise Practice

ReplicaSet Resources: YAML Writing Tips
[root@k8s-master01 rs]# kubectl explain replicaset.
# View help

ctr -n k8s.io images import frontend.tar.gz
ctr -n k8s.io images import myapp-blue-v1.tar.gz
ctr -n k8s.io images import myapp-blue-v2.tar.gz
ctr -n k8s.io images import myapp-v2.tar.gz
# Import images

[root@k8s-master01 rs]# cat replicaset.yaml 
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
  namespace: default
  labels:
    app: guestbook
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      tier1: frontend1
  template:
    metadata:
      labels:
        tier1: frontend1
    spec:
      containers:
      - name: php-redis
        image: docker.io/yecc/gcr.io-google_samples-gb-frontend:v3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        startupProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet: 
            scheme: HTTP
            port: 80
            path: /
        livenessProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet: 
            scheme: HTTP
            port: 80
            path: /
        readinessProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet: 
            scheme: HTTP
            port: 80
            path: /
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml 
replicaset.apps/frontend created
[root@k8s-master01 rs]# kubectl get pod -w
NAME             READY   STATUS    RESTARTS   AGE
frontend-2h7l7   0/1     Pending   0          0s
frontend-2h7l7   0/1     Pending   0          0s
frontend-2xmw6   0/1     Pending   0          0s
frontend-6cb5q   0/1     Pending   0          0s
frontend-2xmw6   0/1     Pending   0          0s
frontend-6cb5q   0/1     Pending   0          0s
frontend-2xmw6   0/1     ContainerCreating   0          0s
frontend-2h7l7   0/1     ContainerCreating   0          0s
frontend-6cb5q   0/1     ContainerCreating   0          0s
frontend-2h7l7   0/1     ContainerCreating   0          1s
frontend-2xmw6   0/1     ContainerCreating   0          1s
frontend-6cb5q   0/1     ContainerCreating   0          1s
frontend-2xmw6   0/1     Running             0          2s
frontend-6cb5q   0/1     Running             0          2s
frontend-2h7l7   0/1     Running             0          2s
frontend-2h7l7   0/1     Running             0          21s
frontend-2h7l7   1/1     Running             0          21s
frontend-6cb5q   0/1     Running             0          21s
frontend-6cb5q   1/1     Running             0          21s
frontend-2xmw6   0/1     Running             0          22s
frontend-2xmw6   1/1     Running             0          22s
[root@k8s-master01 rs]# kubectl get rs
NAME       DESIRED   CURRENT   READY   AGE
frontend   3         3         0       10s
# Create the ReplicaSet

[root@k8s-master01 rs]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
frontend-2h7l7   1/1     Running   0          57s
frontend-2xmw6   1/1     Running   0          57s
frontend-6cb5q   1/1     Running   0          57s
[root@k8s-master01 rs]# kubectl delete pod frontend-2h7l7
pod "frontend-2h7l7" deleted
[root@k8s-master01 rs]# kubectl get pod -w
NAME             READY   STATUS    RESTARTS   AGE
frontend-2h7l7   1/1     Running   0          75s
frontend-2xmw6   1/1     Running   0          75s
frontend-6cb5q   1/1     Running   0          75s
frontend-2h7l7   1/1     Terminating   0          79s
frontend-g6prf   0/1     Pending       0          1s
frontend-g6prf   0/1     Pending       0          1s
frontend-g6prf   0/1     ContainerCreating   0          1s
frontend-2h7l7   1/1     Terminating         0          80s
frontend-g6prf   0/1     ContainerCreating   0          2s
frontend-2h7l7   0/1     Terminating         0          81s
frontend-2h7l7   0/1     Terminating         0          81s
frontend-2h7l7   0/1     Terminating         0          81s
frontend-g6prf   0/1     Running             0          3s
frontend-g6prf   0/1     Running             0          27s
frontend-g6prf   1/1     Running             0          27s
# Delete a pod managed by the ReplicaSet: whenever one is missing, the ReplicaSet creates a replacement
Scaling and Updating Pods with a ReplicaSet
[root@k8s-master01 rs]# cat replicaset.yaml |grep replicas:
  replicas: 4
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml 
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
frontend-2xmw6   1/1     Running   0          31m
frontend-69p98   1/1     Running   0          52s
frontend-6cb5q   1/1     Running   0          31m
frontend-g6prf   1/1     Running   0          30m
# Scale up by editing replicas in the YAML file and re-applying

[root@k8s-master01 rs]# cat replicaset.yaml |grep replicas:
  replicas: 2
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml 
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
frontend-2xmw6   1/1     Running   0          33m
frontend-g6prf   1/1     Running   0          31m
# Scale down the same way

[root@k8s-master01 rs]# cat replicaset.yaml |grep image:
        image: docker.io/ikubernetes/myapp:v2
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml 
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods -owide
NAME             READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
frontend-2xmw6   1/1     Running   0          35m   10.244.85.205   k8s-node01   <none>           <none>
frontend-g6prf   1/1     Running   0          33m   10.244.85.206   k8s-node01   <none>           <none>
[root@k8s-master01 rs]# curl 10.244.85.205 |head -n3
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     100   921  100   921    0     0   527k      0 --:--:-- --:--:-- --:--:--  899k
<html ng-app="redis">
  <head>
    <title>Guestbook</title>
# The ReplicaSet did not update the running containers' image; the content is still the old version
[root@k8s-master01 rs]# kubectl delete pod frontend-2xmw6
pod "frontend-2xmw6" deleted
[root@k8s-master01 rs]# kubectl delete pod frontend-g6prf
pod "frontend-g6prf" deleted
[root@k8s-master01 rs]# kubectl get pods -owide
NAME             READY   STATUS    RESTARTS   AGE     IP              NODE         NOMINATED NODE   READINESS GATES
frontend-8cdmn   1/1     Running   0          2m13s   10.244.58.221   k8s-node02   <none>           <none>
frontend-mk6ln   0/1     Running   0          25s     10.244.85.207   k8s-node01   <none>           <none>
[root@k8s-master01 rs]# curl 10.244.58.221
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
# A ReplicaSet cannot roll out an image update on its own; the pods must be deleted manually

[root@k8s-master01 rs]# kubectl delete -f replicaset.yaml 
replicaset.apps "frontend" deleted
# Clean up

Summary:

To upgrade with a bare ReplicaSet in production, you could delete one pod, watch for a while, and if nothing breaks delete the next, but that needs repeated manual intervention. Production environments typically use blue-green releases instead: keep the existing controller rs1, create a second controller rs2, then modify the Service's label selector so the Service matches rs2. That is a real blue-green release, but it requires careful planning. Kubernetes provides a controller built on top of ReplicaSet that does this work for us: the Deployment.

K8s Controllers: Deployment from Basics to Enterprise Practice

Deployment Resource Basics


Note: how the rollingUpdate strategy values are calculated
[root@k8s-master01 ~]# kubectl explain deploy.spec.strategy.rollingUpdate.
# View help

maxSurge fractions round up to the next integer.

maxUnavailable fractions round down.

replicas: 5

maxSurge: 25% -> 5*25% = 1.25 -> rounds up to 2 -> at most 5+2 = 7 pods during the update

maxUnavailable: 25% -> 5*25% = 1.25 -> rounds down to 1 -> at least 5-1 = 4 pods must stay available

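Where these knobs live in a Deployment manifest, as a minimal sketch:

spec:
  replicas: 5
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%        # rounds up: at most 7 pods during the update
      maxUnavailable: 25%  # rounds down: at least 4 pods stay available
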
Deployment Resources: YAML Writing Tips
[root@k8s-master01 ~]# mkdir deployment
[root@k8s-master01 ~]# cd deployment/
[root@k8s-master01 deployment]# kubectl explain deploy.
# View help

ctr -n k8s.io images import myapp-blue-v1.tar.gz 
ctr -n k8s.io images import myapp-blue-v2.tar.gz
# Import images

[root@k8s-master01 deployment]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-v1
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
      version: v1
  template:
    metadata:
      labels:
        app: myapp
        version: v1
    spec:
      containers:
      - name: myapp
        image: janakiramm/myapp:v1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        startupProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet:
            port: 80
            path: /
            scheme: HTTP
        livenessProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet:
            port: 80
            path: /
            scheme: HTTP
        readinessProbe:
          initialDelaySeconds: 20
          periodSeconds: 5
          timeoutSeconds: 10
          httpGet:
            port: 80
            path: /
            scheme: HTTP
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 created
[root@k8s-master01 deployment]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-58bdffcdd7-bn79v   1/1     Running   0          77s
myapp-v1-58bdffcdd7-cqz99   1/1     Running   0          77s
[root@k8s-master01 deployment]# kubectl get rs
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   2         2         2       79s
[root@k8s-master01 deployment]# kubectl get deployment
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
myapp-v1   2/2     2            2           86s
# Create the deployment
Scaling Pods with a Deployment
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
  replicas: 3
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-58bdffcdd7-b64k7   1/1     Running   0          32s
myapp-v1-58bdffcdd7-bn79v   1/1     Running   0          3m3s
myapp-v1-58bdffcdd7-cqz99   1/1     Running   0          3m3s
[root@k8s-master01 deployment]# kubectl get rs
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   3         3         3       3m9s
[root@k8s-master01 deployment]# kubectl get deploy
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
myapp-v1   3/3     3            3           3m11s
# Scale up by changing replicas in the YAML file

[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
  replicas: 2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-58bdffcdd7-bn79v   1/1     Running   0          4m15s
myapp-v1-58bdffcdd7-cqz99   1/1     Running   0          4m15s
[root@k8s-master01 deployment]# kubectl get rs
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   2         2         2       4m17s
[root@k8s-master01 deployment]# kubectl get deployment
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
myapp-v1   2/2     2            2           4m20s
# Scale down; the controller chooses which pods to delete
Rolling Updates with a Deployment
[root@k8s-master01 deployment]# kubectl explain deploy.spec.strategy.
# View help
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
  replicas: 3
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
# First set the replica count to 3 and apply

[root@k8s-master01 deployment]# kubectl describe deployments.apps myapp-v1 |grep -i strategy
StrategyType:           RollingUpdate
RollingUpdateStrategy:  25% max unavailable, 25% max surge
# The default strategy is RollingUpdate
# By default both maxUnavailable and maxSurge are 25%

replicas: 3
maxSurge: 25% -> 3*25% = 0.75 -> rounds up to 1 -> at most 3+1 = 4 pods
maxUnavailable: 25% -> 3*25% = 0.75 -> rounds down to 0 -> at least 3-0 = 3 pods available

[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
        image: janakiramm/myapp:v2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
# Change the image to trigger a rolling update
[root@k8s-master01 deployment]# kubectl get pods -w
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-58bdffcdd7-bn79v   1/1     Running   0          29m
myapp-v1-58bdffcdd7-cqz99   1/1     Running   0          29m
myapp-v1-58bdffcdd7-vxbjk   1/1     Running   0          5m4s
####################
myapp-v1-744bfb8886-8xrzt   0/1     Pending   0          0s
myapp-v1-744bfb8886-8xrzt   0/1     Pending   0          0s
myapp-v1-744bfb8886-8xrzt   0/1     ContainerCreating   0          0s
myapp-v1-744bfb8886-8xrzt   0/1     ContainerCreating   0          0s
myapp-v1-744bfb8886-8xrzt   0/1     Running             0          1s
myapp-v1-744bfb8886-8xrzt   0/1     Running             0          25s
myapp-v1-744bfb8886-8xrzt   1/1     Running             0          25s
myapp-v1-58bdffcdd7-vxbjk   1/1     Terminating         0          6m4s
myapp-v1-744bfb8886-92vvt   0/1     Pending             0          0s
myapp-v1-744bfb8886-92vvt   0/1     Pending             0          0s
myapp-v1-744bfb8886-92vvt   0/1     ContainerCreating   0          0s
myapp-v1-58bdffcdd7-vxbjk   1/1     Terminating         0          6m5s
...
[root@k8s-master01 deployment]# kubectl get rs -w
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   3         3         3       29m
###################
myapp-v1-744bfb8886   1         0         0       0s
myapp-v1-744bfb8886   1         0         0       0s
myapp-v1-744bfb8886   1         1         0       0s
myapp-v1-744bfb8886   1         1         1       25s
myapp-v1-58bdffcdd7   2         3         3       30m
...
[root@k8s-master01 deployment]# kubectl get rs
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   0         0         0       31m
myapp-v1-744bfb8886   3         3         3       83s
# Rolling update complete
Deployment Rollback
[root@k8s-master01 deployment]# kubectl rollout history deployment myapp-v1 
deployment.apps/myapp-v1 
REVISION  CHANGE-CAUSE
1         <none>
2         <none>

# View the deployment's rollout history
[root@k8s-master01 deployment]# kubectl rollout undo deployment/myapp-v1 --to-revision=1
deployment.apps/myapp-v1 rolled back
[root@k8s-master01 deployment]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-58bdffcdd7-2tgr2   1/1     Running   0          2m2s
myapp-v1-58bdffcdd7-bk6w7   1/1     Running   0          101s
myapp-v1-58bdffcdd7-lrjhp   1/1     Running   0          81s
[root@k8s-master01 deployment]# kubectl get rs
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-58bdffcdd7   3         3         3       35m
myapp-v1-744bfb8886   0         0         0       5m57s
# Rollback complete
Customizing the Deployment Update Strategy
[root@k8s-master01 deployment]# kubectl explain deploy.spec.strategy.
# View help
[root@k8s-master01 deployment]# cat deploy-demo.yaml |head -n15
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-v1
  namespace: default
spec:
  strategy: 
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      version: v1
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
        image: janakiramm/myapp:v2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods -w
...
# Test rollingUpdate

With 3 pods and these limits, the pod count during the update stays between 2 (minimum available) and 4 (maximum total).

Testing Recreate

Never use Recreate in production: rebuilding every pod takes time, and production traffic cannot go down in the meantime.

[root@k8s-master01 deployment]# cat deploy-demo.yaml |head -n15
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-v1
  namespace: default
spec:
  strategy: 
    type: Recreate
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      version: v1
  template:
    metadata:
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
        image: janakiramm/myapp:v1
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods -w
...

[root@k8s-master01 deployment]# kubectl delete -f deploy-demo.yaml 
deployment.apps "myapp-v1" deleted
# Clean up
Blue-Green Deployment Basics

In a blue-green deployment there are two complete systems: one currently serving traffic, marked "green", and one staged for release, marked "blue". Both are fully functional, running systems; they differ only in version and in whether they are serving users. (Blue and green can each stand for the old or the new system; the idea matters, not the colors.)

When a new version is developed and needs to replace the old one in production, a brand-new system running the new code is built alongside the live system. At that point two systems are running: the old one serving users is the green system, and the newly deployed one is the blue system.

Advantages:

1. No downtime during the update, and lower risk

2. Easy, fast rollback: just change the routing or switch the DNS record

Disadvantages:

1. Higher cost: two full environments must be deployed, and any problem in the new version (including its base services) instantly affects all users once traffic switches

2. Double the machines, with the corresponding expense

3. Operating on non-isolated machines (Docker, VMs) risks destroying the blue and green environments

4. Mishandling the load balancer / reverse proxy / routing / DNS can leave traffic not actually switched over

Implementing Blue-Green Deployment on K8s

Kubernetes has no built-in blue-green deployment. Currently the best approach is to create a new Deployment, then update the application's Service to point at the application in the new Deployment.

ctr -n k8s.io images import myapp-lan.tar.gz
ctr -n k8s.io images import myapp-lv.tar.gz
# Import images on the nodes

[root@k8s-master01 deployment]# kubectl create ns blue-green
namespace/blue-green created
[root@k8s-master01 deployment]# cat lan.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-v1
  namespace: blue-green
spec:
  replicas: 3
  selector:
   matchLabels:
    app: myapp
    version: v1
  template:
   metadata:
    labels:
     app: myapp
     version: v1
   spec:
    containers:
    - name: myapp
      image: janakiramm/myapp:v1
      imagePullPolicy: IfNotPresent
      ports:
      - containerPort: 80

[root@k8s-master01 deployment]# kubectl apply -f lan.yaml 
deployment.apps/myapp-v1 created
# Deploy the system currently serving traffic
[root@k8s-master01 deployment]# cat service_lanlv.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myapp-lan-lv
  namespace: blue-green
  labels:
    app: myapp
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30062
    name: http
  selector:
    app: myapp
    version: v1

[root@k8s-master01 deployment]# kubectl apply -f service_lanlv.yaml 
service/myapp-lan-lv created
[root@k8s-master01 deployment]# kubectl describe svc myapp-lan-lv -n blue-green |grep -i endpoints:
Endpoints:                10.244.58.236:80,10.244.58.237:80,10.244.85.217:80
# Write the service YAML that exposes it externally

Browser access works.

[root@k8s-master01 deployment]# cat lv.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-v2
  namespace: blue-green
spec:
  replicas: 3
  selector:
   matchLabels:
    app: myapp
    version: v2
  template:
   metadata:
    labels:
     app: myapp
     version: v2
   spec:
    containers:
    - name: myapp
      image: janakiramm/myapp:v2
      imagePullPolicy: IfNotPresent
      ports:
      - containerPort: 80

[root@k8s-master01 deployment]# kubectl apply -f lv.yaml 
deployment.apps/myapp-v2 created
# Deploy the second system, i.e., the new version about to be released
[root@k8s-master01 deployment]# kubectl get pods -n blue-green
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-7b55fffbb5-2vpt7   1/1     Running   0          3m11s
myapp-v1-7b55fffbb5-gfnnt   1/1     Running   0          3m11s
myapp-v1-7b55fffbb5-xpk2f   1/1     Running   0          3m11s
myapp-v2-5779dc88f-55566    1/1     Running   0          16s
myapp-v2-5779dc88f-cjnrp    1/1     Running   0          16s
myapp-v2-5779dc88f-sz2m9    1/1     Running   0          16s
[root@k8s-master01 deployment]# kubectl get pods --show-labels -n blue-green
NAME                        READY   STATUS    RESTARTS   AGE     LABELS
myapp-v1-7b55fffbb5-2vpt7   1/1     Running   0          5m25s   app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-gfnnt   1/1     Running   0          5m25s   app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-xpk2f   1/1     Running   0          5m25s   app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v2-5779dc88f-55566    1/1     Running   0          2m30s   app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-cjnrp    1/1     Running   0          2m30s   app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-sz2m9    1/1     Running   0          2m30s   app=myapp,pod-template-hash=5779dc88f,version=v2
[root@k8s-master01 deployment]# cat service_lanlv.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myapp-lan-lv
  namespace: blue-green
  labels:
    app: myapp
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30062
    name: http
  selector:
    app: myapp
    version: v2
# Only the service's label selector needs changing; here version is changed to v2
[root@k8s-master01 deployment]# kubectl apply -f service_lanlv.yaml 
service/myapp-lan-lv configured
[root@k8s-master01 deployment]# kubectl describe svc myapp-lan-lv -n blue-green |grep -i endpoints:
Endpoints:                10.244.58.238:80,10.244.85.218:80,10.244.85.219:80
[root@k8s-master01 deployment]# kubectl get pods --show-labels -n blue-green -owide
NAME                        READY   STATUS    RESTARTS   AGE     IP              NODE         NOMINATED NODE   READINESS GATES   LABELS
myapp-v1-7b55fffbb5-2vpt7   1/1     Running   0          5m40s   10.244.58.237   k8s-node02   <none>           <none>            app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-gfnnt   1/1     Running   0          5m40s   10.244.85.217   k8s-node01   <none>           <none>            app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-xpk2f   1/1     Running   0          5m40s   10.244.58.236   k8s-node02   <none>           <none>            app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v2-5779dc88f-55566    1/1     Running   0          2m45s   10.244.58.238   k8s-node02   <none>           <none>            app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-cjnrp    1/1     Running   0          2m45s   10.244.85.218   k8s-node01   <none>           <none>            app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-sz2m9    1/1     Running   0          2m45s   10.244.85.219   k8s-node01   <none>           <none>            app=myapp,pod-template-hash=5779dc88f,version=v2
# Traffic has switched to the new system

[root@k8s-master01 deployment]# kubectl delete -f lv.yaml 
deployment.apps "myapp-v2" deleted
[root@k8s-master01 deployment]# kubectl delete -f lan.yaml 
deployment.apps "myapp-v1" deleted
[root@k8s-master01 deployment]# kubectl delete -f service_lanlv.yaml 
service "myapp-lan-lv" deleted
# Clean up

Browser access works.

Implementing Canary Releases on K8s
Canary Release Overview

**Origin of the canary release:** in the 17th century, British miners discovered that canaries are extremely sensitive to firedamp. Even a trace of gas in the air makes a canary stop singing, and once the concentration passes a certain level the canary dies of poisoning long before humans notice anything. With the crude mining equipment of the time, miners carried a canary into the pit as a gas detector so they could evacuate at the first sign of danger.

**Canary release (also called gray release or gray update):** a canary release typically starts by rolling out to one machine, or a small proportion, say 2% of servers, mainly for traffic validation; this is the canary test (commonly called a gray test in China).

Simple canary tests are verified manually; complex ones need fairly complete monitoring infrastructure, using metric feedback to observe the canary's health as the basis for continuing the release or rolling back. If the canary test passes, the remaining v1 instances are all upgraded to v2. If it fails, the canary is rolled back directly and the release is aborted.

[root@k8s-master01 deployment]# kubectl apply -f lan.yaml 
deployment.apps/myapp-v1 created
[root@k8s-master01 deployment]# kubectl set image deployment myapp-v1 myapp=docker.io/xianchao/nginx:v1  -n blue-green && kubectl rollout pause deployment myapp-v1 -n blue-green
deployment.apps/myapp-v1 image updated
deployment.apps/myapp-v1 paused
[root@k8s-master01 deployment]# kubectl get pods -n blue-green -w
NAME                        READY   STATUS    RESTARTS   AGE
myapp-v1-7b55fffbb5-46cqg   1/1     Running   0          5s
myapp-v1-7b55fffbb5-8mh8k   1/1     Running   0          5s
myapp-v1-7b55fffbb5-fnb8c   1/1     Running   0          5s
#######################

myapp-v1-644d75999-2dlks    0/1     Pending   0          0s
myapp-v1-644d75999-2dlks    0/1     Pending   0          0s
myapp-v1-644d75999-2dlks    0/1     ContainerCreating   0          0s
myapp-v1-644d75999-2dlks    0/1     ContainerCreating   0          0s
myapp-v1-644d75999-2dlks    1/1     Running             0          1s
# A new pod is started as the canary; the other three stay paused, i.e., not yet updated, still serving the old version

[root@k8s-master01 deployment]# kubectl rollout resume deployment myapp-v1 -n blue-green
deployment.apps/myapp-v1 resumed
[root@k8s-master01 deployment]# kubectl get pods -n blue-green -w
...
[root@k8s-master01 deployment]# kubectl get rs -n blue-green
NAME                  DESIRED   CURRENT   READY   AGE
myapp-v1-644d75999    3         3         3       2m25s
myapp-v1-7b55fffbb5   0         0         0       2m32s
# If everything looks good, resume to update the rest
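
Had the canary misbehaved, the rollout could be reverted instead; note that a paused Deployment must be resumed before it can be rolled back. A sketch against the same deployment (output omitted):

[root@k8s-master01 deployment]# kubectl rollout resume deployment myapp-v1 -n blue-green
[root@k8s-master01 deployment]# kubectl rollout undo deployment myapp-v1 -n blue-green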

K8s Layer-4 Proxy Service: Basics to Enterprise Practice

Service Layer-4 Proxy Basics
Question: why do we need a Service?

Service Overview

Service name resolution depends on the DNS add-on (early versions used kube-dns; newer versions use CoreDNS), so the DNS add-on must be deployed after the cluster itself. For networking, Kubernetes relies on third-party network plugins (flannel, calico, and so on).

Every K8s node runs a component called kube-proxy. kube-proxy constantly watches the apiserver for changes to Service resources, using Kubernetes' built-in watch mechanism to learn of any Service-related change the moment it happens. Whenever a Service changes (for example, it is created or deleted), kube-proxy translates it into rules on the local node that route requests to the right backend pods. Those rules may be iptables or ipvs, depending on how Service proxying is implemented.
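
On a kubeadm cluster, the rule engine kube-proxy uses can be checked from its ConfigMap; a hedged example, assuming kubeadm's default ConfigMap name (this cluster runs ipvs, as the ipvsadm output later shows):

[root@k8s-master01 ~]# kubectl get configmap kube-proxy -n kube-system -o yaml | grep mode
# prints the configured proxy mode; empty means the iptables default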

How a Service Works

When a Service is created, k8s uses its label selector to find matching Pods and creates an Endpoints object with the same name as the Service. When Pod addresses change, the Endpoints object changes with them. When the Service receives a client request, it uses the Endpoints object to find the Pod address to forward to. (Which node's Pod the request lands on is decided by kube-proxy's load balancing.)

A K8s cluster has three kinds of IP addresses (each can be listed with kubectl, as sketched below):

  1. Node Network (node network)
  2. Pod Network (pod network)
  3. Cluster Network (cluster addresses, also called the service network)
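
Each kind can be listed with standard kubectl commands, a quick sketch (output omitted):

[root@k8s-master01 ~]# kubectl get nodes -owide     # node network: INTERNAL-IP column
[root@k8s-master01 ~]# kubectl get pods -A -owide   # pod network: IP column
[root@k8s-master01 ~]# kubectl get svc -A           # service network: CLUSTER-IP column
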
Service Proxy: ClusterIP Type
[root@k8s-master01 ~]# kubectl explain service.spec.
# View help

[root@k8s-node01 images]# ctr -n k8s.io images import nginx.tar.gz
[root@k8s-node02 images]# ctr -n k8s.io images import nginx.tar.gz
# Import the image on the worker nodes

[root@k8s-master01 ~]# mkdir service
[root@k8s-master01 ~]# cd service/
[root@k8s-master01 service]# cat pod_test.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      run: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80  #port the container in the pod exposes
        startupProbe:
           periodSeconds: 5
           initialDelaySeconds: 60
           # set the initial delay long to make testing easier
           timeoutSeconds: 10
           httpGet:
             scheme: HTTP
             port: 80
             path: /
        livenessProbe:
           periodSeconds: 5
           initialDelaySeconds: 60
           timeoutSeconds: 10
           httpGet:
             scheme: HTTP
             port: 80
             path: /
        readinessProbe:
           periodSeconds: 5
           initialDelaySeconds: 60
           timeoutSeconds: 10
           httpGet:
             scheme: HTTP
             port: 80
             path: /
[root@k8s-master01 service]# kubectl apply -f pod_test.yaml 
deployment.apps/my-nginx created
# Create the deployment, which generates the pods
[root@k8s-master01 service]# kubectl get pods --show-labels
NAME                        READY   STATUS    RESTARTS   AGE   LABELS
my-nginx-7468bcb55b-b7vnl   0/1     Running   0          23s   pod-template-hash=7468bcb55b,run=my-nginx
my-nginx-7468bcb55b-zzpw2   0/1     Running   0          23s   pod-template-hash=7468bcb55b,run=my-nginx
# View the pod labels
[root@k8s-master01 service]# cat service_test.yaml 
apiVersion: v1
kind: Service
metadata:
  name: my-nginx
  labels:
    run: my-nginx
spec:
  type: ClusterIP
  ports:
  - port: 80
    # the service port, exposed to services inside the k8s cluster
    protocol: TCP
    targetPort: 80
    # the port defined in the pod's container
  selector:
    run: my-nginx
    # select pods carrying the run=my-nginx label
[root@k8s-master01 service]# kubectl apply -f service_test.yaml 
service/my-nginx created
# Create the service
[root@k8s-master01 service]# kubectl get svc -l run=my-nginx
NAME       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
my-nginx   ClusterIP   10.98.163.90   <none>        80/TCP    50s
[root@k8s-master01 service]# curl 10.98.163.90
...
# Test the service; the ClusterIP type is reachable only from inside the cluster
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints:         10.244.58.246:80,10.244.85.224:80

[root@k8s-master01 service]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
my-nginx-7468bcb55b-b7vnl   1/1     Running   0          5m53s
my-nginx-7468bcb55b-zzpw2   1/1     Running   0          5m53s
[root@k8s-master01 service]# kubectl delete pod my-nginx-7468bcb55b-b7vnl 
pod "my-nginx-7468bcb55b-b7vnl" deleted
[root@k8s-master01 service]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
my-nginx-7468bcb55b-8vz67   0/1     Running   0          49s
my-nginx-7468bcb55b-zzpw2   1/1     Running   0          6m46s
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints:         10.244.85.224:80
# The service does not proxy to a pod before it is ready
[root@k8s-master01 service]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
my-nginx-7468bcb55b-8vz67   1/1     Running   0          63s
my-nginx-7468bcb55b-zzpw2   1/1     Running   0          7m
[root@k8s-master01 service]# kubectl get pods -owide
NAME                        READY   STATUS    RESTARTS   AGE     IP              NODE         NOMINATED NODE   READINESS GATES
my-nginx-7468bcb55b-8vz67   1/1     Running   0          74s     10.244.58.247   k8s-node02   <none>           <none>
my-nginx-7468bcb55b-zzpw2   1/1     Running   0          7m11s   10.244.85.224   k8s-node01   <none>           <none>
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints:         10.244.58.247:80,10.244.85.224:80
# This is why readiness probes matter: otherwise the service may proxy to pods whose application has not come up

[root@k8s-master01 service]# kubectl delete -f pod_test.yaml 
deployment.apps "my-nginx" deleted
[root@k8s-master01 service]# kubectl delete -f service_test.yaml 
service "my-nginx" deleted
# Clean up

Taking this service as an example, its fully qualified name is my-nginx.default.svc.cluster.local

That is: service-name.namespace.domain-suffix

This FQDN can only be resolved from inside pods; cluster nodes cannot resolve it
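
A quick way to verify this from inside a pod, as a hedged sketch (assumes the my-nginx service still exists; uses a throwaway busybox pod):

[root@k8s-master01 service]# kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup my-nginx.default.svc.cluster.local
# resolves inside the pod; the same nslookup run directly on a node fails, since nodes do not use the cluster DNS by default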

Service Proxy: NodePort Type
[root@k8s-master01 service]# cat pod_nodeport.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx-nodeport
spec:
  selector:
    matchLabels:
      run: my-nginx-nodeport
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx-nodeport
    spec:
      containers:
      - name: my-nginx-nodeport-container
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

[root@k8s-master01 service]# kubectl apply -f pod_nodeport.yaml 
deployment.apps/my-nginx-nodeport created
[root@k8s-master01 service]# cat service_nodeport.yaml 
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-nodeport
  labels:
    run: my-nginx-nodeport
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30380
  selector:
    run: my-nginx-nodeport

[root@k8s-master01 service]# kubectl apply -f service_nodeport.yaml 
service/my-nginx-nodeport created
[root@k8s-master01 service]# kubectl get pods
NAME                                 READY   STATUS    RESTARTS   AGE
my-nginx-nodeport-85c4df8944-g8s85   1/1     Running   0          19s
my-nginx-nodeport-85c4df8944-k6b9f   1/1     Running   0          19s
[root@k8s-master01 service]# kubectl get svc
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes          ClusterIP   10.96.0.1        <none>        443/TCP        8d
my-nginx-nodeport   NodePort    10.110.139.100   <none>        80:30380/TCP   13s
# Create the pods and a NodePort service

[root@k8s-master01 service]# ss -lntup |grep 30380
# Searching for the port turns up nothing; no process listens on it
[root@k8s-master01 service]# ipvsadm -Ln |head -n10
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.17.0.1:30380 rr
  -> 10.244.58.249:80             Masq    1      0          0         
  -> 10.244.85.227:80             Masq    1      0          0         
TCP  192.168.1.181:30380 rr
  -> 10.244.58.249:80             Masq    1      1          0         
  -> 10.244.85.227:80             Masq    1      0          1 
# Inspect the IPVS forwarding rules

[root@k8s-master01 service]# kubectl delete -f pod_nodeport.yaml 
deployment.apps "my-nginx-nodeport" deleted
[root@k8s-master01 service]# kubectl delete -f service_nodeport.yaml 
service "my-nginx-nodeport" deleted
# Clean up

Traffic forwarding flow:

Client request http://192.168.1.181:30380 -> docker0 virtual NIC 172.17.0.1:30380 -> 10.244.58.249:80, 10.244.85.227:80 (the backends shown in the ipvsadm output above)

Service Proxy: ExternalName Type

Use case: cross-namespace access

Requirement: the client service in the default namespace wants to access the nginx-svc service in the nginx-ns namespace

[root@k8s-node01 images]# ctr -n k8s.io images import busybox.tar.gz
[root@k8s-node02 images]# ctr -n k8s.io images import busybox.tar.gz
# Import the image on the worker nodes

[root@k8s-master01 service]# kubectl create ns nginx-ns
namespace/nginx-ns created
[root@k8s-master01 service]# cat server_nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: nginx-ns
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
   metadata:
    labels:
      app: nginx
   spec:
     containers:
     - name: nginx
       image: nginx
       imagePullPolicy: IfNotPresent

[root@k8s-master01 service]# kubectl apply -f server_nginx.yaml 
deployment.apps/nginx created
# Deploy a pod in the nginx-ns namespace running the nginx service
[root@k8s-master01 service]# cat nginx_svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: nginx-ns
spec:
  selector:
    app: nginx
  ports:
   - name: http
     protocol: TCP
     port: 80
     targetPort: 80
[root@k8s-master01 service]# kubectl apply -f nginx_svc.yaml 
service/nginx-svc created
# Deploy the nginx service, type ClusterIP
[root@k8s-master01 service]# cat client.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: client
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: busybox
  template:
   metadata:
    labels:
      app: busybox
   spec:
     containers:
     - name: busybox
       image: busybox
       imagePullPolicy: IfNotPresent
       command: ["/bin/sh","-c","sleep 36000"]

[root@k8s-master01 service]# kubectl apply -f client.yaml 
deployment.apps/client created
# Deploy the client in the default namespace
[root@k8s-master01 service]# cat client_svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: client-svc
spec:
  type: ExternalName
  externalName: nginx-svc.nginx-ns.svc.cluster.local
  ports:
  - name: http
    port: 80
    targetPort: 80

[root@k8s-master01 service]# kubectl apply -f client_svc.yaml 
service/client-svc created
# Create the ExternalName service
[root@k8s-master01 service]# kubectl get svc
NAME         TYPE           CLUSTER-IP   EXTERNAL-IP                            PORT(S)   AGE
client-svc   ExternalName   <none>       nginx-svc.nginx-ns.svc.cluster.local   80/TCP    89s
kubernetes   ClusterIP      10.96.0.1    <none>                                 443/TCP   8d
[root@k8s-master01 service]# kubectl get pods
NAME                      READY   STATUS    RESTARTS   AGE
client-798446484b-wrz86   1/1     Running   0          2m6s
[root@k8s-master01 service]# kubectl exec -it client-798446484b-wrz86 -- /bin/sh
/ # wget -q -O - client-svc
...
/ # wget -q -O - client-svc.default.svc.cluster.local
...
/ # wget -q -O - nginx-svc.nginx-ns.svc.cluster.local
...
# All three requests above return the same result
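
The three names agree because an ExternalName service is implemented as a DNS CNAME: client-svc resolves to nginx-svc.nginx-ns.svc.cluster.local, which in turn resolves to nginx-svc's ClusterIP. A hedged check from the same client shell (output omitted):

/ # nslookup client-svc
# shows the CNAME chain ending at nginx-svc's cluster IP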

[root@k8s-master01 service]# kubectl delete -f client_svc.yaml 
service "client-svc" deleted
[root@k8s-master01 service]# kubectl delete -f client.yaml 
deployment.apps "client" deleted
[root@k8s-master01 service]# kubectl delete -f server_nginx.yaml 
deployment.apps "nginx" deleted
# Clean up
Service Proxy: Custom Endpoints Resource

K8s best practice: a case study in mapping external services

Scenario 1: the k8s cluster references an external MySQL database

[root@k8s-node02 ~]# yum -y install mariadb-server
[root@k8s-node02 ~]# systemctl enable --now mariadb
[root@k8s-node02 ~]# mysql
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 5.5.68-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> quit
Bye
# Install mariadb-server on node2 as an out-of-cluster service for testing

[root@k8s-master01 service]# cat mysql_service.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: default
spec:
  type: ClusterIP
  ports:
  - port: 3306
[root@k8s-master01 service]# kubectl apply -f mysql_service.yaml 
service/mysql created
# Create a Service of type ClusterIP (note that it has no selector)
[root@k8s-master01 service]# kubectl get svc |grep mysql
mysql        ClusterIP   10.106.232.15   <none>        3306/TCP   43s
[root@k8s-master01 service]# kubectl describe svc mysql |grep -i endpoints:
Endpoints:         <none>
# The Endpoints field is empty because this Service has no selector, so we have to add an Endpoints object ourselves

[root@k8s-master01 service]# kubectl explain endpoints
# View the field documentation for the Endpoints resource
[root@k8s-master01 service]# cat mysql_endpoints.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: mysql
  # Note: the name must match the Service name, otherwise the Service will not pick up these endpoints
  namespace: default
subsets:
- addresses: 
  - ip: 192.168.1.192
  ports:
  - port: 3306
[root@k8s-master01 service]# kubectl apply -f mysql_endpoints.yaml 
endpoints/mysql created
# Create the Endpoints object for mysql
[root@k8s-master01 service]# kubectl describe svc mysql |grep -i endpoints:
Endpoints:         192.168.1.192:3306
# The external address is now attached to the Service
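
To prove end-to-end connectivity rather than just the Endpoints wiring, a throwaway client pod can be pointed at the Service name (a sketch, not from the original transcript; it assumes the mariadb image is pullable and that a remotely reachable account such as the test user sketched earlier exists):

kubectl run mysql-client --rm -it --restart=Never --image=mariadb:10.6 -- \
  mysql -h mysql.default.svc.cluster.local -utest -ptest123 -e 'SELECT 1;'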

[root@k8s-master01 service]# kubectl delete -f mysql_endpoints.yaml 
endpoints "mysql" deleted
[root@k8s-master01 service]# kubectl delete -f mysql_service.yaml 
service "mysql" deleted
[root@k8s-node02 ~]# yum -y remove mariadb-server
# Clean up the environment

With this in place, workloads inside the cluster can reach the external service through the Service name.
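
On newer clusters the same mapping can also be expressed as an EndpointSlice, which is what Kubernetes generates behind Endpoints objects anyway. A minimal sketch (assumes discovery.k8s.io/v1, available since v1.21; the kubernetes.io/service-name label is what ties the slice to the mysql Service, and the empty port name matches the Service's unnamed port):

apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: mysql-1
  namespace: default
  labels:
    kubernetes.io/service-name: mysql
addressType: IPv4
ports:
- name: ""
  protocol: TCP
  port: 3306
endpoints:
- addresses:
  - "192.168.1.192"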

The CoreDNS component in detail

CoreDNS is essentially a DNS server. Because DNS is a common service-discovery mechanism, many open-source projects and engineers use CoreDNS to provide service discovery for their clusters, and Kubernetes itself relies on CoreDNS to solve service discovery inside the cluster. A CNCF (Cloud Native Computing Foundation) project, CoreDNS is very simple in its implementation.
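
CoreDNS is configured through a Corefile kept in the coredns ConfigMap in kube-system (view it with kubectl -n kube-system get configmap coredns -o yaml). On a kubeadm-built cluster the Corefile typically looks like the following; plugins and options may differ on your cluster, so treat this as an illustration:

.:53 {
    errors
    health {
       lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
       pods insecure
       fallthrough in-addr.arpa ip6.arpa
       ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
       max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}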

Verifying CoreDNS

[root@k8s-node01 ~]# ctr -n k8s.io images import dig.tar.gz
[root@k8s-node02 ~]# ctr -n k8s.io images import dig.tar.gz
# Import the image on the worker nodes

[root@k8s-master01 service]# cat dig.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: dig
  namespace: default
spec:
  containers:
  - name: dig
    image: xianchao/dig:latest
    imagePullPolicy: IfNotPresent
    command:
      - sleep
      - "3600"
  restartPolicy: Always

[root@k8s-master01 service]# kubectl apply -f dig.yaml 
pod/dig created
[root@k8s-master01 service]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
dig    1/1     Running   0          114s
# Create the test pod
[root@k8s-master01 service]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   8d
[root@k8s-master01 service]# kubectl exec -it dig -- /bin/bash
bash-4.3# nslookup kubernetes
Server:		10.96.0.10
Address:	10.96.0.10#53

Name:	kubernetes.default.svc.cluster.local
Address: 10.96.0.1
# Test in-cluster name resolution
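# (the short name "kubernetes" resolves because kubelet writes cluster search
#  domains into the pod's /etc/resolv.conf; running cat /etc/resolv.conf here
#  would show roughly "search default.svc.cluster.local svc.cluster.local
#  cluster.local" and "nameserver 10.96.0.10" - illustrative values that vary
#  per cluster)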
bash-4.3# nslookup baidu.com
Server:		10.96.0.10
Address:	10.96.0.10#53

Non-authoritative answer:
Name:	baidu.com
Address: 39.156.66.10
Name:	baidu.com
Address: 110.242.68.66
# Test resolution of names outside the cluster

bash-4.3# exit
exit
[root@k8s-master01 service]# kubectl delete -f dig.yaml 
pod "dig" deleted
[root@k8s-master01 service]# kubectl delete -f .
# Clean up the environment
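
If the xianchao/dig image is not available, the image used by the upstream Kubernetes DNS-debugging guide serves the same purpose (a suggested alternative, not part of the original transcript; it requires the nodes to pull from registry.k8s.io):

kubectl run dnsutils --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 \
  --restart=Never -- sleep infinity
kubectl exec -it dnsutils -- nslookup kubernetes.default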
