Getting Started with k8s (Part 3): StatefulSet, Service, and Ingress


I. StatefulSet Operations

1. First Look

You can see that each Pod waits for the previous one to become Ready before the next one starts, so a failing health check on one container also blocks the Pods after it.
Each Pod gets a stable DNS name such as web-2.nginx.default.svc.cluster.local; in practice writing it as web-2.nginx.default is usually enough.

[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml

[root@master01 ~]# kubectl get pod -owide
NAME       READY   STATUS    RESTARTS   AGE     IP           NODE       NOMINATED NODE   READINESS GATES
dns-test   1/1     Running   0          2m2s    10.0.4.103   master01   <none>           <none>
web-0      1/1     Running   0          9m45s   10.0.4.157   master01   <none>           <none>
web-1      1/1     Running   0          9m42s   10.0.0.9     master03   <none>           <none>
web-2      1/1     Running   0          9m39s   10.0.2.74    node01     <none>           <none>


[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   2d2h
nginx        ClusterIP   None         <none>        80/TCP    41s

[root@master01 ~]# kubectl get pod -w

Test:

[root@master01 ~]# kubectl run -i --tty --image busybox:1.28 dns-test --restart Never --rm

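From the shell of the dns-test Pod started above, the headless Service and the per-Pod records can be resolved directly; a minimal check (the returned IPs should match the Pod list above):

/ # nslookup nginx
/ # nslookup web-0.nginx
/ # nslookup web-2.nginx.default.svc.cluster.local
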
The hostnames and DNS names do not change.

Delete and recreate the Pods and you can see that only the IP addresses change; everything else stays the same.

[root@master01 ~]# kubectl delete pod -l app=nginx
[root@master01 ~]# kubectl get pod -owide
NAME       READY   STATUS    RESTARTS   AGE    IP           NODE       NOMINATED NODE   READINESS GATES
dns-test   1/1     Running   0          5m3s   10.0.4.103   master01   <none>           <none>
web-0      1/1     Running   0          47s    10.0.4.152   master01   <none>           <none>
web-1      1/1     Running   0          44s    10.0.0.211   master03   <none>           <none>
web-2      1/1     Running   0          41s    10.0.2.48    node01     <none>           <none>

2. Scaling Up and Down

(1) Scaling up

Scaling up continues in ordinal order: each new Pod starts only after the previous one is Ready.

[root@master01 ~]# kubectl scale statefulset web --replicas 8
[root@master01 ~]# kubectl get pod -w

(2) Scaling down

Either kubectl scale or kubectl patch works. Scale-down runs in reverse ordinal order: web-7 is deleted first, then web-6, and so on.

[root@master01 ~]# kubectl scale statefulset web --replicas 3
[root@master01 ~]# kubectl patch sts web -p '{"spec":{"replicas":3}}'
[root@master01 ~]# kubectl get pod -w

(3) OnDelete update strategy

The default update strategy is RollingUpdate.


[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  updateStrategy:
    type: OnDelete
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl describe sts web

Test:

The image is not rolled out automatically; a Pod only picks up the new image after it is deleted by hand.

[root@master01 ~]# kubectl patch statefulsets.apps web --type='json' -p '[{"op":"replace","path":"/spec/template/spec/containers/0/image","value":"nginx:1.19"}]'
[root@master01 ~]# kubectl get sts -owide
NAME   READY   AGE     CONTAINERS   IMAGES
web    3/3     9m55s   nginx        nginx:1.19

[root@master01 ~]# kubectl describe pod web-0|grep Image:
    Image:          nginx:latest
[root@master01 ~]# kubectl delete pod web-1
[root@master01 ~]# kubectl get pod -w
[root@master01 ~]# for i in web-0 web-1 web-2;do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:latest
web-1:    Image:          nginx:1.19
web-2:    Image:          nginx:latest
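
Under OnDelete, rolling the remaining Pods onto the new image means deleting them one at a time and waiting for each replacement to come back; a minimal sketch using kubectl wait (the 120s timeout is an arbitrary choice):

[root@master01 ~]# for i in web-0 web-2; do kubectl delete pod $i; kubectl wait --for=condition=Ready pod/$i --timeout=120s; done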

(4) RollingUpdate update strategy

[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  updateStrategy:
    type: RollingUpdate
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl describe sts web|grep Update
Update Strategy:    RollingUpdate

[root@master01 ~]# kubectl set image statefulset/web nginx=nginx:1.19
[root@master01 ~]# for i in web-0 web-1 web-2;do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:1.19
web-1:    Image:          nginx:1.19
web-2:    Image:          nginx:1.19
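
The controller updates the Pods in reverse ordinal order (web-2 first, web-0 last). The rollout can also be followed with the standard rollout command, which blocks until every replica runs the new revision:

[root@master01 ~]# kubectl rollout status statefulset/web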

3. Canary Release

(1) partition

Setting partition to 3 protects the first three Pods (web-0 through web-2): only Pods whose ordinal is greater than or equal to the partition are updated.

[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  updateStrategy:
    type: OnDelete
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl patch statefulsets.apps web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":3}}}}'
[root@master01 ~]# kubectl set image statefulset/web nginx=nginx:1.21

[root@master01 ~]# for i in web-0 web-1 web-2;do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:1.19
web-1:    Image:          nginx:1.19
web-2:    Image:          nginx:1.19
[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 10
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl set image statefulset/web nginx=nginx:1.19
[root@master01 ~]# for i in web-{0..9};do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:latest
web-1:    Image:          nginx:latest
web-2:    Image:          nginx:latest
web-3:    Image:          nginx:1.19
web-4:    Image:          nginx:1.19
web-5:    Image:          nginx:1.19
web-6:    Image:          nginx:1.19
web-7:    Image:          nginx:1.19
web-8:    Image:          nginx:1.19
web-9:    Image:          nginx:1.19

[root@master01 ~]# kubectl patch statefulsets.apps web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}'
[root@master01 ~]# for i in web-{0..9};do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:latest
web-1:    Image:          nginx:latest
web-2:    Image:          nginx:1.19
web-3:    Image:          nginx:1.19
web-4:    Image:          nginx:1.19
web-5:    Image:          nginx:1.19
web-6:    Image:          nginx:1.19
web-7:    Image:          nginx:1.19
web-8:    Image:          nginx:1.19
web-9:    Image:          nginx:1.19
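
Lowering the partition step by step widens the canary; setting it to 0 would let the remaining Pods (web-0 and web-1 here) update as well, for example (the next subsection rolls the change back instead):

[root@master01 ~]# kubectl patch statefulsets.apps web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":0}}}}'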

(2) Rollback

[root@master01 ~]# kubectl rollout history statefulset web
statefulset.apps/web
REVISION  CHANGE-CAUSE
1         <none>
2         <none>

[root@master01 ~]# kubectl rollout history statefulset web --revision 2
statefulset.apps/web with revision #2
Pod Template:
  Labels:       app=nginx
  Containers:
   nginx:
    Image:      nginx:1.19
    Port:       80/TCP
    Host Port:  0/TCP
    Environment:        <none>
    Mounts:     <none>
  Volumes:      <none>

[root@master01 ~]# kubectl rollout undo statefulset web --to-revision 1
[root@master01 ~]# for i in web-{0..9};do echo -n $i:;kubectl describe pod $i|grep Image:;done
web-0:    Image:          nginx:latest
web-1:    Image:          nginx:latest
web-2:    Image:          nginx:latest
web-3:    Image:          nginx:latest
web-4:    Image:          nginx:latest
web-5:    Image:          nginx:latest
web-6:    Image:          nginx:latest
web-7:    Image:          nginx:latest
web-8:    Image:          nginx:latest
web-9:    Image:          nginx:latest

(3) Setting the strategy in the YAML file

Note that maxUnavailable for StatefulSet rolling updates sits behind the MaxUnavailableStatefulSet feature gate (alpha), so it may not take effect on clusters where that gate is disabled.

[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 10%
      partition: 3
  serviceName: "nginx"
  replicas: 10
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl describe sts web

(4) Setting podManagementPolicy: Parallel in the YAML file

With podManagementPolicy: Parallel the Pods are created and deleted in parallel instead of one by one; this policy affects scaling only, not rolling updates.

[root@master01 ~]# cat ssweb.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 10%
      partition: 3
  serviceName: "nginx"
  replicas: 10
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl get pod -w

Cascading delete (method 1): deleting the StatefulSet removes its Pods as well, but the headless Service has to be deleted separately.

[root@master01 ~]# kubectl delete sts web
statefulset.apps "web" deleted
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   2d4h
nginx        ClusterIP   None         <none>        80/TCP    3m50s
[root@master01 ~]# kubectl delete svc nginx

Non-cascading delete (method 2): --cascade=orphan leaves the Pods behind, so they are removed by label afterwards.

[root@master01 ~]# kubectl delete sts web --cascade=orphan
[root@master01 ~]# kubectl delete pod -l app=nginx

Force delete

Not recommended.

[root@master01 ~]# kubectl apply -f ssweb.yaml
[root@master01 ~]# kubectl set image statefulset/web nginx=nginx:haha
[root@master01 ~]# kubectl delete pods web-9 --grace-period=0 --force

Clearing finalizers

Recommended when a Pod is stuck in Terminating or Unknown.

[root@master01 ~]# kubectl patch pod web-9 -p '{"metadata":{"finalizers":null}}'

II. Service Operations


1. First Look

[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

[root@master01 ~]# cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
  selector:
    app: frontend

[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl apply -f nginx-svc.yaml

[root@master01 ~]# kubectl get pod -owide
NAME                        READY   STATUS    RESTARTS   AGE     IP           NODE       NOMINATED NODE   READINESS GATES
frontend-64dccd9ff7-bdmk8   1/1     Running   0          6m48s   10.0.0.230   master03   <none>           <none>
frontend-64dccd9ff7-f9xqq   1/1     Running   0          6m48s   10.0.2.157   node01     <none>           <none>
frontend-64dccd9ff7-shrn9   1/1     Running   0          6m48s   10.0.4.32    master01   <none>           <none>

[root@master01 ~]# kubectl get svc -owide
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE    SELECTOR
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    2d5h   <none>
nginx-svc    ClusterIP   10.103.140.145   <none>        8080/TCP   37s    app=frontend

[root@master01 ~]# kubectl describe svc nginx-svc
Name:              nginx-svc
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          app=frontend
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.103.140.145
IPs:               10.103.140.145
Port:              <unset>  8080/TCP
TargetPort:        80/TCP
Endpoints:         10.0.0.230:80,10.0.2.157:80,10.0.4.32:80
Session Affinity:  None
Events:            <none>
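
A quick connectivity check against the ClusterIP and Service port (use the CLUSTER-IP reported by kubectl get svc in your cluster):

[root@master01 ~]# curl 10.103.140.145:8080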

2. Scaling Test

[root@master01 ~]# kubectl scale deployment frontend --replicas 6
[root@master01 ~]# kubectl describe svc nginx-svc
Name:              nginx-svc
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          app=frontend
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.103.140.145
IPs:               10.103.140.145
Port:              <unset>  8080/TCP
TargetPort:        80/TCP
Endpoints:         10.0.0.230:80,10.0.1.163:80,10.0.2.157:80 + 3 more...
Session Affinity:  None
Events:            <none>

[root@master01 ~]# kubectl get ep
NAME         ENDPOINTS                                               AGE
kubernetes   10.10.10.21:6443,10.10.10.22:6443,10.10.10.23:6443      2d5h
nginx-svc    10.0.0.230:80,10.0.1.163:80,10.0.2.157:80 + 3 more...   5m59s

[root@master01 ~]# kubectl describe ep nginx-svc
Name:         nginx-svc
Namespace:    default
Labels:       <none>
Annotations:  endpoints.kubernetes.io/last-change-trigger-time: 2023-06-01T09:30:53Z
Subsets:
  Addresses:          10.0.0.230,10.0.1.163,10.0.2.157,10.0.3.9,10.0.4.153,10.0.4.32
  NotReadyAddresses:  <none>
  Ports:
    Name     Port  Protocol
    ----     ----  --------
    <unset>  80    TCP

Events:  <none>
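
Recent Kubernetes versions track the same backends in EndpointSlice objects as well; they can be listed by the kubernetes.io/service-name label:

[root@master01 ~]# kubectl get endpointslices -l kubernetes.io/service-name=nginx-svc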

3. Testing the ClusterIP

[root@master01 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    2d5h
nginx-svc    ClusterIP   10.103.140.145   <none>        8080/TCP   7m44s
[root@master01 ~]# ping 10.103.140.145
[root@master01 ~]# kubectl get pod -owide
NAME                        READY   STATUS    RESTARTS   AGE     IP           NODE       NOMINATED NODE   READINESS GATES
frontend-64dccd9ff7-69675   1/1     Running   0          8m45s   10.0.3.9     node02     <none>           <none>
frontend-64dccd9ff7-bdmk8   1/1     Running   0          17m     10.0.0.230   master03   <none>           <none>
frontend-64dccd9ff7-f9xqq   1/1     Running   0          17m     10.0.2.157   node01     <none>           <none>
frontend-64dccd9ff7-h5bnk   1/1     Running   0          8m45s   10.0.1.163   master02   <none>           <none>
frontend-64dccd9ff7-jvx6z   1/1     Running   0          8m45s   10.0.4.153   master01   <none>           <none>
frontend-64dccd9ff7-shrn9   1/1     Running   0          17m     10.0.4.32    master01   <none>           <none>

[root@master01 ~]# ipvsadm -Ln|grep -A 6 10.103.140.145
TCP  10.103.140.145:8080 rr
  -> 10.0.0.230:80                Masq    1      0          0
  -> 10.0.1.163:80                Masq    1      0          0
  -> 10.0.2.157:80                Masq    1      0          0
  -> 10.0.3.9:80                  Masq    1      0          0
  -> 10.0.4.32:80                 Masq    1      0          0
  -> 10.0.4.153:80                Masq    1      0          0


4. Testing a Service in Another Namespace (ns-nginx)

[root@master01 ~]# kubectl create ns test
[root@master01 ~]# cat ns-test.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ns-nginx
  namespace: test
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nstest
  template:
    metadata:
      labels:
        app: nstest
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: ns-svc
  namespace: test
spec:
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
  selector:
    app: nstest

[root@master01 ~]# kubectl apply -f ns-test.yaml
[root@master01 ~]# kubectl get pod -n test
NAME                        READY   STATUS    RESTARTS   AGE
ns-nginx-867c4f9bcb-g7db5   1/1     Running   0          17s
ns-nginx-867c4f9bcb-vc86d   1/1     Running   0          17s
ns-nginx-867c4f9bcb-x2vf5   1/1     Running   0          17s

[root@master01 ~]#  kubectl get svc -n test
NAME     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
ns-svc   ClusterIP   10.104.254.251   <none>        8080/TCP   24s
[root@master01 ~]# kubectl run box -it --rm --image busybox:1.28  /bin/sh
/ # cat /etc/resolv.conf
search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.96.0.10
options ndots:5

/ # nslookup ns-svc.test
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      ns-svc.test
Address 1: 10.104.254.251 ns-svc.test.svc.cluster.local

/ # wget ns-svc.test:8080
Connecting to ns-svc.test:8080 (10.104.254.251:8080)
index.html           100% |****************************************************************************************************************|   615   0:00:00 ETA
/ # cat index.html
<!DOCTYPE html>
...
</html>

5. Setting hostname and subdomain

[root@master01 ~]# cat domainname.yaml
apiVersion: v1
kind: Service
metadata:
  name: default-subdomain
spec:
  selector:
    name: busybox
  clusterIP: None
  ports:
  - name: foo
    port: 1234
    targetPort: 1234
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  hostname: busybox-1
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox2
  labels:
    name: busybox
spec:
  hostname: busybox-2
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"

[root@master01 ~]# kubectl apply -f domainname.yaml
[root@master01 ~]# kubectl get pod -owide|grep busy
busybox1                    1/1     Running   0          4s      10.0.0.188   master03   <none>           <none>
busybox2                    1/1     Running   0          4s      10.0.4.197   master01   <none>           <none>

[root@master01 ~]# kubectl get svc
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
default-subdomain   ClusterIP   None             <none>        1234/TCP   82s
kubernetes          ClusterIP   10.96.0.1        <none>        443/TCP    2d6h
nginx-svc           ClusterIP   10.103.140.145   <none>        8080/TCP   95m

[root@master01 ~]# kubectl describe svc default-subdomain
Name:              default-subdomain
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          name=busybox
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                None
IPs:               None
Port:              foo  1234/TCP
TargetPort:        1234/TCP
Endpoints:         10.0.4.159:1234,10.0.4.46:1234
Session Affinity:  None
Events:            <none>

Test access:

[root@master01 ~]# kubectl exec -it busybox1 -- sh
/ # nslookup default-subdomain
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      default-subdomain
Address 1: 10.0.4.197 busybox-2.default-subdomain.default.svc.cluster.local
Address 2: 10.0.0.188 busybox-1.default-subdomain.default.svc.cluster.local
/ # nslookup busybox-1
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      busybox-1
Address 1: 10.0.0.188 busybox-1.default-subdomain.default.svc.cluster.local
/ # nslookup busybox-2
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

nslookup: can't resolve 'busybox-2'
/ # nslookup busybox-2.default-subdomain
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      busybox-2.default-subdomain
Address 1: 10.0.4.197 busybox-2.default-subdomain.default.svc.cluster.local

6. setHostnameAsFQDN

An FQDN used as the hostname may be at most 64 characters; a longer one leaves the Pod stuck in Pending.

[root@master01 ~]# cat domainname.yaml
apiVersion: v1
kind: Service
metadata:
  name: default-subdomain
spec:
  selector:
    name: busybox
  clusterIP: None
  ports:
  - name: foo
    port: 1234
    targetPort: 1234
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  setHostnameAsFQDN: true
  hostname: busybox-1
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox2
  labels:
    name: busybox
spec:
  hostname: busybox-2
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"

[root@master01 ~]# kubectl apply -f domainname.yaml
service/default-subdomain created
[root@master01 ~]# kubectl exec -it busybox1 -- sh
/ # hostname
busybox-1.default-subdomain.default.svc.cluster.local

[root@master01 ~]# kubectl exec -it busybox2 -- sh
/ # hostname
busybox-2
/ # hostname -f
busybox-2.default-subdomain.default.svc.cluster.local

7. DNS Policies

(1) Default: ClusterFirst

[root@master01 ~]# kubectl edit pod busybox1
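
Rather than scrolling through kubectl edit, the effective policy can be read directly with a jsonpath query:

[root@master01 ~]# kubectl get pod busybox1 -o jsonpath='{.spec.dnsPolicy}{"\n"}'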

(2) ClusterFirstWithHostNet

[root@master01 ~]# cat domainname.yaml
apiVersion: v1
kind: Service
metadata:
  name: default-subdomain
spec:
  selector:
    name: busybox
  clusterIP: None
  ports:
  - name: foo
    port: 1234
    targetPort: 1234
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  hostname: busybox-1
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox2
  labels:
    name: busybox
spec:
  hostname: busybox-2
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"

[root@master01 ~]# kubectl apply -f domainname.yaml
[root@master01 ~]# kubectl get pod|grep busy
busybox1                    1/1     Running   0          26s
busybox2                    1/1     Running   0          26s
[root@master01 ~]# kubectl exec -it busybox1 -- sh    # this Pod uses hostNetwork and was scheduled onto master03
/ # hostname
master03
/ # exit
[root@master01 ~]# kubectl exec -it busybox2 -- sh
/ # hostname
busybox-2
/ # exit

(3) None

Configure DNS yourself via dnsConfig; searches sets the DNS search domains.

[root@master01 ~]# cat domainname.yaml
apiVersion: v1
kind: Service
metadata:
  name: default-subdomain
spec:
  selector:
    name: busybox
  clusterIP: None
  ports:
  - name: foo
    port: 1234
    targetPort: 1234
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  dnsPolicy: None
  dnsConfig:
    nameservers:
    - 114.114.114.114
    searches:
    - ns1.my.dns.search
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox2
  labels:
    name: busybox
spec:
  hostname: busybox-2
  subdomain: default-subdomain
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "100000000"

[root@master01 ~]# kubectl delete -f domainname.yaml
[root@master01 ~]# kubectl apply -f domainname.yaml

Test:

[root@master01 ~]# kubectl exec -it busybox1 -- sh
/ # ping www.baidu.com
PING www.baidu.com (14.119.104.254): 56 data bytes
64 bytes from 14.119.104.254: seq=0 ttl=50 time=42.844 ms
64 bytes from 14.119.104.254: seq=1 ttl=50 time=42.574 ms
^C
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 42.574/42.709/42.844 ms
/ # nslookup www.baidu.com
Server:    114.114.114.114
Address 1: 114.114.114.114 public1.114dns.com

Name:      www.baidu.com
Address 1: 14.119.104.189
Address 2: 14.119.104.254
/ # cat /etc/resolv.conf
search ns1.my.dns.search
nameserver 114.114.114.114
/ # hostname -f
busybox1

8. Session Affinity (sessionAffinity)

sessionAffinity accepts ClientIP or None; with ClientIP, requests from the same client IP are pinned to the same backend Pod.

[root@master01 ~]# cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  sessionAffinity: ClientIP
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
  selector:
    app: frontend

[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl apply -f nginx-svc.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    2d23h
nginx-svc    ClusterIP   10.103.140.145   <none>        8080/TCP   18h

Test:

With ClientIP affinity every request from this client lands on the same Pod, which can be confirmed in that Pod's access log:

[root@master01 ~]# curl 10.103.140.145:8080     
[root@master01 ~]# kubectl logs frontend-64dccd9ff7-bn4wq
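
The stickiness window defaults to 3 hours (10800 seconds); it can be tuned with sessionAffinityConfig, for example by adding the following to the Service spec (a sketch with an arbitrary 600-second timeout):

spec:
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 600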

9. Using a Named Container Port (http-web-svc)

[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: http-web-svc

[root@master01 ~]# cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  sessionAffinity: ClientIP
  ports:
  - protocol: TCP
    port: 8080
    targetPort: http-web-svc
  selector:
    app: frontend

[root@master01 ~]# kubectl delete -f nginx.yaml
[root@master01 ~]# kubectl delete -f nginx-svc.yaml
[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl apply -f nginx-svc.yaml

Test:

[root@master01 ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP    2d23h
nginx-svc    ClusterIP   10.99.182.168   <none>        8080/TCP   3s
[root@master01 ~]# kubectl describe svc nginx-svc
Name:              nginx-svc
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          app=frontend
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.99.182.168
IPs:               10.99.182.168
Port:              <unset>  8080/TCP
TargetPort:        http-web-svc/TCP
Endpoints:         10.0.0.87:80,10.0.1.247:80,10.0.2.1:80
Session Affinity:  ClientIP
Events:            <none>

10. Selector-less Service for an External Backend

(1) Create an httpd server outside the cluster

[root@node01 ~]# yum install -y httpd
[root@node01 ~]# systemctl start httpd
[root@node01 ~]# echo "<h1>node01</h1>" > /var/www/html/index.html

(2) Create the Service and Endpoints

[root@master01 ~]# cat ex-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
  name: my-service
subsets:
  - addresses:
      - ip: 10.10.10.24
    ports:
      - port: 80

[root@master01 ~]# kubectl apply -f ex-svc.yaml

(3) Test

[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   162m
my-service   ClusterIP   10.109.96.132   <none>        80/TCP    41s
[root@master01 ~]# kubectl describe svc my-service
Name:              my-service
Namespace:         default
Labels:            <none>
Annotations:       <none>
Selector:          <none>
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.109.96.132
IPs:               10.109.96.132
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         10.10.10.24:80
Session Affinity:  None
Events:            <none>
[root@master01 ~]# curl 10.109.96.132
<h1>node01</h1>
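
When the external backend is reachable by a DNS name rather than a fixed IP, an ExternalName Service is a simpler alternative; a minimal sketch (my-external-db and db.example.com are made-up names):

apiVersion: v1
kind: Service
metadata:
  name: my-external-db
spec:
  type: ExternalName
  externalName: db.example.com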

11. NodePort

On a cloud provider you would normally use a LoadBalancer Service instead.
The NodePort range can be adjusted on the kube-apiserver (the --service-node-port-range flag).

[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

[root@master01 ~]# cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  type: NodePort
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
  selector:
    app: frontend


[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl apply -f nginx-svc.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          3h3m
my-service   ClusterIP   10.109.96.132    <none>        80/TCP           21m
nginx-svc    NodePort    10.110.184.239   <none>        8080:30649/TCP   10m

Test:

Port 30649 on every node forwards to the Service.
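
For example, against one of the node IPs seen earlier in the kubernetes Endpoints (substitute the NodePort your cluster assigned):

[root@master01 ~]# curl 10.10.10.21:30649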


III. Ingress Operations


1. Installing ingress-nginx

https://github.com/kubernetes/ingress-nginx
https://github.com/kubernetes/ingress-nginx/blob/main/deploy/static/provider/cloud/deploy.yaml
To update this manifest later, use kubectl replace or kubectl edit rather than kubectl apply.


[root@master01 ~]# cat deploy.yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resourceNames:
  - ingress-nginx-leader
  resources:
  - leases
  verbs:
  - get
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "true"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Local
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
    nodePort: 30080
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
    nodePort: 30443
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  #type: LoadBalancer
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.8.0
    spec:
      # hostNetwork: true
      containers:
      - args:
        - /nginx-ingress-controller
        - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: bitnami/nginx-ingress-controller:1.8.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          runAsUser: 101
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.8.0
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.8.0
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.8.0
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None

[root@master01 ~]# kubectl apply -f deploy.yaml

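After the apply, the controller Pod should reach Running, the two admission Jobs should complete, and the NodePort Service should expose 30080/30443 as configured above; this can be verified with:

[root@master01 ~]# kubectl get pod,svc -n ingress-nginx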

2. Test Case

(1) nginx environment

# run this on every node, echoing that node's own hostname
[root@master01 ~]# mkdir -p /www/nginx && echo "master01" >/www/nginx/index.html
[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-cluster
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: www
        hostPath:
          path: /www/nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginxsvc
spec:
  selector:
    app: nginx
  ports:
  - name: http
    port: 80
    targetPort: 80

[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   5h17m
my-service   ClusterIP   10.109.96.132   <none>        80/TCP    155m
nginxsvc     ClusterIP   10.102.155.12   <none>        80/TCP    2m6s
[root@master01 ~]# curl 10.102.155.12
master01

(2) Create an Ingress with a default backend

[root@master01 ~]# cat ingress-single.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress
spec:
  ingressClassName: nginx
  defaultBackend:
    service:
      name: nginxsvc
      port:
        number: 80

[root@master01 ~]# kubectl apply -f ingress-single.yaml
[root@master01 ~]# kubectl get ingress
NAME           CLASS   HOSTS   ADDRESS          PORTS   AGE
test-ingress   nginx   *       10.104.238.113   80      107s

[root@master01 ~]# kubectl describe ingress test-ingress
Name:             test-ingress
Labels:           <none>
Namespace:        default
Address:          10.104.238.113
Ingress Class:    nginx
Default backend:  nginxsvc:80 (10.0.1.120:80,10.0.2.66:80,10.0.3.217:80)
Rules:
  Host        Path  Backends
  ----        ----  --------
  *           *     nginxsvc:80 (10.0.1.120:80,10.0.2.66:80,10.0.3.217:80)
Annotations:  <none>
Events:
  Type    Reason  Age                   From                      Message
  ----    ------  ----                  ----                      -------
  Normal  Sync    110s (x2 over 2m21s)  nginx-ingress-controller  Scheduled for sync

[root@master01 ~]# kubectl get pod -owide -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE   IP           NODE       NOMINATED NODE   READINESS GATES
ingress-nginx-admission-create-rdnrn        0/1     Completed   0          29m   10.0.3.128   master02   <none>           <none>
ingress-nginx-admission-patch-vtgr4         0/1     Completed   0          29m   10.0.2.60    node02     <none>           <none>
ingress-nginx-controller-59b655fb7b-b4j9q   1/1     Running     0          29m   10.0.0.50    node01     <none>           <none>

[root@master01 ~]# curl 10.0.0.50
master02
[root@master01 ~]# curl 10.0.0.50
node02
[root@master01 ~]# curl 10.0.0.50
master01

(3) Create httpd and host-based Ingress rules

# run this on every node, echoing that node's own hostname
[root@master01 ~]# mkdir -p /www/httpd && echo "master01 httpd" >/www/httpd/index.html
[root@master01 ~]# cat httpd.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-cluster
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/local/apache2/htdocs
      volumes:
      - name: www
        hostPath:
          path: /www/httpd
---
apiVersion: v1
kind: Service
metadata:
  name: httpdsvc
spec:
  selector:
    app: httpd
  ports:
  - name: httpd
    port: 80
    targetPort: 80

[root@master01 ~]# kubectl apply -f httpd.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
httpdsvc     ClusterIP   10.105.174.147   <none>        80/TCP    32s
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP   5h38m
my-service   ClusterIP   10.109.96.132    <none>        80/TCP    177m
nginxsvc     ClusterIP   10.102.155.12    <none>        80/TCP    23m

[root@master01 ~]# cat ingress-httpd.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: http-ingress
spec:
  ingressClassName: nginx
  rules:
  - host: "nginx.wielun.com"
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: nginxsvc
            port:
              number: 80
  - host: "httpd.wielun.com"
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: httpdsvc
            port:
              number: 80

[root@master01 ~]# kubectl apply -f ingress-httpd.yaml

(4) Test results

[root@master01 ~]# vim /etc/hosts
10.104.238.113 nginx.wielun.com httpd.wielun.com

[root@master01 ~]# curl -H "Host: httpd.wielun.com" 10.104.238.113
master01 httpd
[root@master01 ~]# curl -H "Host: nginx.wielun.com" 10.104.238.113
master01

[root@master01 ~]# kubectl describe ingressclasses.networking.k8s.io nginx
Name:         nginx
Labels:       app.kubernetes.io/component=controller
              app.kubernetes.io/instance=ingress-nginx
              app.kubernetes.io/name=ingress-nginx
              app.kubernetes.io/part-of=ingress-nginx
              app.kubernetes.io/version=1.8.0
Annotations:  <none>
Controller:   k8s.io/ingress-nginx
Events:       <none>


[root@master01 ~]# kubectl get ingressclasses.networking.k8s.io
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       73m

[root@master01 ~]# kubectl describe ingress http-ingress
Name:             http-ingress
Labels:           <none>
Namespace:        default
Address:          10.104.238.113
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host              Path  Backends
  ----              ----  --------
  nginx.wielun.com
                    /   nginxsvc:80 (10.0.1.120:80,10.0.2.66:80,10.0.3.217:80)
  httpd.wielun.com
                    /   httpdsvc:80 (10.0.0.132:80,10.0.1.114:80,10.0.3.164:80)
Annotations:        <none>
Events:
  Type    Reason  Age                From                      Message
  ----    ------  ----               ----                      -------
  Normal  Sync    28m (x2 over 29m)  nginx-ingress-controller  Scheduled for sync

3. Path-based Routing

(1) Create the content paths

[root@master01 ~]# mkdir -p /www/httpd/test && cp /www/httpd/index.html /www/httpd/test
[root@master01 ~]# mkdir -p /www/nginx/prod && cp /www/nginx/index.html /www/nginx/prod

(2) Create the Services and Ingress

[root@master01 ~]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-cluster
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: www
        hostPath:
          path: /www/nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginxsvc
spec:
  selector:
    app: nginx
  ports:
  - name: http
    port: 8080
    targetPort: 80

[root@master01 ~]# cat httpd.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-cluster
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/local/apache2/htdocs
      volumes:
      - name: www
        hostPath:
          path: /www/httpd
---
apiVersion: v1
kind: Service
metadata:
  name: httpdsvc
spec:
  selector:
    app: httpd
  ports:
  - name: httpd
    port: 8081
    targetPort: 80


[root@master01 ~]# cat ingress-httpd.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: http-ingress
spec:
  ingressClassName: nginx
  rules:
  - host: "nginx.wielun.com"
    http:
      paths:
      - pathType: Prefix
        path: /prod
        backend:
          service:
            name: nginxsvc
            port:
              number: 8080
      - pathType: Prefix
        path: /test
        backend:
          service:
            name: httpdsvc
            port:
              number: 8081

[root@master01 ~]# kubectl apply -f httpd.yaml
[root@master01 ~]# kubectl apply -f nginx.yaml
[root@master01 ~]# kubectl apply -f ingress-httpd.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
httpdsvc     ClusterIP   10.110.116.208   <none>        8081/TCP   31s
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP    25h
my-service   ClusterIP   10.109.96.132    <none>        80/TCP     22h
nginxsvc     ClusterIP   10.110.201.244   <none>        8080/TCP   28s

(3) Test

[root@master01 ~]# curl nginx.wielun.com/prod/
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.25.0</center>
</body>
</html>
[root@master01 ~]# curl nginx.wielun.com/test
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>301 Moved Permanently</title>
</head><body>
<h1>Moved Permanently</h1>
<p>The document has moved <a href="http://nginx.wielun.com/test/">here</a>.</p>
</body></html>
[root@master01 ~]# curl nginx.wielun.com/test/
master01 httpd

[root@master01 ~]# kubectl describe ingress http-ingress
Name:             http-ingress
Labels:           <none>
Namespace:        default
Address:          10.104.238.113
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host              Path  Backends
  ----              ----  --------
  nginx.wielun.com
                    /prod   nginxsvc:8080 (10.0.0.108:80,10.0.1.159:80,10.0.3.47:80)
                    /test   httpdsvc:8081 (10.0.1.204:80,10.0.2.47:80,10.0.3.242:80)
Annotations:        <none>
Events:
  Type    Reason  Age                  From                      Message
  ----    ------  ----                 ----                      -------
  Normal  Sync    19h (x2 over 19h)    nginx-ingress-controller  Scheduled for sync
  Normal  Sync    5m41s (x2 over 10m)  nginx-ingress-controller  Scheduled for sync
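
Note that the /prod and /test prefixes above are forwarded to the backends unchanged. If you instead want the prefix stripped so each backend serves its root page, ingress-nginx supports capture groups together with the rewrite-target annotation; a sketch reusing the Services above:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: http-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
  ingressClassName: nginx
  rules:
  - host: "nginx.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /prod(/|$)(.*)
        backend:
          service:
            name: nginxsvc
            port:
              number: 8080
      - pathType: ImplementationSpecific
        path: /test(/|$)(.*)
        backend:
          service:
            name: httpdsvc
            port:
              number: 8081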

4. Redirecting with an Annotation

[root@master01 ~]# cat ingress-rewrite.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: http-ingress
  annotations:
    nginx.ingress.kubernetes.io/permanent-redirect: https://www.baidu.com   # redirect matched requests to this URL
spec:
  ingressClassName: nginx
  rules:
  - host: "nginx.wielun.com"
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: nginxsvc
            port:
              number: 8080

[root@master01 ~]# kubectl delete -f ingress-httpd.yaml
[root@master01 ~]# kubectl apply -f ingress-rewrite.yaml
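
Note that rewrite-target rewrites the path that is proxied to the backend Service; it does not send an HTTP redirect back to the client, so pointing it at a full external URL such as https://www.baidu.com is not what the annotation is designed for. For a genuine client-side redirect, ingress-nginx has a dedicated annotation; a minimal sketch, assuming the ingress-nginx controller:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: http-ingress
  annotations:
    # reply with a 301 that sends the client to the external URL
    nginx.ingress.kubernetes.io/permanent-redirect: https://www.baidu.com
spec:
  ingressClassName: nginx
  rules:
  - host: "nginx.wielun.com"
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: nginxsvc
            port:
              number: 8080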

四、Canary (gray) release


1、Canary release approaches

  • canary: blue-green and gray releases, community (ingress-nginx) version
  • service: blue-green and gray releases, cloud-provider version

Traffic-splitting rules, evaluated in this order of precedence (all three can be combined on one canary Ingress, as sketched after this list):

canary-by-header -> canary-by-cookie -> canary-weight

  • Request-header-based traffic splitting, suited to canary releases and A/B testing
  • Cookie-based traffic splitting, suited to canary releases and A/B testing
  • Weight-based traffic splitting, suited to blue-green releases
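
All three rules can sit on the same canary Ingress; ingress-nginx checks the header first, then the cookie, and falls back to the weight. A sketch of the combined annotations (values are illustrative, and the new-nginx Service referenced here is only created later in this section):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: new-release
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    # 1. requests carrying the header "foo: bar" always hit the canary
    nginx.ingress.kubernetes.io/canary-by-header: "foo"
    nginx.ingress.kubernetes.io/canary-by-header-value: "bar"
    # 2. otherwise the cookie canary=always selects the canary
    nginx.ingress.kubernetes.io/canary-by-cookie: "canary"
    # 3. otherwise roughly 10% of the remaining traffic goes to the canary
    nginx.ingress.kubernetes.io/canary-weight: "10"
spec:
  ingressClassName: nginx
  rules:
  - host: "www.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /
        backend:
          service:
            name: new-nginx
            port:
              number: 80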

2、Environment preparation

[root@master01 ~]# mkdir -p /www/{old,new}
[root@master01 ~]# echo "old version" >> /www/old/index.html && echo "new version" >> /www/new/index.html
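
Keep in mind that hostPath volumes are node-local: with replicas: 3 the old-nginx and new-nginx Pods can land on different nodes, so /www/old and /www/new need the same content on every schedulable node, not only on master01. A hypothetical way to do that, assuming passwordless SSH from master01 to the other nodes:

# hypothetical: replicate the content to every node in the cluster
for node in $(kubectl get nodes -o name | cut -d/ -f2); do
  ssh "$node" 'mkdir -p /www/old /www/new &&
    echo "old version" > /www/old/index.html &&
    echo "new version" > /www/new/index.html'
done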

3、Creating the old environment

(1)Configure canary-nginx.yaml

Note: sessionAffinity is left as None here; in production it should normally be enabled (see the ClientIP sketch after the listing below).

[root@master01 ~]# cat canary-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: old-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: old-nginx
  template:
    metadata:
      labels:
        app: old-nginx
    spec:
      containers:
      - name: old-nginx
        image: nginx:1.19
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: old
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: old
        hostPath:
          path: /www/old
---
apiVersion: v1
kind: Service
metadata:
  name: old-nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: old-nginx
  sessionAffinity: None

[root@master01 ~]# kubectl apply -f canary-nginx.yaml
[root@master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   27h
my-service   ClusterIP   10.109.96.132   <none>        80/TCP    24h
old-nginx    ClusterIP   10.101.60.205   <none>        80/TCP    4s

[root@master01 ~]# kubectl get deployment -owide
NAME        READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES       SELECTOR
old-nginx   3/3     3            3           94s   old-nginx    nginx:1.19   app=old-nginx
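
As noted above, sessionAffinity stays at None here so the traffic split is easy to observe with curl; in production a client is usually pinned to one version so it does not flip between old and new on every request. A minimal sketch of enabling client-IP affinity on the Service (the timeout is illustrative); when traffic enters through the ingress controller, stickiness is usually also handled at the Ingress layer, for example with the canary-by-cookie rule shown later:

apiVersion: v1
kind: Service
metadata:
  name: old-nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: old-nginx
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800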

(2)Configure canary-ingress.yaml

[root@master01 ~]# cat canary-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: old-release
spec:
  ingressClassName: nginx
  rules:
  - host: "www.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /
        backend:
          service:
            name: old-nginx
            port:
              number: 80

[root@master01 ~]# kubectl apply -f canary-ingress.yaml
[root@master01 ~]# kubectl get ingress
NAME          CLASS   HOSTS            ADDRESS          PORTS   AGE
old-release   nginx   www.wielun.com   10.104.238.113   80      1m

[root@master01 ~]# vim /etc/hosts
10.104.238.113  www.wielun.com

[root@master01 ~]# curl www.wielun.com
old version

4、Header-based canary

(1)Configure canary-nginx-new.yaml

[root@master01 ~]# cat canary-nginx-new.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: new-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: new-nginx
  template:
    metadata:
      labels:
        app: new-nginx
    spec:
      containers:
      - name: new-nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: new
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: new
        hostPath:
          path: /www/new
---
apiVersion: v1
kind: Service
metadata:
  name: new-nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: new-nginx
  sessionAffinity: None

[root@master01 ~]# kubectl apply -f canary-nginx-new.yaml

(2)Configure canary-ingress-new.yaml


[root@master01 ~]# cat canary-ingress-new.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: new-release
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-by-header: "foo"
    nginx.ingress.kubernetes.io/canary-by-header-value: "bar"

spec:
  ingressClassName: nginx
  rules:
  - host: "www.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /
        backend:
          service:
            name: new-nginx
            port:
              number: 80

[root@master01 ~]# kubectl apply -f canary-ingress-new.yaml
[root@master01 ~]# kubectl get ingress
NAME          CLASS   HOSTS            ADDRESS          PORTS   AGE
new-release   nginx   www.wielun.com   10.104.238.113   80      5m
old-release   nginx   www.wielun.com   10.104.238.113   80      11m

(3)Testing

[root@master01 ~]# kubectl describe ingress old-release
Name:             old-release
Labels:           <none>
Namespace:        default
Address:          10.104.238.113
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host            Path  Backends
  ----            ----  --------
  www.wielun.com
                  /   old-nginx:80 (10.0.1.50:80,10.0.2.213:80,10.0.3.160:80)
Annotations:      <none>
Events:
  Type    Reason  Age                From                      Message
  ----    ------  ----               ----                      -------
  Normal  Sync    13m (x2 over 14m)  nginx-ingress-controller  Scheduled for sync
  
[root@master01 ~]# kubectl describe ingress new-release
Name:             new-release
Labels:           <none>
Namespace:        default
Address:          10.104.238.113
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host            Path  Backends
  ----            ----  --------
  www.wielun.com
                  /   new-nginx:80 (10.0.0.143:80,10.0.1.245:80,10.0.3.8:80)
Annotations:      nginx.ingress.kubernetes.io/canary: true
                  nginx.ingress.kubernetes.io/canary-by-header: foo
                  nginx.ingress.kubernetes.io/canary-by-header-value: bar
Events:
  Type    Reason  Age                    From                      Message
  ----    ------  ----                   ----                      -------
  Normal  Sync    2m59s (x2 over 3m36s)  nginx-ingress-controller  Scheduled for sync

[root@master01 ~]# curl www.wielun.com
old version

[root@master01 ~]# curl -H "foo:bar" www.wielun.com
new version
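
The cookie rule works the same way as the header rule: a client that sends the cookie canary=always is routed to the new version, and canary=never forces the old one. A sketch of switching the canary Ingress to cookie-based selection (the cookie name is illustrative):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: new-release
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    # clients sending the cookie "canary=always" are routed to new-nginx
    nginx.ingress.kubernetes.io/canary-by-cookie: "canary"
spec:
  ingressClassName: nginx
  rules:
  - host: "www.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /
        backend:
          service:
            name: new-nginx
            port:
              number: 80

A quick check would then be: curl --cookie "canary=always" www.wielun.com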

5、Weight-based canary

[root@master01 ~]# cat canary-ingress-new.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: new-release
  annotations:
    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "50"

spec:
  ingressClassName: nginx
  rules:
  - host: "www.wielun.com"
    http:
      paths:
      - pathType: ImplementationSpecific
        path: /
        backend:
          service:
            name: new-nginx
            port:
              number: 80

[root@master01 ~]# kubectl apply -f canary-ingress-new.yaml
[root@master01 ~]# for i in {0..10};do curl www.wielun.com;done
new version
new version
new version
old version
new version
old version
old version
old version
new version
old version
new version
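
In practice the weight is raised gradually rather than jumping straight to 50. One way to do that without re-editing the file is to overwrite the annotation in place; a hedged sketch with illustrative values:

# shift more traffic to the canary step by step, checking results each time
kubectl annotate ingress new-release \
  nginx.ingress.kubernetes.io/canary-weight="10" --overwrite
# ...verify with curl or metrics, then raise the weight...
kubectl annotate ingress new-release \
  nginx.ingress.kubernetes.io/canary-weight="30" --overwrite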

6、Cutting all traffic over to the new version

The only change in canary-nginx.yaml is the Service selector: the old-nginx Service now selects app: new-nginx, so all requests that previously reached the old Pods are served by the new version.

[root@master01 ~]# cat canary-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: old-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: old-nginx
  template:
    metadata:
      labels:
        app: old-nginx
    spec:
      containers:
      - name: old-nginx
        image: nginx:1.19
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: old
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: old
        hostPath:
          path: /www/old
---
apiVersion: v1
kind: Service
metadata:
  name: old-nginx
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: new-nginx
  sessionAffinity: None

[root@master01 ~]# kubectl apply -f canary-nginx.yaml
[root@master01 ~]# for i in {0..10};do curl www.wielun.com;done
new version
new version
new version
new version
new version
new version
new version
new version
new version
new version
new version
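
Once all traffic is confirmed to land on the new version, the canary Ingress and the old Deployment are no longer needed; one possible cleanup, depending on which objects you want to keep:

# retire the canary rule and the old Deployment
kubectl delete ingress new-release
kubectl delete deployment old-nginx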

前言&#xff1a;在学习项目的过程中遇到了数据分页的功能&#xff0c;单纯的js前端不能处理大的数据量&#xff0c;需要后端整理好数据发送给前端&#xff0c;那么使用分页插件无疑是个好选择. 目录 pagehelper依赖 接口方法mapper Mybatis Service ServiceImpl PageResu…