云计算第四阶段: cloud二周目 07-08

news2024/10/18 9:35:14

cloud 07

一、k8s服务管理

创建服务

# 资源清单文件
[root@master ~]# kubectl create service clusterip websvc --tcp=80:80 --dry-run=client -o yaml
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl apply -f websvc.yaml 
service/websvc created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1      <none>        443/TCP
websvc       ClusterIP   10.245.5.18     <none>        80/TCP
解析域名
# 安装工具软件包
[root@master ~]# dnf install -y bind-utils
# 查看 DNS 服务地址
[root@master ~]# kubectl -n kube-system get service kube-dns
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kube-dns   ClusterIP   10.245.0.10   <none>        53/UDP,53/TCP,9153/TCP
# 域名解析测试
[root@master ~]# host websvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 

websvc.default.svc.cluster.local has address 10.245.5.18

创建后端应用

[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web   # 服务靠标签寻找后端
spec:
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.5.18
Welcome to The Apache.
负载均衡
[root@master ~]# sed 's,web1,web2,' web1.yaml |kubectl apply -f -
pod/web2 created
[root@master ~]# sed 's,web1,web3,' web1.yaml |kubectl apply -f -
pod/web3 created
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web1
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web2
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web3
固定 IP 服务
[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80    # 可以设置 ClusterIP
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP

端口别名

[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: myhttp    # 使用别名查找后端服务端口

[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced

[root@master ~]# kubectl delete pod --all
pod "web1" deleted
pod "web2" deleted
pod "web3" deleted

[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web
spec:
  containers:
  - name: apache
    image: myos:httpd
    ports:               # 配置端口规范
    - name: myhttp       # 端口别名
      protocol: TCP      # 协议
      containerPort: 80  # 端口号

[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.

服务排错

---
kind: Service
apiVersion: v1
metadata:
  name: web123
spec:
  type: ClusterIP
  clusterIP: 192.168.1.88
  selector:
    app: apache
  ports:
  - protocol: TCP
    port: 80
    targetPort: web

nodePort


对外发布服务

[root@master ~]# vim mysvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: mysvc
spec:
  type: NodePort            # 服务类型
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    nodePort: 30080         # 映射端口号
    targetPort: 80

[root@master ~]# kubectl apply -f mysvc.yaml 
service/mysvc created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP
mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP

[root@master ~]# curl http://node-0001:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0002:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0003:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0004:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0005:30080
Welcome to The Apache.

二、Ingress 安装与策略配置

安装控制器

[root@master ~]# cd plugins/ingress
[root@master ingress]# docker load -i ingress.tar.xz
[root@master ingress]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master ingress]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' deploy.yaml
443:    image: registry.k8s.io/ingress-nginx/controller:v1.9.6
546:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
599:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06

[root@master ingress]# kubectl apply -f deploy.yaml
[root@master ingress]# kubectl -n ingress-nginx get pods
NAME                                        READY   STATUS      RESTARTS
ingress-nginx-admission-create--1-lm52c     0/1     Completed   0
ingress-nginx-admission-patch--1-sj2lz      0/1     Completed   0
ingress-nginx-controller-5664857866-tql24   1/1     Running     0
验证后端服务
[root@master ~]# kubectl get pods,services 
NAME       READY   STATUS    RESTARTS   AGE
pod/web1   1/1     Running   0          35m

NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
service/kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
service/websvc       ClusterIP   10.245.1.80   <none>        80/TCP
service/mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP

[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.
对外发布服务
# 查询 ingress 控制器类名称
[root@master ~]# kubectl get ingressclasses.networking.k8s.io 
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       5m7s

# 资源清单文件
[root@master ~]# kubectl create ingress mying --class=nginx --rule=nsd.tedu.cn/*=mysvc:80 --dry-run=client -o yaml
[root@master ~]# vim mying.yaml
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: mying
spec:
  ingressClassName: nginx
  rules:
  - host: nsd.tedu.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: websvc
            port:
              number: 80

[root@master ~]# kubectl apply -f mying.yaml 
ingress.networking.k8s.io/mying created
[root@master ~]# kubectl get ingress
NAME    CLASS   HOSTS         ADDRESS        PORTS
mying   nginx   nsd.tedu.cn   192.168.1.51   80
[root@master ~]# curl -H "Host: nsd.tedu.cn" http://192.168.1.51
Welcome to The Apache.


三、Dashboard 安装

#下面给大家介绍下新的k8s插件

web 管理插件

安装 Dashboard

[root@master ~]# cd plugins/dashboard
[root@master dashboard]# docker load -i dashboard.tar.xz
[root@master dashboard]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master dashboard]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' recommended.yaml
193:    image: kubernetesui/dashboard:v2.7.0
278:    image: kubernetesui/metrics-scraper:v1.0.8
[root@master dashboard]# kubectl apply -f recommended.yaml
[root@master dashboard]# kubectl -n kubernetes-dashboard get pods
NAME                                         READY   STATUS    RESTARTS
dashboard-metrics-scraper-66f6f56b59-b42ng   1/1     Running   0
kubernetes-dashboard-65ff57f4cf-lwtsk        1/1     Running   0

发布服务

# 查看服务状态
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        ClusterIP   10.245.215.40    443/TCP
# 获取服务资源对象文件
[root@master dashboard]# sed -n '30,45p' recommended.yaml >dashboard-svc.yaml
[root@master dashboard]# vim dashboard-svc.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      nodePort: 30443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

[root@master dashboard]# kubectl apply -f dashboard-svc.yaml 
service/kubernetes-dashboard configured
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        NodePort    10.245.215.40    443:30443/TCP

  • #记得访问下仪表盘dashboard登录页面

四、RBAC 权限管理

服务账号与权限

创建服务账号

# 资源对象模板
[root@master ~]# kubectl -n kubernetes-dashboard create serviceaccount kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin created
[root@master ~]# kubectl -n kubernetes-dashboard get serviceaccounts 
NAME                   SECRETS   AGE
default                0         16m
kube-admin             0         11s
kubernetes-dashboard   0         16m

获取用户 token

[root@master ~]# kubectl -n kubernetes-dashboard create token kube-admin
<Base64 编码的令牌数据>

角色与鉴权

#类似网游DNF里面的角色管理,GM管理员和玩家的关系。

资源对象 | 描述 | 作用域
ServiceAccount | 服务账号,为 Pod 中运行的进程提供了一个身份 | 单一名称空间
Role | 角色,包含一组代表相关权限的规则 | 单一名称空间
ClusterRole | 角色,包含一组代表相关权限的规则 | 全集群
RoleBinding | 将权限赋予用户,Role、ClusterRole 均可使用 | 单一名称空间
ClusterRoleBinding | 将权限赋予用户,只可以使用 ClusterRole | 全集群

资源对象权限

create(创建)、delete(删除)、deletecollection(删除集合)、get(获取属性)、list(获取列表)、patch(补丁)、update(更新)、watch(监控)
普通角色
[root@master ~]# kubectl cluster-info dump |grep authorization-mode
                            "--authorization-mode=Node,RBAC",

# 资源对象模板
[root@master ~]# kubectl -n default create role myrole --resource=pods --verb=get,list --dry-run=client -o yaml
[root@master ~]# kubectl -n default create rolebinding kube-admin-role --role=myrole --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim myrole.yaml 
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: myrole
  namespace: default
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: myrole
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f myrole.yaml 
role.rbac.authorization.k8s.io/myrole created
rolebinding.rbac.authorization.k8s.io/kube-admin-role created

[root@master ~]# kubectl delete -f myrole.yaml 
role.rbac.authorization.k8s.io "myrole" deleted
rolebinding.rbac.authorization.k8s.io "kube-admin-role" deleted
集群管理员
[root@master ~]# kubectl get clusterrole
NAME                              CREATED AT
admin                             2022-06-24T08:11:17Z
cluster-admin                     2022-06-24T08:11:17Z
... ...

# 资源对象模板
[root@master ~]# kubectl create clusterrolebinding kube-admin-role --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml 
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/kube-admin-role created

cloud 08

#上一小节讲过K8S的有控制组件和计算组件。现在我们一起来深入研究K8S的控制组件。

一、Deployment

资源清单文件
[root@master ~]# kubectl create deployment myweb --image=myos:httpd --dry-run=client -o yaml
[root@master ~]# vim mydeploy.yaml
---
kind: Deployment          # 资源对象类型
apiVersion: apps/v1       # 版本
metadata:                 # 元数据
  name: mydeploy          # 名称
spec:                     # 详细定义
  replicas: 3             # 副本数量
  selector:               # 定义标签选择器
    matchLabels:          # 支持 matchExpressions 表达式语法
      app: deploy-httpd   # 通过标签来确定那个 Pod 由它来管理
  template:               # 定义用来创建 Pod 的模板,以下为 Pod 定义
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
配置案例
# 创建控制器
[root@master ~]# kubectl apply -f mydeploy.yaml 
deployment.apps/mydeploy created

[root@master ~]# kubectl get deployments
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
mydeploy   3/3     3            3           1s

# 控制器自动创建 ReplicaSet
[root@master ~]# kubectl get replicasets 
NAME                  DESIRED   CURRENT   READY   AGE
mydeploy-76f96b85df   3         3         3       2s

# 控制器自动创建 Pod
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-5gng9   1/1     Running   0          3s
mydeploy-76f96b85df-vsfrw   1/1     Running   0          3s
mydeploy-76f96b85df-z9x95   1/1     Running   0          3s

# 集群自维护自治理
[root@master ~]# kubectl delete pod --all 
pod "mydeploy-76f96b85df-5gng9" deleted
pod "mydeploy-76f96b85df-vsfrw" deleted
pod "mydeploy-76f96b85df-z9x95" deleted

# 删除后自动重新创建
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          7s
mydeploy-76f96b85df-kpbz4   1/1     Running   0          7s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          7s
集群服务

# 创建集群服务
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f websvc.yaml 
service/websvc replaced
[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.
集群扩缩容

#抽象来说,扩容就是在负载升高时增加 Pod 副本数量,让更多的副本分担请求,达到扩容效果,类似吃鸡游戏里的扩容弹夹。

而缩容就是在负载降低时减少 Pod 副本数量,回收多余资源,达到更佳的运行效率,类似古代的增兵减灶。

# 集群扩容
[root@master ~]# kubectl scale deployment mydeploy --replicas 10
deployment.apps/mydeploy scaled

[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-kg27l   1/1     Running   0          6s
mydeploy-76f96b85df-q5fzb   1/1     Running   0          6s
mydeploy-76f96b85df-rxhp4   1/1     Running   0          6s
mydeploy-76f96b85df-szf69   1/1     Running   0          6s
mydeploy-76f96b85df-tp2xj   1/1     Running   0          6s
......

# 集群缩容
[root@master ~]# kubectl scale deployment mydeploy --replicas=2
deployment.apps/mydeploy scaled

[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          51s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          51s
历史版本信息
# 查看历史版本
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         <none>

# 添加注释信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v1"
deployment.apps/mydeploy annotated

[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1

# 更新资源清单文件
[root@master ~]# vim mydeploy.yaml
# 在创建容器的镜像下面添加
        imagePullPolicy: Always

[root@master ~]# kubectl apply -f mydeploy.yaml
deployment.apps/mydeploy configured

# 更新版本信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v2"
deployment.apps/mydeploy annotated

[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
滚动更新
# 修改镜像,滚动更新集群
[root@master ~]# kubectl set image deployment mydeploy apache=myos:nginx
deployment.apps/mydeploy image updated

# 给新版本添加注释信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="nginx.v1"
deployment.apps/mydeploy annotated

# 查看历史版本信息
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
3         nginx.v1

# 访问验证服务
[root@master ~]# curl -m 3 http://10.245.1.80
Nginx is running !
版本回滚

#类似游戏里面的怀旧服,而这里的版本回滚是用于恢复数据

# 历史版本与回滚
[root@master ~]# kubectl rollout undo deployment mydeploy --to-revision 1
deployment.apps/mydeploy rolled back

[root@master ~]# kubectl rollout history deployment mydeploy 
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
2         httpd.v2
3         nginx.v1
4         httpd.v1

[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.

清理资源对象
# 删除控制器时会自动回收自己创建的 Pod
[root@master ~]# kubectl delete deployments mydeploy
deployment.apps "mydeploy" deleted

二、DaemonSet

配置案例
[root@master ~]# cp -a mydeploy.yaml myds.yaml
[root@master ~]# vim myds.yaml
---
kind: DaemonSet         # 资源对象类型
apiVersion: apps/v1
metadata:
  name: myds            # 控制器名称
spec:
  # replicas: 2         # 删除副本参数
  selector:
    matchLabels:
      app: ds-httpd     # 修改标签防止冲突
  template:
    metadata:
      labels:
        app: ds-httpd   # 修改标签防止冲突
    spec:
      containers:
      - name: apache
        image: myos:httpd
        imagePullPolicy: Always

[root@master ~]# kubectl apply -f myds.yaml 
daemonset.apps/myds created
[root@master ~]# kubectl get pods -o wide
NAME         READY   STATUS    RESTARTS   IP            NODE
myds-msrcx   1/1     Running   0          10.244.1.11   node-0001
myds-lwq8l   1/1     Running   0          10.244.2.17   node-0002
myds-4wt72   1/1     Running   0          10.244.3.14   node-0003
myds-6k82t   1/1     Running   0          10.244.4.15   node-0004
myds-9c6wc   1/1     Running   0          10.244.5.19   node-0005
清理资源对象
# 删除控制器
[root@master ~]# kubectl delete daemonsets myds
daemonset.apps "myds" deleted

三、Job、CronJob

 

 Job 控制器

# 资源文件模板
[root@master ~]# kubectl create job myjob --image=myos:8.5 --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim myjob.yaml 
---
kind: Job
apiVersion: batch/v1
metadata:
  name: myjob
spec:
  template:  # 以下定义 Pod 模板
    metadata: {}
    spec:
      restartPolicy: OnFailure
      containers:
      - name: myjob
        image: myos:8.5
        command: ["/bin/sh"]
        args:
        - -c
        - |
          sleep 3
          exit $((RANDOM%2))

[root@master ~]# kubectl apply -f myjob.yaml 
job.batch/myjob created

# 失败了会重启
[root@master ~]# kubectl get pods -l job-name=myjob -w
NAME             READY   STATUS      RESTARTS     AGE
myjob--1-lrtbk   1/1     Running     0            2s
myjob--1-lrtbk   0/1     Error       0            4s
myjob--1-lrtbk   1/1     Running     1 (1s ago)   5s
myjob--1-lrtbk   0/1     Completed   1            9s

[root@master ~]# kubectl get jobs.batch 
NAME    COMPLETIONS   DURATION   AGE
myjob   1/1           8s         12s

# 删除Job控制器
[root@master ~]# kubectl delete -f myjob.yaml 
job.batch "myjob" deleted

#pod控制器创建失败,任务会确保创建成功而重启,避免失败

Cronjob

#类似ansible中的crontab模块,可以定时执行某一任务
配置案例
# 资源对象模板
[root@master ~]# kubectl create cronjob mycj --image=myos:8.5 --schedule='* * * * *' --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim mycj.yaml
---
kind: CronJob
apiVersion: batch/v1
metadata:
  name: mycj
spec:
  schedule: "* * * * *"
  jobTemplate:  # 以下定义 Job 模板
    metadata: {}
    spec:
      template:
        metadata: {}
        spec:
          restartPolicy: OnFailure
          containers:
          - name: myjob
            image: myos:8.5
            command: ["/bin/sh"]
            args:
            - -c
            - |
              sleep 3
              exit $((RANDOM%2))

[root@master ~]# kubectl apply -f mycj.yaml 
cronjob.batch/mycj created
[root@master ~]# kubectl get cronjobs 
NAME   SCHEDULE        SUSPEND   ACTIVE   LAST SCHEDULE   AGE
mycj   * * * * *       False     0        <none>          4s

# 按照时间周期,每分钟触发一个任务
[root@master ~]# kubectl get jobs -w
NAME                     READY   STATUS              RESTARTS
mycj-27808172--1-w6sbx   0/1     Pending             0
mycj-27808172--1-w6sbx   0/1     ContainerCreating   0
mycj-27808172--1-w6sbx   1/1     Running             0
mycj-27808172--1-w6sbx   0/1     Completed           1

# 保留三次结果,多余的会被删除
[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605367   1/1           31s        3m30s
mycj-27605368   1/1           31s        2m30s
mycj-27605369   1/1           31s        90s
mycj-27605370   0/1           30s        30s

[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605368   1/1           31s        2m33s
mycj-27605369   1/1           31s        93s
mycj-27605370   1/1           31s        33s

# 删除CJ控制器
[root@master ~]# kubectl delete -f mycj.yaml 
cronjob.batch "mycj" deleted

四、StatefulSet

Headless 服务
[root@master ~]# cp websvc.yaml stssvc.yaml 
[root@master ~]# vim stssvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: stssvc          # 服务名称
spec:
  type: ClusterIP
  clusterIP: None       # 设置 IP 为 None
  selector:
    app: sts-httpd      # 设置 Pod 标签
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl apply -f stssvc.yaml 
service/stssvc created

[root@master ~]# kubectl get services stssvc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
stssvc       ClusterIP   None          <none>        80/TCP    51s
资源清单文件
[root@master ~]# cp -a mydeploy.yaml mysts.yaml
[root@master ~]# vim mysts.yaml
---
kind: StatefulSet       # 资源对象类型
apiVersion: apps/v1
metadata:
  name: mysts           # 控制器名称
spec:
  serviceName: stssvc   # 新增 headless 服务名称
  replicas: 3
  selector:
    matchLabels:
      app: sts-httpd    # 修改标签防止冲突
  template:
    metadata:
      labels:
        app: sts-httpd  # 修改标签防止冲突
    spec:
      containers:
      - name: apache
        image: myos:httpd
配置案例
# statefulset 主要解决了 Pod 创建顺序的问题
# statefulset 主要解决了访问指定 Pod 的问题
[root@master ~]# kubectl apply -f mysts.yaml 
statefulset.apps/mysts created

[root@master ~]# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
mysts-0   1/1     Running   0          3s
mysts-1   1/1     Running   0          2s
mysts-2   1/1     Running   0          1s

# 所有 Pod IP 地址
[root@master ~]# host stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
stssvc.default.svc.cluster.local has address 10.244.1.81
stssvc.default.svc.cluster.local has address 10.244.2.82
stssvc.default.svc.cluster.local has address 10.244.3.83

# 单个 Pod IP 地址
[root@master ~]# host mysts-0.stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
mysts-0.stssvc.default.svc.cluster.local has address 10.244.1.81

# 删除sts控制器
[root@master ~]# kubectl delete -f mysts.yaml -f stssvc.yaml
statefulset.apps "mysts" deleted
service "stssvc" deleted
弹性云服务

五、HorizontalPodAutoscaler

 配置后端服务

# 为 Deploy 模板添加资源配额
[root@master ~]# cat mydeploy.yaml websvc.yaml >mycluster.yaml
[root@master ~]# vim mycluster.yaml 
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mydeploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: deploy-httpd
  template:
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
        resources:           # 为该资源设置配额
          requests:          # HPA 控制器会根据配额使用情况伸缩集群
            cpu: 300m        # CPU 配额

---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f mycluster.yaml
deployment.apps/mydeploy replaced
service/websvc replaced

# 验证服务
[root@master ~]# kubectl top pods
NAME                       CPU(cores)   MEMORY(bytes)   
mydeploy-b4f9dc786-w4x2z   6m           18Mi            

[root@master ~]# curl -s http://10.245.1.80/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.219.64
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.61.1
    [REQUEST_URI] => /info.php
)
php_host:   mydeploy-b4f9dc786-w4x2z
1229

HPA 控制器

[root@master ~]# vim myhpa.yaml 
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
  name: myhpa
spec:
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 60
  scaleTargetRef:
    kind: Deployment
    apiVersion: apps/v1
    name: mydeploy
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50

[root@master ~]# kubectl apply -f myhpa.yaml 
horizontalpodautoscaler.autoscaling/myhpa created

# 刚刚创建 unknown 是正常现象,最多等待 60s 就可以正常获取数据
[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS         MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   <unknown>/50%   1         5         0

[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   0%/50%    1         5         3
配置案例

# 终端 1 访问提高负载
[root@master ~]# while sleep 1;do curl -s "http://10.245.1.80/info.php?id=100000" -o /dev/null; done &
# 终端 2 监控 HPA 变化
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   0%/50%    1         5         1          1m
myhpa   Deployment/mydeploy   31%/50%   1         5         1          2m
myhpa   Deployment/mydeploy   70%/50%   1         5         1          2m15s
myhpa   Deployment/mydeploy   72%/50%   1         5         2          2m30s
myhpa   Deployment/mydeploy   36%/50%   1         5         2          2m45s
myhpa   Deployment/mydeploy   55%/50%   1         5         2          3m
myhpa   Deployment/mydeploy   58%/50%   1         5         3          3m15s
myhpa   Deployment/mydeploy   39%/50%   1         5         3          3m30s
... ...
myhpa   Deployment/mydeploy   66%/50%   1         5         4          5m
myhpa   Deployment/mydeploy   68%/50%   1         5         5          5m15s
myhpa   Deployment/mydeploy   55%/50%   1         5         5          5m30s
myhpa   Deployment/mydeploy   58%/50%   1         5         5          5m45s
myhpa   Deployment/mydeploy   62%/50%   1         5         5          6m

# 如果 60s 内平均负载小于标准值,就会自动缩减集群规模
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   52%/50%   1         5         5          13m
myhpa   Deployment/mydeploy   44%/50%   1         5         5          13m15s
myhpa   Deployment/mydeploy   38%/50%   1         5         5          13m30s
myhpa   Deployment/mydeploy   35%/50%   1         5         5          13m45s
myhpa   Deployment/mydeploy   28%/50%   1         5         5          14m
... ...
myhpa   Deployment/mydeploy   8%/50%    1         5         5          18m30s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          18m45s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          19m
myhpa   Deployment/mydeploy   12%/50%   1         5         3          19m15s
myhpa   Deployment/mydeploy   15%/50%   1         5         3          19m30s
myhpa   Deployment/mydeploy   18%/50%   1         5         2          19m45s
myhpa   Deployment/mydeploy   33%/50%   1         5         1          20m

课后总结:

#我们本节学的好多控制器,都有差异和区别,可以按照类似以下的提示词,来对AI提问,得到更加符合工作使用环境的回答。


至此云计算cloud二周目内容更新完毕!

大家有想练习的,可以去华为云、阿里云等云平台,创建帐号,使用30天免费体验版云产品

熟悉相关云产品的使用与配置,里面也有一些项目的免费体验课,可以照着案例学基本项目架构

下一阶段,将重回网络阶段,深入了解云计算与云原生领域的网络架构知识。

                        

下个阶段见!!!   

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/2217676.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

IGFBP7:免疫治疗新靶点

前 言 胰岛素样生长因子结合蛋白7（IGFBP7）是胰岛素超家族的生长促进肽成员，可与胰岛素和IGF结合，调控细胞生长和分化。IGFBP7在不同的肿瘤类型中表现出抑制或促进肿瘤生长的“自相矛盾”活性。研究发现IGFBP7可增强治疗性单克隆…

C语言 | Leetcode C语言题解之第491题非递减子序列

题目&#xff1a; 题解&#xff1a; int** ans; int ansSize; int* temp; int tempSize;void dfs(int cur, int last, int* nums, int numsSize, int** returnColumnSizes) {if (cur numsSize) {if (tempSize > 2) {ans[ansSize] malloc(sizeof(int) * tempSize);memcpy(…

HDLBits中文版,标准参考答案 | 5 Verification: Writing Testbenches | 验证:编写测试平台

关注 望森FPGA 查看更多FPGA资讯 这是望森的第 24 期分享 作者 | 望森 来源 | 望森FPGA 目录 1 Clock 2 Testbench1 3 AND gate 4 Testbench2 5 T flip-fop 本文中的代码都能够正常运行&#xff0c;请放心食用&#x1f60b;~ 练习的官方网站是&#xff1a;https://hdlbi…

Hi3061M——VL53L0X激光测距(IIC)(同样适用于其他MCU)

这里写目录标题 前言VL53L0X的简介VL53L0X的初始化和效准过程的相关APIVL53L0X开始测量和获取测量值VL53L0X移植配置结语 前言 手头正好有一个空闲的激光测距模块VL53L0X&#xff0c;想在Hi3061M上跑一下测距&#xff0c;以前并没有用过VL53L0X&#xff0c;想着以为还是向以前…

浅谈针对Nor flash状态寄存器保护位修改方法

浅谈针对Nor flash状态寄存器保护位修改 ✨最近在写QSPI驱动Nor flash过程中&#xff0c;在操作芯片的状态寄存器的时候&#xff0c;遇到的一些问题和解决办法。 &#x1f52c;操作Nor flash测试芯片&#xff1a;GD25Q64&#x1f516;以QSPI测试为例&#xff0c;SPI方式修改没有…

集成电路学习:什么是WLAN无线局域网

WLAN&#xff1a;无线局域网 WLAN&#xff0c;即无线局域网&#xff08;Wireless Local Area Network&#xff09;&#xff0c;是一种通过无线技术构建的局域网络&#xff0c;它使用无线信号传输数据&#xff0c;取代了传统有线网络中的网线连接。以下是关于WLAN的详细解释&…

Lumerical学习——优化和参数扫描(Optimization and parameter sweeps)

一、概要介绍 这部分介绍优化和参数扫描项目设定的方法。在有大量数据模拟计算过程中这个特性允许用户使处理方法自动地查找期望的参数值。 ① 创建一个参数扫描任务 ② 创建一个优化任务 ③ 创建一个良率分析任务 ⑤ 打开所选择项目的编辑窗口&#xff0c;编辑其属性…

stm32 bootloader写法

bootloader写法&#xff1a; 假设app的起始地址&#xff1a;0x08020000&#xff0c;则bootloader的范围是0x0800,0000~0x0801,FFFF。 #define APP_ADDR 0x08020000 // 应用程序首地址定义 typedef void (*APP_FUNC)(void); // 函数指针类型定义 /*main函数中调用rum_app&#x…

【Unity】Unity Shader学习笔记(八)基础纹理2:高度纹理、法线纹理、模型空间下的法线纹理、切线空间下的法线纹理光照计算

文章目录 凹凸映射法线纹理设置高度纹理&#xff08;Height Map&#xff09;法线纹理&#xff08;Normal Map&#xff09;模型空间的法线纹理切线空间的法线纹理优劣对比 切线空间下的法线纹理光照计算最终效果完整代码TANGENT语义内置宏 TANGENT_SPACE_ROTATIONObjSpaceLightD…

前缀和--一维和二维模板

前缀和 【模板】前缀和 描述 给定一个长度为n的数组a1,a2,…ana1,a2,…a**n. 接下来有q次查询, 每次查询有两个参数l, r. 对于每个询问, 请输出alal1…ara**la**l1…a**r 输入描述&#xff1a; 第一行包含两个整数n和q. 第二行包含n个整数, 表示a1,a2,…ana1,a2,…a**n.…

JavaWeb——Maven(4/8):Maven坐标,idea集成-导入maven项目(两种方式)

目录 Maven坐标 导入Maven项目 第一种方式 第二种方式 Maven坐标 Maven 坐标 是 Maven 当中资源的唯一标识。通过这个坐标&#xff0c;我们就能够唯一定位资源的位置。 Maven 坐标主要用在两个地方。第一个地方&#xff1a;我们可以使用坐标来定义项目。第二个地方&#…

CLANet:基于明场图像的跨批次细胞系识别综合框架|文献速递-基于深度学习的医学影像分类,分割与多模态应用

Title 题目 CLANet: A comprehensive framework for cross-batch cell line identificationusing brightfield images CLANet&#xff1a;基于明场图像的跨批次细胞系识别综合框架 01 文献速递介绍 细胞系鉴定&#xff08;Cell Line Authentication&#xff0c;CLA&#x…

英飞达医学影像存档与通信系统 WebUserLogin.asmx 信息泄露漏洞复现

0x01 产品简介 英飞达医学影像存档与通信系统 Picture Archiving and Communication System,它是应用在医院影像科室的系统,主要的任务就是把日常产生的各种医学影像(包括核磁,CT,超声,各种X光机,各种红外仪、显微仪等设备产生的图像)通过各种接口(模拟,DICOM,网络…

二叉树与堆讲解

目录 1.树的概念及结构 1.树的概念 2.树的相关概念 3.树的表示 2.二叉树 1.概念 2.特殊的二叉树 1.满二叉树 2.完全二叉树 3.二叉树的性质 4.二叉树的存储结构 1.顺序结构 2.链式存储 3.堆 1.堆的概念及结构 2.堆的实现 1.堆的创建 2.堆的初始化&#xff08;H…

力扣力扣力:206. 反转链表

leetcode链接&#xff1a;206. 反转链表 题目描述&#xff1a; 给你单链表的头节点 head &#xff0c;请你反转链表&#xff0c;并返回反转后的链表。 示例 1&#xff1a; 输入&#xff1a;head [1,2,3,4,5]输出&#xff1a;[5,4,3,2,1] 示例 2&#xff1a; 输入&#xff1…

手写Spring IOC-简易版

目录 项目结构entitydaoIUserDaoUserDaoImpl serviceIUserServiceUserServiceImpl ApplicationContext 配置文件初始化 IOC 容器RunApplication 注解初始化 IOC 容器BeanAutowired Reference 项目结构 entity User Data NoArgsConstructor AllArgsConstructor Accessors(chai…

面试八股(自用)

什么是java序列化&#xff0c;什么时候需要序列化? 序列化是指将java对象转化成字节流的过程&#xff0c;反序列化是指将字节流转化成java对象的过程。 当java对象需要在网络上传输 或者 持久化到存储文件中&#xff0c;就需要对java对象进行序列化处理。 JVM的主要组成部分…

Leetcode—1242. 多线程网页爬虫【中等】Plus(多线程)

2024每日刷题&#xff08;187&#xff09; Leetcode—1242. 多线程网页爬虫 实现代码 /*** // This is the HtmlParsers API interface.* // You should not implement it, or speculate about its implementation* class HtmlParser {* public:* vector<string>…

【Java 并发编程】阻塞队列与仿真餐厅

前言 阻塞队列 (BlockingQueue) 顾名思义是一种支持阻塞操作的队列&#xff0c;因为内部机制中添加了 wait 和 notify 方法&#xff0c;所以阻塞队列具备了线程之前相互协调的功能。阻塞队列主要运用于两个场景&#xff0c;一是生产者与消费者模型&#xff0c;二是线程池。本章…

【C语言】循环嵌套:乘法表

循环嵌套&#xff0c;外层循环执行一次&#xff0c;内层循环执行i次。分别控制 在循环的过程中加一层循环。 多层循环属于循环嵌套、嵌套循环 #include <stdio.h> #include <math.h> /* 功能&#xff1a;循环嵌套 乘法表 时间&#xff1a;2024年10月 地点&#xf…