I. Set up a new NFS server (192.168.1.44)
1. Basic configuration (IP address, firewall, etc.)
2. Configure time synchronization
[root@localhost ~]# yum -y install ntpdate.x86_64
[root@localhost ~]# ntpdate time2.aliyun.com
27 Sep 10:28:08 ntpdate[1634]: adjust time server 203.107.6.88 offset 0.014965 sec
[root@localhost ~]# crontab -e //set up a scheduled sync job
0 3 * * * /sbin/ntpdate time2.aliyun.com
3. Install the NFS service packages
[root@localhost ~]# yum -y install rpcbind nfs-utils
4. Configuration file
[root@localhost ~]# echo "/root/pes *(rw,sync)" >> /etc/exports
[root@localhost ~]# cat /etc/exports
/root/pes *(rw,sync)
5. Prepare the pes resource directory (the exam system project files)
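The commands for this step are not shown in the original notes; based on the NFS paths referenced by the manifests later on, a minimal sketch of the expected layout would be:

# Sketch only: create the directory layout the later manifests expect,
# then copy the exam-system frontend build, Java sources, and MySQL data into it.
mkdir -p /root/pes/web/dist /root/pes/java/src /root/pes/mysql/data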
6. Start the services
[root@localhost ~]# systemctl start rpcbind.service nfs-server.service //start
[root@localhost ~]# systemctl enable rpcbind.service nfs-server.service //enable at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
7. Verify
[root@localhost ~]# showmount -e localhost //this output indicates success
Export list for localhost:
/root/pes *
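If you want to verify from a client machine as well, a temporary test mount works (assuming nfs-utils is installed on the client; unmount afterwards):

mount -t nfs 192.168.1.44:/root/pes /mnt   # temporary test mount
ls /mnt                                    # should list the pes contents
umount /mnt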
II. Deploy the frontend
1. Create the web-deployment.yaml manifest
[root@k8s-master ~]# mkdir pes-k8s
[root@k8s-master ~]# cd pes-k8s
[root@k8s-master pes-k8s]# vim web-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deployment
  namespace: default
  labels:
    app: web-deployment
spec:
  selector:
    matchLabels:
      app: web-deployment
  replicas: 3
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: web-deployment
    spec:
      containers:
      - name: nginxlatest
        image: docker.io/library/nginx:latest
        imagePullPolicy: Never
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 80
          name: nginxport
        volumeMounts:
        - name: localtime
          mountPath: /usr/share/nginx/html
      volumes:
      - name: localtime
        nfs:
          server: 192.168.1.44
          path: /root/pes/web/dist
      restartPolicy: Always
[root@k8s-master pes-k8s]# kubectl create -f web-deployment.yaml
deployment.apps/web-deployment created
[root@k8s-master pes-k8s]# kubectl get po -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cluster-test-66bb44bd88-nk46t 1/1 Running 76 (21m ago) 14d 172.16.169.179 k8s-node2 <none> <none>
web-deployment-7bff5ff45c-45rz6 1/1 Running 0 13m 172.16.169.186 k8s-node2 <none> <none>
web-deployment-7bff5ff45c-nlq2s 1/1 Running 0 13m 172.16.36.76 k8s-node1 <none> <none>
web-deployment-7bff5ff45c-whkd2 1/1 Running 0 13m 172.16.169.185 k8s-node2 <none> <none>
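Optionally, confirm the rollout has completed before testing:

kubectl rollout status deployment/web-deployment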
2. Access test
[root@k8s-master pes-k8s]# curl 172.16.169.186
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link rel="icon" href="/favicon.ico">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Vite App</title>
<script type="module" crossorigin src="/assets/index-C4kAShR5.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-CSz7ARPP.css">
</head>
<body>
<div id="app"></div>
</body>
</html>
III. Deploy the Java backend
1. Build a custom Java container
(1) Copy the pes/java directory from the NFS host to the k8s-master host (192.168.1.110)
[root@localhost ~]# scp -r pes/java root@192.168.1.110:/root/
(2) Confirm the java directory arrived
[root@k8s-master pes-k8s]# ls ~
anaconda-ks.cfg java mysql.tar nginx.1.25.0.tar tdr
busybox.tar k8s-ha-install new.yaml nginx.tar token
centos.tar kubeadm-config.yaml nginx.1.20.0.tar pes-k8s wordpress.tar
haproxy.tar mariadb.tar nginx.1.21.0.tar pods
[root@k8s-master pes-k8s]# cd ~/java/
[root@k8s-master java]# ls
Dockerfile jdk src start.sh
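The Dockerfile itself is not reproduced in these notes. Purely as an illustration of what it might contain given the files present (jdk, src, start.sh), and with the base image, paths, and launch command below being assumptions, it could look roughly like this:

# Hypothetical sketch only; the real Dockerfile ships with the project.
# Assumed base image; a plain CentOS image is already present on this host.
FROM centos:latest
# Bundle the JDK, the application sources, and the launcher script.
ADD jdk /java/jdk
ADD src /java/src
ADD start.sh /java/start.sh
ENV PATH=/java/jdk/bin:$PATH
WORKDIR /java
EXPOSE 8080
CMD ["/bin/bash", "start.sh"]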
(3) Build the Java image
[root@k8s-master java]# systemctl start docker.service
[root@k8s-master java]# docker build -t java:v0 ./ //build the image from the Dockerfile
[root@k8s-master java]# docker images //list images
REPOSITORY TAG IMAGE ID CREATED SIZE
java v0 5c0a07c9bcfb 2 minutes ago 592MB
wordpress latest e6e656a22e5a 2 weeks ago 699MB
haproxy latest a782c02b8259 3 weeks ago 103MB
mariadb latest 09f5b532c2ef 3 weeks ago 414MB
nginx alpine c7b4f26a7d93 6 weeks ago 43.2MB
nginx latest 39286ab8a5e1 6 weeks ago 188MB
mysql 5.7.44 5107333e08a8 9 months ago 501MB
nginx 1.25.0 7d3c40f240e1 15 months ago 143MB
busybox latest 6fd955f66c23 16 months ago 4.26MB
centos latest 5d0da3dc9764 3 years ago 231MB
nginx 1.21.0 4f380adfc10f 3 years ago 133MB
nginx 1.20.0 7ab27dbbfbdf 3 years ago 133MB
(4) Save the image and copy it to the k8s-node nodes
[root@k8s-master java]# docker save -o java.tar java:v0 //save to a tarball
[root@k8s-master java]# scp java.tar root@192.168.1.22:~ //copy to node1
java.tar 100% 572MB 25.2MB/s 00:22
[root@k8s-master java]# scp java.tar root@192.168.1.33:~ //copy to node2
java.tar 100% 572MB 22.9MB/s 00:24
(5) Import the image on node1 and node2
[root@k8s-node1 ~]# ctr -n k8s.io images import java.tar --platform linux/amd64
unpacking docker.io/library/java:v0 (sha256:a3fe9b7e8ff3f56cf95e4d35c0206237c3aeea3ebf10817d99523509c0a59fbc)...done
[root@k8s-node2 ~]# ctr -n k8s.io images import java.tar --platform linux/amd64
unpacking docker.io/library/java:v0 (sha256:a3fe9b7e8ff3f56cf95e4d35c0206237c3aeea3ebf10817d99523509c0a59fbc)...done
[root@k8s-node1 ~]# crictl images|grep java //verify
docker.io/library/java v0 5c0a07c9bcfb3 599MB
[root@k8s-node2 ~]# crictl images|grep java
docker.io/library/java v0 5c0a07c9bcfb3 599MB
2. Create the java-deployment.yaml manifest
[root@k8s-master java]# cd
[root@k8s-master ~]# cd pes-k8s/
[root@k8s-master pes-k8s]# vim java-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: java-deployment
  namespace: default
  labels:
    app: java-deployment
spec:
  selector:
    matchLabels:
      app: java-deployment
  replicas: 3
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: java-deployment
    spec:
      # initContainers:
      # Init containers are exactly like regular containers, except:
      # - Init containers always run to completion.
      # - Each init container must complete successfully before the next one starts.
      containers:
      - name: java
        image: docker.io/library/java:v0
        imagePullPolicy: Never
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 300Mi   # give the Java container a bit more memory here
        ports:
        - containerPort: 8080
          name: javaport
        volumeMounts:
        - name: localtime
          mountPath: /java/src
      volumes:
      - name: localtime
        nfs:
          server: 192.168.1.44
          path: /root/pes/java/src
      restartPolicy: Always
[root@k8s-master pes-k8s]# kubectl create -f java-deployment.yaml
deployment.apps/java-deployment created
[root@k8s-master pes-k8s]# kubectl get po -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cluster-test-66bb44bd88-nk46t 1/1 Running 80 (12m ago) 14d 172.16.169.179 k8s-node2 <none> <none>
java-deployment-7948cbb74b-bgq2s 1/1 Running 0 109s 172.16.169.132 k8s-node2 <none> <none>
java-deployment-7948cbb74b-bnsr9 1/1 Running 0 109s 172.16.169.137 k8s-node2 <none> <none>
java-deployment-7948cbb74b-hz7s6 1/1 Running 0 109s 172.16.36.94 k8s-node1 <none> <none>
web-deployment-7bff5ff45c-45rz6 1/1 Running 0 4h5m 172.16.169.186 k8s-node2 <none> <none>
web-deployment-7bff5ff45c-nlq2s 1/1 Running 0 4h5m 172.16.36.76 k8s-node1 <none> <none>
web-deployment-7bff5ff45c-whkd2 1/1 Running 0 4h5m 172.16.169.185 k8s-node2 <none> <none>
Note: files can lose their permissions when the archive is extracted;
if that happens, just grant the appropriate permissions again.
3. Access test
[root@k8s-master pes-k8s]# curl 172.16.169.132:8080 //a response like this means the backend is up (the message says "account does not exist or wrong password")
{"code":20002,"msg":"账号不存在或密码错误"}[root@k8s-master pes-k8s]#
IV. Deploy the database
1. Create the mysql-deployment.yaml manifest
[root@k8s-master pes-k8s]# vim mysql-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-deploy
  namespace: default
  labels:
    app: mysql-deploy
spec:
  selector:
    matchLabels:
      app: mysql-deploy
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mysql-deploy
    spec:
      # initContainers:
      # Init containers are exactly like regular containers, except:
      # - Init containers always run to completion.
      # - Each init container must complete successfully before the next one starts.
      containers:
      - name: mysql-deployment
        image: docker.io/library/mysql:5.7.44
        imagePullPolicy: Never
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 200Mi
        ports:
        - containerPort: 3306
          name: mysql-deploy
        volumeMounts:
        - name: localtime
          mountPath: /var/lib/mysql
      volumes:
      - name: localtime
        nfs:
          server: 192.168.1.44
          path: /root/pes/mysql/data
      restartPolicy: Always
[root@k8s-master pes-k8s]# kubectl create -f mysql-deployment.yaml
deployment.apps/mysql-deploy created
[root@k8s-master pes-k8s]# kubectl get po //the mysql pod fails to start here because the NFS export does not grant it sufficient permissions
NAME READY STATUS RESTARTS AGE
cluster-test-66bb44bd88-nk46t 1/1 Running 80 (30m ago) 14d
java-deployment-7948cbb74b-bgq2s 1/1 Running 0 20m
java-deployment-7948cbb74b-bnsr9 1/1 Running 0 20m
java-deployment-7948cbb74b-hz7s6 1/1 Running 0 20m
mysql-deploy-77776fd66f-4fd5r 0/1 CrashLoopBackOff 2 (20s ago) 45s
web-deployment-7bff5ff45c-45rz6 1/1 Running 0 4h23m
web-deployment-7bff5ff45c-nlq2s 1/1 Running 0 4h23m
web-deployment-7bff5ff45c-whkd2 1/1 Running 0 4h23m
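Before changing anything, you can confirm the cause from the failing pod itself (the exact error text depends on the MySQL image, but it typically complains about permissions on the data directory):

kubectl logs mysql-deploy-77776fd66f-4fd5r        # container log of the crashing pod
kubectl describe po mysql-deploy-77776fd66f-4fd5r # events, volume mounts, restart reasons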
2. On the NFS host
[root@localhost ~]# vim /etc/exports
/root/pes *(rw,sync,no_root_squash)
[root@localhost ~]# systemctl restart nfs.service //restart the NFS service
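Re-exporting also works without a full service restart once /etc/exports has been edited:

exportfs -ra   # re-read /etc/exports and refresh all exports
exportfs -v    # list the active exports with their options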
3. Back on k8s-master
[root@k8s-master pes-k8s]# kubectl delete -f mysql-deployment.yaml //delete first
deployment.apps "mysql-deploy" deleted
[root@k8s-master pes-k8s]# kubectl create -f mysql-deployment.yaml //then recreate
deployment.apps/mysql-deploy created
[root@k8s-master pes-k8s]# kubectl get po
NAME READY STATUS RESTARTS AGE
cluster-test-66bb44bd88-nk46t 1/1 Running 80 (37m ago) 14d
java-deployment-7948cbb74b-bgq2s 1/1 Running 0 26m
java-deployment-7948cbb74b-bnsr9 1/1 Running 0 26m
java-deployment-7948cbb74b-hz7s6 1/1 Running 0 26m
mysql-deploy-77776fd66f-gr9l6 1/1 Running 0 4s
web-deployment-7bff5ff45c-45rz6 1/1 Running 0 4h30m
web-deployment-7bff5ff45c-nlq2s 1/1 Running 0 4h30m
web-deployment-7bff5ff45c-whkd2 1/1 Running 0 4h30m
4. Access test
[root@k8s-master pes-k8s]# kubectl get po -owide //get the pod IP
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cluster-test-66bb44bd88-nk46t 1/1 Running 80 (42m ago) 14d 172.16.169.179 k8s-node2 <none> <none>
java-deployment-7948cbb74b-bgq2s 1/1 Running 0 32m 172.16.169.132 k8s-node2 <none> <none>
java-deployment-7948cbb74b-bnsr9 1/1 Running 0 32m 172.16.169.137 k8s-node2 <none> <none>
java-deployment-7948cbb74b-hz7s6 1/1 Running 0 32m 172.16.36.94 k8s-node1 <none> <none>
mysql-deploy-77776fd66f-gr9l6 1/1 Running 0 5m32s 172.16.36.85 k8s-node1 <none> <none>
web-deployment-7bff5ff45c-45rz6 1/1 Running 0 4h35m 172.16.169.186 k8s-node2 <none> <none>
web-deployment-7bff5ff45c-nlq2s 1/1 Running 0 4h35m 172.16.36.76 k8s-node1 <none> <none>
web-deployment-7bff5ff45c-whkd2 1/1 Running 0 4h35m 172.16.169.185 k8s-node2 <none> <none>
[root@k8s-master pes-k8s]# mysql -h 172.16.36.85 -uzhangmin -pzhangmin //connect; the machine running this needs the mysql client installed
V. Deploy the corresponding Services (proxying and load balancing)
[root@k8s-master ~]# kubectl get po --show-labels //check the labels
NAME READY STATUS RESTARTS AGE LABELS
cluster-test-66bb44bd88-nk46t 1/1 Running 82 (5m54s ago) 14d app=cluster-test,pod-template-hash=66bb44bd88
java-deployment-7948cbb74b-bgq2s 1/1 Running 1 (5m54s ago) 84m app=java-deployment,pod-template-hash=7948cbb74b
java-deployment-7948cbb74b-bnsr9 1/1 Running 1 (5m54s ago) 84m app=java-deployment,pod-template-hash=7948cbb74b
java-deployment-7948cbb74b-hz7s6 1/1 Running 1 (6m7s ago) 84m app=java-deployment,pod-template-hash=7948cbb74b
mysql-deploy-77776fd66f-gr9l6 1/1 Running 1 (6m7s ago) 58m app=mysql-deploy,pod-template-hash=77776fd66f
web-deployment-7bff5ff45c-45rz6 1/1 Running 1 (5m54s ago) 5h28m app=web-deployment,pod-template-hash=7bff5ff45c
web-deployment-7bff5ff45c-nlq2s 1/1 Running 1 (6m7s ago) 5h28m app=web-deployment,pod-template-hash=7bff5ff45c
web-deployment-7bff5ff45c-whkd2 1/1 Running 1 (5m54s ago) 5h28m app=web-deployment,pod-template-hash=7bff5ff45c
1. Deploy the Service for web-deployment
[root@k8s-master pes-k8s]# vim web-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: web-service
  namespace: default
spec:
  selector:
    app: web-deployment
  type: NodePort
  sessionAffinity: None
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
  ports:
  - name: webport
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 32000
[root@k8s-master pes-k8s]# kubectl create -f web-service.yaml
service/web-service created
[root@k8s-master pes-k8s]# kubectl get svc //check the Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14d
web-service NodePort 10.96.3.80 <none> 80:32000/TCP 74s
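You can also check the NodePort from the command line before opening a browser:

curl -I http://192.168.1.110:32000   # any node IP works; expect an HTTP 200 from nginx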
2. Open http://192.168.1.110:32000 in a browser
3. Add a hosts override on the local Windows machine; the hosts file lives in
C:\Windows\System32\drivers\etc
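For example, if the site is meant to be reached by a hostname instead of the raw node IP, the entry maps that name to a node address (the hostname below is only an illustration):

192.168.1.110    pes.example.com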
4. Deploy the Service for java-deployment
[root@k8s-master pes-k8s]# vim java-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: javaservice
  namespace: default
spec:
  selector:
    app: java-deployment
  type: NodePort
  sessionAffinity: None
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
  ports:
  - name: javaport
    protocol: TCP
    port: 8080
    targetPort: 8080
    nodePort: 32100
[root@k8s-master pes-k8s]# kubectl create -f java-service.yaml
service/javaservice created
[root@k8s-master pes-k8s]# kubectl get svc //check the Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
javaservice NodePort 10.96.71.251 <none> 8080:32100/TCP 91s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14d
web-service NodePort 10.96.3.80 <none> 80:32000/TCP 13m
5. External access test
At this point the frontend cannot reach the backend: the page still sends its API requests to port 8080, while the Java service is exposed on NodePort 32100. You can either put a proxy in front (HAProxy, IPVS, Nginx, etc.) or change the port the frontend uses for the Java service to 32100 (not recommended, but done here for simplicity).
6. Change the port the frontend uses for the Java service (on the NFS host)
[root@localhost ~]# cd pes/web/dist/assets/
[root@localhost assets]# for fn in $(ls *.js); do echo $fn; cat $fn|grep 8080; done //find which file contains 8080
[root@localhost assets]# vim index-8SnX15u9.js //change 8080 to 32100
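A non-interactive alternative to editing in vim (keep a backup, since this replaces every occurrence of 8080 in the minified bundle):

sed -i.bak 's/8080/32100/g' index-8SnX15u9.js   # writes a .bak copy, then rewrites the file in place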
7. Access test
Both the web app and the Java app are now reachable from outside, but the page returns a 500 error. Checking the Java pod's log shows that it cannot connect to the database.
[root@k8s-master pes-k8s]# kubectl logs java-deployment-7948cbb74b-hz7s6
8. Check the Java configuration file (on the NFS host)
[root@localhost ~]# cat pes/java/src/application.properties
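The file contents are not reproduced in these notes; what matters is the database hostname it points at. Assuming a standard Spring Boot datasource configuration (the property keys and URL parameters below are assumptions), the relevant lines would look roughly like:

# Hypothetical sketch of the datasource section; the real file ships with the project.
spring.datasource.url=jdbc:mysql://hap:3306/project_exam_system?useSSL=false&characterEncoding=utf8
spring.datasource.username=zhangmin
spring.datasource.password=zhangmin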
9. Deploy the Service for the MySQL deployment
[root@k8s-master pes-k8s]# vim mysql-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: hap   # must be "hap" so it matches the database hostname in the Java config file
  namespace: default
spec:
  selector:
    app: mysql-deploy
  type: ClusterIP
  sessionAffinity: None
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
  ports:
  - name: mysqlport
    protocol: TCP
    port: 3306
    targetPort: 3306
[root@k8s-master pes-k8s]# kubectl create -f mysql-service.yaml
service/hap created
[root@k8s-master pes-k8s]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
hap ClusterIP 10.96.171.17 <none> 3306/TCP 76s
javaservice NodePort 10.96.71.251 <none> 8080:32100/TCP 75m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14d
web-service NodePort 10.96.3.80 <none> 80:32000/TCP 87m
VI. Access test
[root@k8s-master pes-k8s]# mysql -h172.16.36.84 -uzhangmin -pzhangmin //look up the account and password stored in MySQL
>select * from project_exam_system.user;
Finally, access the system in a browser.