Preparation
Set up three virtual machines. Unless noted otherwise, run the steps below on all three.
Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
Temporarily disable SELinux
setenforce 0
Permanently disable SELinux (set SELINUX=disabled)
vi /etc/selinux/config
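A one-line alternative to editing the file by hand (a sketch, assuming SELINUX is currently set to enforcing):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # assumes the file currently has SELINUX=enforcing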
Install Docker
# Check for any existing Docker packages
rpm -qa|grep docker
# Remove old versions, then verify they are gone
yum remove docker* -y
rpm -qa|grep docker
yum install -y yum-utils
# Configure the Docker yum repository
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install a specific version
yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6
Start Docker and enable it on boot
systemctl start docker
systemctl enable --now docker
Configure Docker registry mirrors and the systemd cgroup driver
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://82m9ar63.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
systemctl daemon-reload
systemctl restart docker
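To confirm the mirror and cgroup driver settings took effect, docker info can be checked:
docker info | grep -A1 "Registry Mirrors"
docker info | grep "Cgroup Driver"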
Install Kubernetes
Base environment
All machines must be able to reach each other over their private IPs.
# Set a unique hostname on each machine
hostnamectl set-hostname master   # on the master
hostnamectl set-hostname node1    # on node1
hostnamectl set-hostname node2    # on node2
Reconnect the terminal to see the new hostname in the prompt.
# Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Allow iptables to see bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
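The modules-load.d file only takes effect at boot, so load the module now and check that both sysctls read 1:
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables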
Install kubelet, kubeadm and kubectl
# Configure the Kubernetes yum repository
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install kubelet, kubeadm and kubectl (pinned to 1.20.9)
yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9
Start kubelet (it will crash-loop until kubeadm init/join runs; this is expected)
systemctl start kubelet
systemctl enable --now kubelet
# On every machine, add host entries for all three nodes
echo "172.16.39.66 master" >> /etc/hosts
echo "172.16.39.67 node1" >> /etc/hosts
echo "172.16.39.68 node2" >> /etc/hosts
Initialize the master node
Run on the master only (the advertise address and endpoint must point at the master, 172.16.39.66 here)
kubeadm init \
--apiserver-advertise-address=172.16.39.66 \
--control-plane-endpoint=master \
--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
--kubernetes-version v1.20.9 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.168.0.0/16
Wait for it to finish.
Record the key information from the output: the kubeadm join command with its token and cert hash.
On the master, run the follow-up commands printed in that output.
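The output normally includes kubeconfig setup commands along these lines (use the exact ones from your own output):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config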
Install the Calico network plugin (on the master)
curl https://docs.projectcalico.org/archive/v3.20/manifests/calico.yaml -O
kubectl apply -f calico.yaml
kubectl get pod -A
Wait until every pod shows READY 1/1.
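To keep watching instead of re-running the command:
kubectl get pod -A -w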
If image pulls fail (edit /etc/docker/daemon.json)
On all three hosts:
vi /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://82m9ar63.mirror.aliyuncs.com",
    "https://0c105db5188026850f80c001def654a0.mirror.swr.myhuaweicloud.com",
    "https://5tqw56kt.mirror.aliyuncs.com",
    "https://docker.1panel.live",
    "http://mirrors.ustc.edu.cn/",
    "http://mirror.azure.cn/",
    "https://hub.rat.dev/",
    "https://docker.ckyl.me/",
    "https://docker.chenby.cn",
    "https://docker.hpcloud.cloud",
    "https://docker.m.daocloud.io"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
systemctl daemon-reload
systemctl restart docker
systemctl start kubelet
Check which images Calico needs, then pull them manually
grep image /root/calico.yaml
docker pull docker.io/calico/cni:v3.20.6
docker pull docker.io/calico/pod2daemon-flexvol:v3.20.6
docker pull docker.io/calico/node:v3.20.6
docker pull docker.io/calico/kube-controllers:v3.20.6
Then on the master:
kubectl apply -f calico.yaml
kubectl get pod -A
Done once every pod is READY 1/1.
Join the worker nodes
Run on each node:
kubeadm join master:6443 --token 24a7hb.5719b9rdljx27z9y \
--discovery-token-ca-cert-hash sha256:19d04bddfcdaed75221131d57f989cab5e7114bdbc05c51da861cf6d63e2e139
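The token is only valid for 24 hours; if it has expired, print a fresh join command on the master with:
kubeadm token create --print-join-command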
Check with kubectl get nodes
kubectl get pod -A
Wait until every pod is READY 1/1, then run kubectl get nodes again to confirm all nodes are Ready.
Install the KubeSphere prerequisites
NFS file system
Install nfs-server
# On every machine
yum install -y nfs-utils
# On the master: run the following to make the master the NFS server
# and export the /nfs/data/ directory
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# Create the shared directory and start the NFS services
mkdir -p /nfs/data
# On the master
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
# Apply the export configuration
exportfs -r
# Check that it took effect; /nfs/data should be listed (exported to <world>)
exportfs
Configure the NFS client on the node machines
showmount -e 172.16.39.66
mkdir -p /nfs/data
mount -t nfs 172.16.39.66:/nfs/data /nfs/data
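To verify the mount:
mount | grep /nfs/data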
Configure the default storage
Configure a default StorageClass with dynamic provisioning
vi sc.yaml
Paste in the contents of 默认存储.txt
Change the NFS server IP in it to the master's IP (172.16.39.66)
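That file isn't reproduced here; as a rough sketch of its shape (the name, provisioner and parameters below are assumptions, and the real file also contains the provisioner Deployment and RBAC, which is where the NFS server IP goes):
# sketch only -- names and values are assumed, not copied from 默认存储.txt
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"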
Apply sc.yaml
kubectl apply -f sc.yaml
Check the StorageClass and confirm it took effect (it should be marked as default)
kubectl get sc
kubectl get pod -A
metrics-server
Cluster metrics monitoring component
See metrics-server.txt
vi metrics.yaml
Paste in the contents of metrics-server.txt; no changes needed
kubectl apply -f metrics.yaml
Wait a short while, then:
kubectl get pod -A
kubectl top nodes
Shows CPU and memory usage for each node (1 CPU core = 1000m)
kubectl top pods        # pods in the current namespace
kubectl top pods -A     # all pods
Install KubeSphere
Download the core files
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/cluster-configuration.yaml
Modify cluster-configuration
vim cluster-configuration.yaml
Edit the settings under spec
In cluster-configuration.yaml, enable the features you need (change false to true)
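For example, enabling etcd monitoring (relevant to the certificate step at the end of this guide) looks roughly like this under spec (a sketch; the endpointIps value is an assumption and should be your master/etcd node IP):
etcd:
  monitoring: true          # was false
  endpointIps: 172.16.39.66 # assumed: the master/etcd node IP
  port: 2379
  tlsEnable: true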
Run the installation
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
Watch the installation progress
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
Access port 30880 on any machine
Account:  admin
Password: P@88w0rd
Before logging in, confirm once more that every pod is READY 1/1
kubectl get pods -A
If a pod stays stuck in ContainerCreating, inspect it with:
kubectl describe pod <pod-name> -n <namespace>
Fix the missing etcd monitoring certificates
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key
Open in a browser: http://172.16.39.66:30880/login