k8s configuration
Pull an image and create a pod: images are pulled from Alibaba Cloud
[root@k8s-master ~]# kubectl run nginx --image=nginx:latest
[root@k8s-master ~]# kubectl get po -Aowide|grep nginx
default nginx 0/1 ImagePullBackOff 0 4m48s 172.16.85.193 k8s-node01 <none> <none>
[root@k8s-master ~]# kubectl logs nginx
Error from server (BadRequest): container "nginx" in pod "nginx" is waiting to start: trying and failing to pull image
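If a pull keeps failing, the Events section of kubectl describe shows the concrete reason (unreachable registry, bad tag, missing credentials, and so on); a quick check, output will vary:
kubectl describe pod nginx   # look at the Events section at the bottom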
[root@k8s-master ~]# kubectl run nginx0 --image=nginx
[root@k8s-master ~]# kubectl get po -Aowide|grep nginx
default nginx 0/1 ImagePullBackOff 0 9m8s 172.16.85.193 k8s-node01 <none> <none>
default nginx0 1/1 Running 0 2m24s 172.16.85.194 k8s-node01 <none> <none>
# Delete the failed nginx pod
[root@k8s-master ~]# kubectl delete pod nginx
pod "nginx" deleted
[root@k8s-master ~]# kubectl get po -Aowide|grep nginx
default nginx0 1/1 Running 0 5m30s 172.16.85.194 k8s-node01 <none> <none>
Note: nginx0 landing on node01 is decided by the scheduler (kube-scheduler-k8s-master), which assigns pods based on which nodes have free resources.
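The scheduling decision is recorded in the pod's events; a quick way to confirm which component assigned the pod and to which node (output will vary):
kubectl describe pod nginx0 | grep -i scheduled
# typically prints something like: Successfully assigned default/nginx0 to k8s-node01 (from default-scheduler)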
Deploy resource monitoring: metrics-server
-- Copy the front-proxy CA certificate to node01 and node02
[root@k8s-master ~]# scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node01:/etc/kubernetes/pki/front-proxy-ca.crt
[root@k8s-master ~]# scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node02:/etc/kubernetes/pki/front-proxy-ca.crt
-- Create the metrics-server resources from the YAML file
[root@k8s-master ~]# ls
anaconda-ks.cfg k8s-ha-install new.yaml
components.yaml kubeadm-config.yaml token
[root@k8s-master ~]# mkdir pods
[root@k8s-master ~]# mv components.yaml pods/
[root@k8s-master ~]# ls
anaconda-ks.cfg k8s-ha-install kubeadm-config.yaml new.yaml pods token
[root@k8s-master ~]# cd pods/
[root@k8s-master pods]# ls
components.yaml
[root@k8s-master pods]# kubectl create -f components.yaml
serviceaccount/metrics-server created
-- Check the pod status
[root@k8s-master pods]# kubectl get po -A|grep metrics
kube-system metrics-server-79776b6d54-5sgrk 1/1 Running 0 90s
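Once the pod is Running, the aggregated metrics API should be registered as well; v1beta1.metrics.k8s.io is the APIService that metrics-server normally creates, so a quick sanity check is:
kubectl get apiservice v1beta1.metrics.k8s.io   # AVAILABLE should be True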
-- View resource usage with metrics-server
Per-node resource usage:
[root@k8s-master pods]# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master 119m 5% 1309Mi 33%
k8s-node01 42m 2% 932Mi 50%
k8s-node02 48m 2% 899Mi 48%
Per-pod resource usage (all namespaces):
[root@k8s-master pods]# kubectl top pod -A
Pods in a specific namespace:
[root@k8s-master pods]# kubectl top pods -n kube-system
NAME CPU(cores) MEMORY(bytes)
calico-kube-controllers-6d48795585-7vjgk 2m 14Mi
calico-node-qqds6 18m 125Mi
calico-node-rn2nb 21m 132Mi
calico-node-w5bnt 22m 96Mi
coredns-6554b8b87f-thzlw 1m 14Mi
coredns-6554b8b87f-zdhh9 1m 14Mi
etcd-k8s-master 16m 79Mi
kube-apiserver-k8s-master 45m 309Mi
kube-controller-manager-k8s-master 12m 51Mi
kube-proxy-8jk8f 1m 25Mi
kube-proxy-dvfx4 1m 31Mi
kube-proxy-g226c 1m 18Mi
kube-scheduler-k8s-master 3m 23Mi
metrics-server-79776b6d54-5sgrk 3m 17Mi
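kubectl top can also rank pods by usage with --sort-by (cpu or memory; assumes a reasonably recent kubectl):
kubectl top pods -n kube-system --sort-by=memory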
Set up the dashboard (web UI)
-- Install the dashboard
[root@k8s-master ~]# ls
anaconda-ks.cfg k8s-ha-install kubeadm-config.yaml new.yaml pods token
[root@k8s-master ~]# cd k8s-ha-install/
[root@k8s-master k8s-ha-install]# ls
bootstrap CoreDNS dashboard metrics-server README.md
calico csi-hostpath kubeadm-metrics-server pki snapshotter
[root@k8s-master k8s-ha-install]# cd dashboard/
[root@k8s-master dashboard]# ls
dashboard-user.yaml dashboard.yaml
[root@k8s-master dashboard]# kubectl create -f .    # '.' means all the manifests in the current directory
[root@k8s-master dashboard]# kubectl get po -A|grep dashboard
kubernetes-dashboard dashboard-metrics-scraper-7b554c884f-dvdpz 1/1 Running 0 2m22s
kubernetes-dashboard kubernetes-dashboard-54b699784c-5dxxj 1/1 Running 0 2m22s
-- Set the Service type
The Service exposes the dashboard outside the cluster
kubectl edit svc <service-name> -n <namespace>
[root@k8s-master dashboard]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
Edit cancelled, no changes made.
Note: make sure the type is NodePort (here it already was, which is why the edit was cancelled with no changes)
[root@k8s-master dashboard]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.96.94.243 <none> 443:32052/TCP 8m18s
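If the Service is not already NodePort, the same change can be made non-interactively; a sketch using kubectl patch, equivalent to editing type in the spec:
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'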
Open https://192.168.118.66:32052 in a browser, click Advanced to proceed past the certificate warning, then log in with a token.
-- Obtain the login token
[root@k8s-master dashboard]# kubectl create token admin-user -n kube-system
eyJhbGciOiJSUzI1NiIsImtpZCI6Ijh0Zm9pU0t5S3puV3ZLWHpwVkRGUXM4aFViRjdVZFBLdHpYeTU1NVJrX1kifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MTE1NTEyLCJpYXQiOjE3MjYxMTE5MTIsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNWQwMjBkOWUtMDliMi00MGUwLThiMTUtNDViYmI3NGZhMGUyIn19LCJuYmYiOjE3MjYxMTE5MTIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.EmwSF8lVccF59bVCYZ7ncZAQ26tfi-Y0Zk5kgqEw3pKqMcXrEwxLIp8Hg84hyDvS-NsaXF7NH-6MIm9bjroLmoazUIFTV0e8-CLQyuvqel_u1V0xeXJIRsb_F0q7lylWoF-aNBIZtsY6J_pc_IrAFYiiU-mKiZ2FVz0s6vm5P0ljjo_dsbC9AHeok-qEIlXEgZxiguUfLLeRK95l-lL93-tI7eVzHFesuf1SuD29fNBwprSVIitgVom6nvbE-0nCwMC3894ILualgLAt2JRwsEHjJmgWvVulx2gsD9zRye1UcFkIJXhdIgO1KL6GihmuSmo1zKunrPZkPR_2tPrDUw
kube-proxy
-- Switch kube-proxy to ipvs mode
[root@k8s-master ~]# kubectl get pods -A|grep proxy
kube-system kube-proxy-8jk8f 1/1 Running 1 (4h57m ago) 22h
kube-system kube-proxy-dvfx4 1/1 Running 1 (4h58m ago) 22h
kube-system kube-proxy-g226c 1/1 Running 1 (4h58m ago) 23h
[root@k8s-master ~]# kubectl edit cm kube-proxy -n kube-system
configmap/kube-proxy edited
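The edit changes the mode field of the KubeProxyConfiguration kept under config.conf in that ConfigMap; a sketch of the relevant line after editing (an empty value means the default iptables mode):
mode: "ipvs"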
-- Restart the kube-proxy pods so the new configuration takes effect
[root@k8s-master ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
daemonset.apps/kube-proxy patched
[root@k8s-master ~]# curl 127.0.0.1:10249/proxyMode
ipvs[root@k8s-master ~]#
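The patch above only adds a timestamp annotation to the pod template, which forces the DaemonSet to roll its pods; on newer kubectl versions (roughly 1.15+) the same restart can be triggered with:
kubectl rollout restart daemonset kube-proxy -n kube-system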
Test passing arguments when creating a deployment
[root@k8s-master ~]# kubectl create deploy cluster-test5 --image=registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools -- sleep 3600
deployment.apps/cluster-test5 created
[root@k8s-master ~]# kubectl get po -A|grep clu
default cluster-test1-6bbcc6fc8b-h7b9j 0/1 CrashLoopBackOff 10 (90s ago) 27m
default cluster-test5-6597c95b6c-28tkj 1/1 Running 0 2s
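Everything after -- becomes the container's command in the generated pod template; a sketch of the relevant part of the Deployment spec (the container name derived from the image is an assumption):
      containers:
      - name: debug-tools
        image: registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools
        command:
        - sleep
        - "3600"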
When the pod name carries a hash suffix, delete it through the Deployment; deleting only the pod does not remove it (a replacement is created)
[root@k8s-master ~]# kubectl delete deployment cluster-test1
-- Exec into the newly created pod
[root@k8s-master ~]# kubectl exec -it cluster-test5-6597c95b6c-28tkj -- bash
(07:36 cluster-test5-6597c95b6c-28tkj:/) ifconfig
eth0 Link encap:Ethernet HWaddr aa:7a:8a:e0:3e:93
inet addr:172.16.58.203 Bcast:0.0.0.0 Mask:255.255.255.255
inet6 addr: fe80::a87a:8aff:fee0:3e93/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1480 Metric:1
RX packets:5 errors:0 dropped:0 overruns:0 frame:0
TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:446 (446.0 B) TX bytes:656 (656.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
(1 07:36 cluster-test5-6597c95b6c-28tkj:/) nslookup kubernetes
Server: 10.96.0.10
Address: 10.96.0.10#53
Name: kubernetes.default.svc.cluster.local
Address: 10.96.0.1
(07:37 cluster-test5-6597c95b6c-28tkj:/) nslookup kube-dns.kube-system
Server: 10.96.0.10
Address: 10.96.0.10#53
Name: kube-dns.kube-system.svc.cluster.local
Address: 10.96.0.10
(07:37 cluster-test5-6597c95b6c-28tkj:/) exit
exit
[root@k8s-master ~]# curl -k https://10.96.0.1:443
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
"reason": "Forbidden",
"details": {},
"code": 403
}[root@k8s-master ~]# curl http://10.96.0.1:53
curl: (7) Failed connect to 10.96.0.1:53; Connection refused
Note: 10.96.0.1 is the kubernetes (API server) Service, so port 53 is refused; the cluster DNS Service sits at 10.96.0.10.
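To probe the cluster DNS Service itself, query 10.96.0.10 directly (assuming dig from bind-utils is installed on the host):
dig @10.96.0.10 kubernetes.default.svc.cluster.local +short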
1. kubectl shell auto-completion
-- Install the completion package and enable it in the current shell
yum -y install bash-completion
source <(kubectl completion bash)
-- Make completion load automatically in new shells
echo "source <(kubectl completion bash)" >> ~/.bashrc
2. Deleting pods managed by a Deployment
# Delete the pod directly
[root@k8s-master ~]# kubectl delete pod cluster-test-64b7b9cbf-jjmmh
pod "cluster-test-64b7b9cbf-jjmmh" deleted
# The pod is still there (a replacement replica was created)
[root@k8s-master ~]# kubectl get po -A|grep cluster-test
default cluster-test-64b7b9cbf-dnn2m 0/1 ContainerCreating 0 20s
default cluster-test0-58689d5d5d-qr4mv 1/1 Running 0 34m
# Delete it via the Deployment
[root@k8s-master ~]# kubectl delete deployment cluster-test
deployment.apps "cluster-test" deleted
# Now it is gone
[root@k8s-master ~]# kubectl get po -A|grep cluster-test
Note: pods whose names carry a hash suffix must be deleted through their Deployment
3. Create a pod from a YAML file
[root@k8s-master ~]# vim pods/abc.yaml
[root@k8s-master ~]# cd pods/
[root@k8s-master pods]# ls
abc.yaml components.yaml
[root@k8s-master pods]# kubectl create -f abc.yaml
pod/busybox-sleep created
[root@k8s-master pods]# kubectl get po -A|grep busybox-sleep
default busybox-sleep 1/1 Running 0 3s
[root@k8s-master pods]# kubectl delete pod busybox-sleep
pod "busybox-sleep" deleted
[root@k8s-master pods]# kubectl get po -A|grep busy
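The contents of pods/abc.yaml are not shown in the transcript; a minimal sketch consistent with the pod name busybox-sleep (the image and args are assumptions, mirroring the JSON example in the next step):
apiVersion: v1
kind: Pod
metadata:
  name: busybox-sleep
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    args:
    - sleep
    - "1000"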
4. Write a JSON file
[root@k8s-master ~]# vim pods/abc.json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "busybox-sleep000"
  },
  "spec": {
    "containers": [
      {
        "name": "busybox000",
        "image": "busybox:1.28",
        "args": [
          "sleep",
          "1000"
        ]
      }
    ]
  }
}
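A JSON manifest is applied the same way as a YAML one; a quick run-through, assuming the file was saved as pods/abc.json:
kubectl create -f pods/abc.json
kubectl get po | grep busybox-sleep000
kubectl delete pod busybox-sleep000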