1. Downloading the Harbor package
https://github.com/search?q=harbor&type=repositories
2. "Connection refused" error: the registry container is probably not running
# Troubleshooting:
[root@docker ~]# curl localhost:5000/v2/_catalog
curl: (7) Failed connect to localhost:5000; Connection refused
[root@docker ~]# netstat -tunlp | grep 5000
[root@docker ~]# docker ps -a
CONTAINER ID   IMAGE             COMMAND                  CREATED             STATUS                      PORTS   NAMES
9d196b7190b3   registry:latest   "/entrypoint.sh /etc…"   About an hour ago   Exited (2) 55 minutes ago           r2
[root@docker ~]# docker start r2
r2
[root@docker ~]# netstat -tunlp | grep 5000
tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      1822/docker-proxy
tcp6       0      0 :::5000                 :::*                    LISTEN      1826/docker-proxy
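Because the failure here was simply that the registry container had exited, it can help to give it a restart policy so it comes back up together with the Docker daemon. A small sketch, assuming the container is still named r2:

# Make the existing registry container restart automatically with the daemon
docker update --restart=always r2
# Or bake the policy in when the registry container is first created
docker run -d --restart=always -p 5000:5000 --name r2 registry:latest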
3. Pushing an image to the private registry
# Tag the image
[root@docker ~]# docker tag centos:latest 10.0.0.100:5000/registry:v1
# Push it
[root@docker ~]# docker push 10.0.0.100:5000/registry:v1
The push refers to repository [10.0.0.100:5000/registry]
74ddd0ec08fa: Mounted from centos
v1: digest: sha256:a1801b843b1bfaf77c501e7a6d3f709401a1e0c83863037fa3aab063a7fdb9dc size: 529
[root@docker ~]# curl localhost:5000/v2/_catalog
{"repositories":["centos","registry"]}
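Besides the _catalog endpoint used above, the Registry v2 API can also list the tags of a single repository, which is handy for confirming that a push landed; a quick check against the repository named registry created above:

# List the tags stored under the "registry" repository
curl localhost:5000/v2/registry/tags/list
# A response of the form {"name":"registry","tags":["v1"]} confirms the push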
4. Pulling an image from the private registry to another host
# Install docker
[root@docker2 ~]# cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
[root@docker2 ~]# cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
[root@docker2 ~]# sysctl --system
[root@docker2 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@docker2 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@docker2 ~]# yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y
# Edit /etc/docker/daemon.json and point it at the private registry
[root@docker2 ~]# vim /etc/docker/daemon.json
{
    "insecure-registries": [ "http://10.0.0.100:5000" ]
}
# Restart docker
[root@docker2 ~]# systemctl restart docker
# Pull the image from the private registry
[root@docker2 ~]# docker pull 10.0.0.100:5000/registry:v1
v1: Pulling from registry
a1d0c7532777: Pull complete
Digest: sha256:a1801b843b1bfaf77c501e7a6d3f709401a1e0c83863037fa3aab063a7fdb9dc
Status: Downloaded newer image for 10.0.0.100:5000/registry:v1
10.0.0.100:5000/registry:v1
# List the local images
[root@docker2 ~]# docker images
REPOSITORY                 TAG       IMAGE ID       CREATED       SIZE
10.0.0.100:5000/registry   v1        5d0da3dc9764   2 years ago   231MB
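Whether the insecure-registries entry was actually picked up can be confirmed from the daemon itself rather than by trial and error; the exact output layout varies by Docker version:

# Show the registries that the daemon treats as insecure
docker info | grep -A 3 "Insecure Registries"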
5. Installing pip
Why install pip? Because docker-compose, Docker's orchestration tool, will be installed later via pip.
Ansible orchestrates across multiple hosts; Compose orchestrates multiple containers on a single host.
# Install python3-pip
[root@docker ~]# yum -y install python3-pip
# List all installed packages and their versions
[root@docker ~]# pip list
pip (8.1.2)
# Upgrade pip to the latest version
[root@2 ~]# python3 -m pip install --upgrade pip
# Set pip3's global index to the Tsinghua mirror
[root@2 ~]# pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
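With pip3 configured, docker-compose itself can be installed straight from PyPI; a minimal sketch, with no version pinned:

# Install docker-compose via pip and verify the installation
pip3 install docker-compose
docker-compose --version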
6. Configuring cross-host container networking
(1) Using the host's IP address and ports to publish services and communicate
For example: host A => mysql container, host B => java container; the java container reaches mysql through host A's IP address and the published port.
[root@docker ~]# systemctl start docker
# mtu 1500 means a single frame on this interface cannot exceed 1500 bytes
[root@docker ~]# ip a s
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:a7:5c:14:b0 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:a7ff:fe5c:14b0/64 scope link
       valid_lft forever preferred_lft forever
# Run an nginx container
[root@docker ~]# docker run -d -p 80:80 centos:nginx
# Check the container status
[root@docker ~]# docker ps
# Check the container's IP address
[root@docker ~]# docker inspect 57
        "IPAddress": "172.17.0.2",
# Stop the docker service
[root@docker ~]# systemctl stop docker
Warning: Stopping docker.service, but it can still be activated by:
  docker.socket
# Before docker is installed there is no docker0 interface
# bridge-utils provides tools for creating and managing Linux bridge devices
[root@docker ~]# yum -y install bridge-utils
# Show the bridge devices that exist on the system
[root@docker ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.0242a75c14b0       no
# Start the docker service
[root@docker ~]# systemctl start docker
# Check the container status
[root@docker ~]# docker ps -a
CONTAINER ID   IMAGE             COMMAND                  CREATED       STATUS                     PORTS   NAMES
9d196b7190b3   registry:latest   "/entrypoint.sh /etc…"   2 hours ago   Exited (2) 3 minutes ago           r2
# Start the r2 container
[root@docker ~]# docker start r2
r2
# Show the bridge devices again; a veth interface is now attached to docker0
[root@docker ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.0242a75c14b0       no              vethd5daedb
# Show the help for docker network
[root@docker ~]# docker network --help
Usage:  docker network COMMAND
Manage networks
Commands:
  connect     Connect a container to a network
  create      Create a network
  disconnect  Disconnect a container from a network
  inspect     Display detailed information on one or more networks
  ls          List networks
  prune       Remove all unused networks
  rm          Remove one or more networks
Run 'docker network COMMAND --help' for more information on a command.
# List all networks known to the Docker daemon
# bridge: the default driver; a virtual bridge is created, containers attach to it and reach each other by IP address
# host: the container uses the host's network stack directly, with no network isolation
# none: the container has no network connectivity, only a loopback interface
[root@docker ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
5af3831f7474   bridge    bridge    local
7e7f01e3bc3a   host      host      local
2d54c66fa7ca   none      null      local
# Create two containers and check their IP addresses: they are allocated consecutively
[root@docker ~]# docker run -it centos:latest /bin/bash
[root@5246f78f2d7d /]#
[root@docker ~]#
[root@docker ~]# docker inspect 52
        "IPAddress": "172.17.0.3",
[root@docker ~]# docker run -it centos:latest /bin/bash
[root@d0094ac22554 /]#
[root@docker ~]#
[root@docker ~]# docker inspect d0
        "IPAddress": "172.17.0.4",
# Create a container that uses the host network mode
[root@docker ~]# docker run -it --network host centos:latest /bin/bash
[root@docker /]#
[root@docker ~]#
# Check the container's ID
[root@docker ~]# docker ps
CONTAINER ID   IMAGE           COMMAND       CREATED              STATUS              PORTS   NAMES
64d033715cae   centos:latest   "/bin/bash"   About a minute ago   Up About a minute
# Check the container's IP address: it has no address of its own
[root@docker ~]# docker inspect 64
        "IPAddress": "",
# Create another container with host networking
[root@docker ~]# docker run -it --network host centos:yum /bin/bash
# The container sits directly on top of the host's network stack
[root@docker /]# ls
bin  etc   lib    lost+found  mnt  proc  run   srv  tmp  var
dev  home  lib64  media       opt  root  sbin  sys  usr
# Install iproute
[root@docker /]# yum -y install iproute
# Check the IP addresses: they are the host's own addresses, identical to the host
[root@docker /]# ip a s
# Install httpd
[root@docker /]# yum -y install httpd
# Write some data into the home page
[root@docker /]# echo "hh" > /var/www/html/index.html
# Starting httpd with systemctl fails: systemctl cannot be used inside the container
[root@docker /]# systemctl start httpd
# Start httpd directly instead; this succeeds
[root@docker /]# httpd -k start
# Access it from inside the container
[root@docker /]# curl localhost
hh
# Detach from the container with Ctrl-p Ctrl-q
# Stop the firewall on the host
[root@docker ~]# systemctl stop firewalld
# Then open the host's own IP address in a browser, or curl the host's IP: the page is reachable
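This host-port approach is the simplest way to let containers on different hosts talk to each other: every service is published on its host's IP and a fixed port, and clients address host_ip:port rather than a container IP. A minimal sketch of the A => mysql, B => java scenario above, assuming host A's address is 10.0.0.10 and using placeholder image tags and password:

# On host A (10.0.0.10): publish the mysql container on the host's port 3306
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7
# On host B: the client connects to host A's IP and published port, not to a container IP
docker run -it --rm mysql:5.7 mysql -h 10.0.0.10 -P 3306 -uroot -p123456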
(2) Using an etcd database and flannel so containers on different hosts can reach each other
1) node11 configuration
(Set up etcd to hold the allowed container IP range, and set up flannel to connect to etcd so that container IPs are allocated from that defined range.)
# etcd is the key-value database; flannel is responsible for allocating the IP ranges
[root@node11 ~]# yum -y install flannel etcd
# Edit the etcd configuration: set the listen and advertise client URLs
[root@node11 ~]# vim /etc/etcd/etcd.conf
# Addresses and ports on which etcd listens for client connections
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
# Address and port that etcd advertises to other nodes and clients
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379,http://10.0.0.11:4001"
# Start the etcd service
[root@node11 ~]# systemctl start etcd
# Check that the ports are listening
[root@node11 ~]# netstat -lntup | grep 4001
tcp6       0      0 :::4001                 :::*                    LISTEN      1549/etcd
[root@node11 ~]# netstat -lntup | grep 2379
tcp6       0      0 :::2379                 :::*                    LISTEN      1549/etcd
# Enable etcd at boot
[root@node11 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
# Test the database with etcdctl by setting and getting a key
# Set the key testdir/testkey0 to 1000
[root@node11 ~]# etcdctl set testdir/testkey0 1000
1000
# Get the value of testdir/testkey0
[root@node11 ~]# etcdctl get testdir/testkey0
1000
# Check the health of the etcd cluster; -C http://10.0.0.11:4001 connects to port 4001 on 10.0.0.11
[root@node11 ~]# etcdctl -C http://10.0.0.11:4001 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://10.0.0.11:2379
cluster is healthy
# Point flanneld at the etcd database it should use
[root@node11 ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379"
# Create the key in etcd that stores the network configuration
[root@node11 ~]# etcdctl mk /atomic.io/network/config '{ "Network" : "172.20.0.0/16" }'
{ "Network" : "172.20.0.0/16" }
[root@node11 ~]# etcdctl get /atomic.io/network/config
{ "Network" : "172.20.0.0/16" }
# Start flanneld
[root@node11 ~]# systemctl start flanneld
# Enable flanneld at boot
[root@node11 ~]# systemctl enable flanneld
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
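Once flanneld is running, every node records the /24 subnet it leased under the network prefix in etcd, so the allocation can be inspected directly; the exact subnet key below is only an example of the format:

# List the per-node subnet leases flannel has written into etcd
etcdctl ls /atomic.io/network/subnets
# Show one lease in detail (the node's public IP and backend type)
etcdctl get /atomic.io/network/subnets/172.20.13.0-24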
# Check the IP range that flanneld has allocated to this host
[root@node11 ~]# ip a s
3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.20.13.0/16 scope global flannel0
# Install docker
[root@node11 ~]# sh docker.sh
# Start the docker service
[root@node11 ~]# systemctl start docker
# Check which network docker0 belongs to
[root@node11 ~]# ip a s
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:88:a7:c7:69 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
[root@node11 ~]# cat /run/flannel/subnet.env
# FLANNEL_NETWORK is the overall IP range of the flannel network; in an environment using flannel, every container IP is allocated from this range
FLANNEL_NETWORK=172.20.0.0/16
# FLANNEL_SUBNET is this node's subnet within the flannel network; containers on this node get their IPs from it
FLANNEL_SUBNET=172.20.13.1/24
# FLANNEL_MTU is the Maximum Transmission Unit, the largest packet that can be sent at once; tuning it can improve performance with certain devices or protocols
FLANNEL_MTU=1472
# FLANNEL_IPMASQ=false tells flannel not to perform IP masquerading (translating internal addresses into externally routable ones); it is not needed here
FLANNEL_IPMASQ=false
# Write the subnet that flanneld allocated into daemon.json (bip and mtu)
[root@node11 ~]# vim /etc/docker/daemon.json
{
    "registry-mirrors" : [
        "https://do.nark.eu.org",
        "https://dc.j8.work",
        "https://docker.m.daocloud.io",
        "https://dockerproxy.com",
        "https://docker.mirrors.ustc.edu.cn",
        "https://docker.nju.edu.cn"
    ],
    "hosts" : [ "tcp://0.0.0.0:2375", "unix:///var/run/docker.sock" ],
    "insecure-registries" : [ "http://10.0.0.100:5000" ],
    "bip" : "172.20.13.1/24",
    "mtu" : 1472
}
# Restart docker; if it fails, the -H option in the unit file conflicts with "hosts" in daemon.json, so edit ExecStart
[root@node11 ~]# systemctl restart docker
Job for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.
[root@node11 ~]# vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd
# The restart still fails because the changed unit file has not been reloaded yet
[root@node11 ~]# systemctl restart docker
[root@node11 ~]# systemctl daemon-reload
# Now the restart succeeds
[root@node11 ~]# systemctl restart docker
# flannel0 and docker0 now belong to the same network
[root@node11 ~]# ip a s
3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.20.13.0/16 scope global flannel0
       valid_lft forever preferred_lft forever
    inet6 fe80::a46d:9cf3:16c5:23d7/64 scope link flags 800
       valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1472 qdisc noqueue state DOWN group default
    link/ether 02:42:88:a7:c7:69 brd ff:ff:ff:ff:ff:ff
    inet 172.20.13.1/24 brd 172.20.13.255 scope global docker0
       valid_lft forever preferred_lft forever
# Pull a centos image
[root@node11 ~]# docker pull centos
# Stop and disable the firewall
[root@node11 ~]# systemctl stop firewalld
[root@node11 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
# Put SELinux into permissive mode
[root@node11 ~]# setenforce 0
# Create a container
[root@node11 ~]# docker run -it centos:latest /bin/bash
[root@d89a648c9278 /]#
[root@node11 ~]#
# Check the container's IP address: it falls inside the subnet that flanneld allocated
[root@node11 ~]# docker inspect d89
        "IPAddress": "172.20.13.2",
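Cross-host traffic works because flanneld installs a route that sends the whole 172.20.0.0/16 range out through flannel0, while the node's own /24 stays on docker0; this can be confirmed from the routing table:

# The /16 should point at flannel0 and the local /24 at docker0
ip route show | grep 172.20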
2) node22 configuration
(Set up flannel and connect it to the etcd database so that this node's container IPs are also allocated from the defined range; finally, verify that containers on the two hosts can communicate with each other.)
# Edit the flanneld configuration: point it at the host and port where etcd is running
[root@node22 ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379"
# Start the flanneld service
[root@node22 ~]# systemctl start flanneld
# Check the subnet that flanneld allocated to this host
[root@node22 ~]# ip a s
5: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.20.82.0/16 scope global flannel0
# Install and configure docker
[root@node22 ~]# sh docker.sh
# Start the docker service (daemon)
[root@node22 ~]# systemctl start docker
# Check docker's network range
[root@node22 ~]# ip a s
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:ce:a7:7c:ea brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
[root@node22 ~]# scp 10.0.0.11:/etc/docker/daemon.json /etc/docker/
# Check flanneld's environment file
[root@node22 ~]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=172.20.0.0/16
FLANNEL_SUBNET=172.20.82.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=false
# Edit the daemon.json copied from node11
[root@node22 ~]# vim /etc/docker/daemon.json
{
    "registry-mirrors" : [
        "https://do.nark.eu.org",
        "https://dc.j8.work",
        "https://docker.m.daocloud.io",
        "https://dockerproxy.com",
        "https://docker.mirrors.ustc.edu.cn",
        "https://docker.nju.edu.cn"
    ],
    "hosts" : [ "tcp://0.0.0.0:2375", "unix:///var/run/docker.sock" ],
    "insecure-registries" : [ "http://10.0.0.100:5000" ],
    "bip" : "172.20.82.1/24",
    "mtu" : 1472
}
# "bip" (bridge IP) is the gateway address of docker0 inside the subnet that flannel allocated to this node
# Edit the docker unit file
[root@node22 ~]# vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd
# Reload the unit files
[root@node22 ~]# systemctl daemon-reload
# Restart the docker service
[root@node22 ~]# systemctl restart docker
# docker0 and flannel0 now belong to the same network
[root@node22 ~]# ip a s
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1472 qdisc noqueue state DOWN group default
    link/ether 02:42:ce:a7:7c:ea brd ff:ff:ff:ff:ff:ff
    inet 172.20.82.1/24 brd 172.20.82.255 scope global docker0
5: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/none
    inet 172.20.82.0/16 scope global flannel0
# Pull an image
[root@node22 ~]# docker pull centos
# Create a container
[root@node22 ~]# docker run -it centos:latest
# ping the IP address of the container running on node11
[root@2b47b26a7286 /]# ping 172.20.13.2
PING 172.20.13.2 (172.20.13.2) 56(84) bytes of data.
64 bytes from 172.20.13.2: icmp_seq=1 ttl=60 time=8.16 ms
64 bytes from 172.20.13.2: icmp_seq=2 ttl=60 time=0.717 ms
^C
--- 172.20.13.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 0.717/4.440/8.163/3.723 ms
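The flannel0 device and the 1472 MTU indicate flannel's UDP backend: each inter-host packet is wrapped in a UDP datagram (28 bytes of overhead on a 1500-byte link), which is why the ping above succeeds even though 172.20.0.0/16 is not routable on the physical network. As a sanity check, flanneld should be listening on its encapsulation port on both nodes (8285 is assumed here as the stock default for the udp backend):

# Confirm flanneld's UDP listener
netstat -lnup | grep flanneld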
3) Summary: how it works
1. flanneld allocates a subnet to each docker host (each physical node).
2. The subnet and IP allocation information is stored in the etcd database.
3. When flanneld starts, it reads { "Network" : "172.20.0.0/16" } from etcd and adds a flannel0 interface to the host, with a randomly picked subnet from that range.
4. Configure docker's daemon file so that docker0 sits in the same subnet as flannel0; every container created afterwards then gets an IP inside the range that flanneld controls.
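The bip and mtu values written into daemon.json on each node come straight from /run/flannel/subnet.env, so they can be generated rather than copied by hand (the flannel package typically also ships a mk-docker-opts.sh helper for this purpose). A minimal sketch:

# Read the subnet and MTU that flanneld wrote out and print the matching daemon.json fields
source /run/flannel/subnet.env
echo "\"bip\" : \"${FLANNEL_SUBNET}\", \"mtu\" : ${FLANNEL_MTU}"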