# Docker Advanced
# Docker Networking
# Understanding docker0
[root@VM-20-7-centos dockerfiles]# docker run -d -P -it mytomcat:1.0
# Check the container's IP with ip addr: on startup the container gets an interface eth0@if161 and the address 172.17.0.3, both assigned by Docker
[root@VM-20-7-centos dockerfiles]# docker exec -it 243b9d1f9c6b ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
160: eth0@if161: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# The host can of course ping that address
[root@VM-20-7-centos /]# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.063 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.045 ms
64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.045 ms
# Test from the host
[root@VM-20-7-centos dockerfiles]# ip addr
...
161: veth6c6429b@if160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 72:a4:05:71:cd:8a brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet6 fe80::70a4:5ff:fe71:cd8a/64 scope link
valid_lft forever preferred_lft forever
# The host's interface 161 and the container's interface 160 are the two ends of one veth pair, attached to docker0
Every time a container starts, Docker assigns it an IP address. The host's docker0 interface acts as a bridge and is wired to each container using veth-pair technology.
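A quick way to see that wiring (a minimal sketch; it assumes the default bridge network and the containers started above, and the --format output may vary slightly between Docker versions):
# List all host-side veth interfaces attached to the docker0 bridge
ip link show master docker0
# Show which container holds which IP on the default bridge network
docker network inspect bridge -f '{{range .Containers}}{{.Name}} {{.IPv4Address}}{{println}}{{end}}'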
# Testing container-to-container networking
# Start a second container, centos02
[root@VM-20-7-centos dockerfiles]# docker run -d -P -it --name centos02 mycentos /bin/bash
[root@VM-20-7-centos dockerfiles]# docker exec -it centos02 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
168: eth0@if169: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# Check the host's network: another veth interface has appeared
[root@VM-20-7-centos dockerfiles]# ip addr
...
169: vetha1f3b53@if168: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 4a:49:ee:60:08:bf brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::4849:eeff:fe60:8bf/64 scope link
valid_lft forever preferred_lft forever
# So every running container brings one veth pair: one end is the container's eth0, the peer sits on the host attached to docker0
# Test the connection to centos02: it can be pinged
[root@VM-20-7-centos dockerfiles]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.056 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.047 ms
64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.046 ms
64 bytes from 172.17.0.2: icmp_seq=4 ttl=64 time=0.045 ms
veth pair technology:
- On container platforms such as Docker, the veth pair is one of the building blocks of container networking. Docker creates a veth pair for every container: one end becomes the network interface inside the container, and the other end is attached to a bridge on the host (such as docker0).
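To make the mechanism concrete, here is a hand-rolled sketch of the same idea using a plain network namespace (the names veth-host/veth-demo and the 10.10.10.0/24 addresses are arbitrary; run as root and clean up afterwards):
# Create a namespace that stands in for a "container"
ip netns add demo
# Create the veth pair and move one end into the namespace
ip link add veth-host type veth peer name veth-demo
ip link set veth-demo netns demo
# Address and bring up both ends
ip addr add 10.10.10.1/24 dev veth-host
ip link set veth-host up
ip netns exec demo ip addr add 10.10.10.2/24 dev veth-demo
ip netns exec demo ip link set veth-demo up
# The namespace is now reachable from the host through the pair
ping -c 1 10.10.10.2
# Clean up
ip netns del demo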
# --link
# centos02 cannot ping centos01 by name
[root@VM-20-7-centos ~]# docker exec -it centos02 ping centos01
ping: centos01: Name or service not known
# Start centos03 and link it to centos02 with --link
[root@VM-20-7-centos ~]# docker run -it -P --name centos03 --link centos02 mycentos /bin/bash
[root@d013f40d4c5d local]# [root@VM-20-7-centos ~]#
# centos03 can now ping centos02 directly by name
[root@VM-20-7-centos ~]# docker exec -it centos03 ping centos02
PING centos02 (172.17.0.2) 56(84) bytes of data.
64 bytes from centos02 (172.17.0.2): icmp_seq=1 ttl=64 time=0.082 ms
64 bytes from centos02 (172.17.0.2): icmp_seq=2 ttl=64 time=0.056 ms
64 bytes from centos02 (172.17.0.2): icmp_seq=3 ttl=64 time=0.052 ms
# The reverse does not work: centos02 cannot reach centos03 by name unless it is configured as well
[root@VM-20-7-centos ~]# docker exec -it centos02 ping centos03
ping: centos03: Name or service not known
[root@VM-20-7-centos ~]#
How it works: --link simply writes an entry for centos02 into the /etc/hosts file inside the centos03 container.
# Look at centos03's /etc/hosts
[root@VM-20-7-centos ~]# docker exec -it centos03 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.2 centos02 9ab99e00f62a
172.17.0.4 d013f40d4c5d
# Look at centos02's /etc/hosts
[root@VM-20-7-centos ~]# docker exec -it centos02 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.2 9ab99e00f62a
--link is not recommended.
The problem with the default docker0 bridge: it does not support access by container name.
# Custom networks
# List all Docker networks
[root@VM-20-7-centos ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
09972acd76c8 bridge bridge local
8870905238c3 host host local
4b1583138926 none null local
# Network modes
- bridge: bridged networking (the default; a quick sketch of each mode follows this list)
- none: no networking configured
- host: share the host's network stack
- container: join another container's network namespace (rarely used, very limited)
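A rough sketch of the modes above (it assumes an alpine image is available locally and that centos02 from earlier is still running):
# none: only the loopback interface exists inside the container
docker run --rm --network none alpine ip addr
# host: the container sees the host's interfaces directly, so no port mapping is needed
docker run --rm --network host alpine ip addr
# container: join centos02's network namespace, i.e. share its eth0 and IP
docker run --rm --network container:centos02 alpine ip addr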
# Creating a custom network
# --net bridge is the default network, and that bridge is docker0
[root@VM-20-7-centos ~]# docker run -d -P -it --name centos01 --net bridge mycentos
# Create a custom network named mynet
[root@VM-20-7-centos ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
03d7b4b35b5a03dc1ffbab1c5fa5b12d1f4a4319ca346960fddb091db3a246c2
[root@VM-20-7-centos ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
09972acd76c8 bridge bridge local
8870905238c3 host host local
03d7b4b35b5a mynet bridge local
4b1583138926 none null local
# Using the custom network
# Start two containers and attach them to the custom network
[root@VM-20-7-centos ~]# docker run -d -P -it --name centos-net-01 --net mynet mycentos
8b738252f40f56f222c975522458ec4cf5ac5dbfcf282f84b2e9f4a9a807a2ff
[root@VM-20-7-centos ~]# docker run -d -P -it --name centos-net-02 --net mynet mycentos
5f7226fd89e14fb4397705754d796365b2d6be1078d974bc1a0b476a2acf09e3
# Inspect the custom network: two IP addresses have been assigned
[root@VM-20-7-centos ~]# docker network inspect mynet
[
{
"Name": "mynet",
...
"Containers": {
"5f7226fd89e14fb4397705754d796365b2d6be1078d974bc1a0b476a2acf09e3": {
"Name": "centos-net-02",
"EndpointID": "52ecc2536cf7196b65f4c9b3b821c423d8ce4ae76dbb5a652cfe6f79b398d1c0",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"8b738252f40f56f222c975522458ec4cf5ac5dbfcf282f84b2e9f4a9a807a2ff": {
"Name": "centos-net-01",
"EndpointID": "6d8ed215536ce89e8a35c0ff479f6615c0567d5cc3ec27bbb3c8484db815823a",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
# Test container-to-container connectivity by IP
[root@VM-20-7-centos ~]# docker exec -it centos-net-01 ping 192.168.0.2
PING 192.168.0.2 (192.168.0.2) 56(84) bytes of data.
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.033 ms
64 bytes from 192.168.0.2: icmp_seq=2 ttl=64 time=0.034 ms
^C
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.033/0.033/0.034/0.005 ms
# Test container-to-container connectivity by name
[root@VM-20-7-centos ~]# docker exec -it centos-net-01 ping centos-net-02
PING centos-net-02 (192.168.0.3) 56(84) bytes of data.
64 bytes from centos-net-02.mynet (192.168.0.3): icmp_seq=1 ttl=64 time=0.059 ms
64 bytes from centos-net-02.mynet (192.168.0.3): icmp_seq=2 ttl=64 time=0.058 ms
64 bytes from centos-net-02.mynet (192.168.0.3): icmp_seq=3 ttl=64 time=0.053 ms
On a user-defined network, Docker maintains the name-to-IP mapping for you (through its embedded DNS), so this is the recommended approach.
Benefits:
- a Redis cluster can live on network 1
- a MySQL cluster can live on network 2
- the networks are isolated from each other, while containers on the same network can reach one another (see the sketch below)
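A minimal sketch of that isolation, using two hypothetical networks net-redis and net-mysql (alpine is used here only for brevity):
docker network create net-redis
docker network create net-mysql
docker run -d --name svc-a --net net-redis alpine sleep 3600
docker run -d --name svc-b --net net-mysql alpine sleep 3600
# Same network: name resolution and ping work
docker run --rm --net net-redis alpine ping -c 1 svc-a
# Different network: the name does not even resolve, unless the networks are
# bridged with docker network connect (next section)
docker run --rm --net net-redis alpine ping -c 1 svc-b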
# Connecting containers across networks
Create two containers on the default docker0 bridge
[root@VM-20-7-centos ~]# docker run -d -P -it --name centos01 mycentos
[root@VM-20-7-centos ~]# docker run -d -P -it --name centos02 mycentos
Test: connect centos01 (on docker0) to centos-net-01 (on mynet)
[root@VM-20-7-centos ~]# docker network connect mynet centos01
# The mynet network has now assigned an IP address to centos01
[root@VM-20-7-centos ~]# docker network inspect mynet
[
{
"Name": "mynet",
...
"Containers": {
...
"5f7226fd89e14fb4397705754d796365b2d6be1078d974bc1a0b476a2acf09e3": {
"Name": "centos-net-02",
"EndpointID": "52ecc2536cf7196b65f4c9b3b821c423d8ce4ae76dbb5a652cfe6f79b398d1c0",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
...
},
"Options": {},
"Labels": {}
}
]
# Now the ping succeeds
[root@VM-20-7-centos ~]# docker exec -it centos01 ping centos-net-01
PING centos-net-01 (192.168.0.2) 56(84) bytes of data.
64 bytes from centos-net-01.mynet (192.168.0.2): icmp_seq=1 ttl=64 time=0.066 ms
64 bytes from centos-net-01.mynet (192.168.0.2): icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from centos-net-01.mynet (192.168.0.2): icmp_seq=3 ttl=64 time=0.053 ms
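After docker network connect, centos01 holds one endpoint (and one IP) on each network. A quick way to confirm this, and to undo it (a sketch; the template output may vary by Docker version):
# List every network centos01 is attached to, with its IP on each
docker inspect centos01 -f '{{range $net, $cfg := .NetworkSettings.Networks}}{{$net}} -> {{$cfg.IPAddress}}{{println}}{{end}}'
# Detach it again if needed
docker network disconnect mynet centos01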
# Hands-on: deploying a Redis cluster
# Create a bridge network for Redis
[root@VM-20-7-centos ~]# docker network create redis --subnet 172.38.0.0/16
8c7119216a61cd986a98b39d238404aaea2915ad584d980e259135e4846589d0
[root@VM-20-7-centos ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
09972acd76c8 bridge bridge local
8870905238c3 host host local
03d7b4b35b5a mynet bridge local
4b1583138926 none null local
8c7119216a61 redis bridge local
# Generate 6 Redis config files with a script
for port in $(seq 1 6);
do
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
# Start 6 Redis containers with a shell script
start_port=1
end_port=6
for port in $(seq $start_port $end_port);
do
docker run -p 637${port}:6379 \
-p 1637${port}:16379 \
--name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.1${port} \
redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
done
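Before building the cluster it is worth checking that all six nodes came up with the expected addresses (a quick check; output formatting may differ slightly between Docker versions):
# All six containers should be Up, with 637x/1637x mapped on the host
docker ps --filter "name=redis-" --format '{{.Names}}  {{.Status}}  {{.Ports}}'
# Each node should hold its fixed IP on the redis network
docker network inspect redis -f '{{range .Containers}}{{.Name}} {{.IPv4Address}}{{println}}{{end}}'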
# Enter one of the nodes (e.g. docker exec -it redis-1 /bin/sh) and create the cluster; type yes when prompted
redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
# Test the cluster
/data # redis-cli -c
127.0.0.1:6379> cluster nodes # list the cluster nodes
127.0.0.1:6379> set huya dish # the key hashes to a slot served by the node at 172.38.0.12
-> Redirected to slot [8623] located at 172.38.0.12:6379
OK
Testing node-failure behaviour
# Stop the redis-2 node (the master that held the key above)
[root@VM-20-7-centos ~]# docker stop redis-2
redis-2
# Back inside redis-1, query the cluster: the request is now redirected to 172.38.0.16, the replica that took over for redis-2
/data # redis-cli -c
127.0.0.1:6379> get huya
-> Redirected to slot [8623] located at 172.38.0.16:6379
"dish"
Redis cluster on Docker: done!
# Packaging a Spring Boot microservice as a Docker image
1. Build the Spring Boot project
2. Package the application (see the packaging sketch below)
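Assuming a Maven build (adjust accordingly for Gradle), packaging usually comes down to:
# Produce the executable jar under target/, next to where the Dockerfile will live
mvn clean package -DskipTests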
3. Write the Dockerfile
FROM java:8
LABEL authors="oldstag"
COPY *.jar /app.jar
CMD ["--server.port=8080"]
EXPOSE 8080
ENTRYPOINT ["java","-jar","/app.jar"]
Upload the jar and the Dockerfile to the server.
4. Build the image
docker build -t springboot:1.0 .
5. Run it
[root@VM-20-7-centos idea]# docker run -d -P -it --name springboot-demo springboot:1.0
8383c2e96f399f9a62a824c5e223ea827eed00fa9dbc52cfb7d5968b2ce684c9
# The host assigned the random port 32796, mapped to the container's 8080
[root@VM-20-7-centos idea]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8383c2e96f39 springboot:1.0 "java -jar /app.jar …" 3 seconds ago Up 2 seconds 0.0.0.0:32796->8080/tcp, :::32796->8080/tcp springboot-demo
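A quick smoke test against the mapped port (the path depends on what the demo application actually exposes; /hello below is just a placeholder):
curl http://localhost:32796/hello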
# Docker Compose
# Docker Swarm
# Docker Stack
# Docker Secret
# Docker Config