
Docker Swarm installation and basic usage

Install Docker on all nodes

curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
sed -i 's#download.docker.com#mirrors.tuna.tsinghua.edu.cn/docker-ce#g' /etc/yum.repos.d/docker-ce.repo
yum install docker-ce -y    # install the latest version
yum install docker-ce-18.06.3.ce  -y  # install a specific version
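
# Then start Docker and enable it at boot on every node (a minimal sketch, assuming systemd on CentOS 7):
systemctl enable docker
systemctl start docker
docker version   # verify the daemon is reachable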

Node planning

IP          Role           Hostname
10.0.0.11   manager node   k8s-master
10.0.0.12   worker node    k8s-slave1
10.0.0.13   worker node    k8s-slave2
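
Optionally, add host entries on every node so the hostnames in the table resolve (a minimal sketch matching the plan above):

cat >> /etc/hosts <<'EOF'
10.0.0.11 k8s-master
10.0.0.12 k8s-slave1
10.0.0.13 k8s-slave2
EOF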

Install Swarm

# Initialize the swarm; 10.0.0.11 is this machine's (advertise) IP
docker swarm init --advertise-addr 10.0.0.11

# Copy the generated command and run it on the other worker nodes; they will join the cluster automatically
docker swarm join --token SWMTKN-1-0eufh9m99nc2nuijhn35cc1fkqk68kzqs09xlfpj9wt71wgo8f-1xl2mnz3qc4uv9k89qmef15cq 10.0.0.11:2377
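
# On the manager, confirm that all nodes have joined:
docker node ls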

Adding and removing nodes

# Add nodes
    Add a worker node (this command prints the join command for workers)
    docker swarm join-token worker

    Add a manager node (this command prints the join command for managers)
    docker swarm join-token manager

# Remove node node130 (a combined walk-through follows below)

    # Drain it first, so its tasks are migrated to other available nodes and the services stay healthy
    docker node update --availability drain <node-name|node-ID>   # drain
    docker node update --availability active <node-name|node-ID>  # restore

    docker node demote node130   # on a manager: demote it from manager to worker; skip this step if the node being removed is not a manager
    docker swarm leave           # on the node itself: leave the cluster
    docker node rm node130       # on a manager: remove the node

    docker node promote <worker-hostname>   # promote a worker to manager
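
    # Putting it together, a minimal removal sequence for a worker named node130:
    docker node update --availability drain node130   # on a manager: migrate its tasks away
    docker node ps node130                            # wait until no tasks remain on it
    docker swarm leave                                # on node130 itself
    docker node rm node130                            # back on a manager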

# Disband the cluster
docker swarm leave --force   # run on the manager

docker service ls              # list services
docker node ls                 # list nodes
docker service ps nginx        # list a service's tasks
docker volume ls               # list volumes
docker service inspect nginx   # inspect a service (shows its load-balancing/endpoint mode)
Official docs: https://docs.docker.com/compose/compose-file/

Networking

Overview: create an overlay network for cross-host communication within the cluster.

docker network create -d overlay  kang   # create a network; without -d to pick a driver, a bridge network is created by default, which swarm services cannot use
docker network rm kang                   # delete the kang network
docker service update --network-add <network-name> <service-name>   # attach a service to a network
docker service update --network-rm <network-name> <service-name>    # detach a service from a network

# Inspect network information
docker network ls
docker network inspect kang
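
# By default an overlay network is usable only by swarm services. To let standalone
# containers (docker run) attach to it as well, create it with --attachable
# (a minimal sketch; the network name kang follows the example above):
docker network create -d overlay --attachable kang
docker run --rm -it --network kang busybox sh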

Creating services

Ports and replica count:
docker service create   the base command
--replicas 2            number of replicas
--network kang          network to use (the overlay network)
--name busybox          service name
-p 82:80                port mapping; 82 is the VIP (published) port, 80 is the container port

# Start two replicas with --replicas 2; --mode global instead runs exactly one task on every node and cannot be combined with --replicas
docker service create --replicas 2 --network kang --name busybox -p 82:80 busybox sleep 3600

# Map external port 8080 to container port 80: -p 8080:80
docker service create --name nginx -p 8080:80 --replicas 2 nginx

Volume mounts
# Named-volume mount: --mount=type=volume,src=nginx,dst=/usr
docker service create --replicas 2 --network kang --name hello -p 82:80 --mount=type=volume,src=nginx,dst=/usr  busybox sleep 36000
    --mount=type=volume,src=nginx,dst=/usr     equivalent to docker run's  -v nginx:/usr
    --mount=type=bind,src=/home,dst=/usr       equivalent to docker run's  -v /home:/usr
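
# To confirm a mount took effect, inspect the service spec (a minimal check for the hello service above):
docker service inspect --format '{{json .Spec.TaskTemplate.ContainerSpec.Mounts}}' hello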
Labels and placement constraints
# Pin the service to a specific node: --constraint=node.hostname==<hostname>
docker service create --name hello -e MYSQL_ROOT_PASSWORD=654321 --constraint=node.hostname==k8s-slave2  busybox sleep 36000

    --constraint=node.hostname==node131   place tasks on the node with hostname node131
    --constraint=node.role==manager       place tasks on manager nodes
    --constraint=node.role==worker        place tasks on worker nodes
    --constraint=node.labels.role==web    place tasks on nodes labeled role=web

    # Add the label role=web2 to node k8s-slave2
    docker node update --label-add role=web2 k8s-slave2
    # Remove the label (--label-rm takes only the key, followed by the node name)
    docker node update --label-rm role k8s-slave2
    # View labels ("role": "web2")
    docker node inspect  k8s-slave2
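
    # With the label in place you can target it in a constraint (a minimal sketch using the role=web2 label added above):
    docker service create --name web2-only --constraint=node.labels.role==web2 --replicas 1 nginx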
Querying
docker service  ls
docker service  ps busybox
docker node inspect  k8s-slave2   # view labels
docker service logs -f <service-name>
# Remove a service
docker service rm nginx
docker service inspect my_web
# With --network kang the services share one network; from inside a container, pinging busybox resolves to the service's VIP
# Published port 8080 answers on all three machines whether or not an nginx task runs there, and the port is bound on every node (swarm's routing mesh)
# With named-volume mounts each replica's data is independent; changing one container's contents does not affect the others

# NFS-backed shared volume

docker service create --mount 'type=volume,src=nfs-test,dst=/usr/share/nginx/html,volume-driver=local,volume-opt=type=nfs,volume-opt=device=10.0.0.11:/opt/docker/wwwroot,"volume-opt=o=addr=10.0.0.11,vers=4,soft,timeo=180,bg,tcp,rw"' -p 8888:80 --name nginx nginx:1.12
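
# The command above assumes an NFS server on 10.0.0.11 exporting /opt/docker/wwwroot.
# A minimal sketch of that export, run on 10.0.0.11 (the export options are an assumption; adjust to your environment):
yum install nfs-utils -y
mkdir -p /opt/docker/wwwroot
echo '/opt/docker/wwwroot 10.0.0.0/24(rw,sync,no_root_squash)' >> /etc/exports
systemctl enable --now nfs-server
exportfs -r   # reload the export table
# Every node that may run a task also needs nfs-utils installed so it can mount the share.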

Image updates

# Replace the nginx image with httpd, pausing 3s between batches and updating 5 tasks per batch
docker service update --image httpd --update-delay 3s --update-parallelism 5 web

# Scale the service up to 4 replicas
docker service scale web=4

# Publish an additional port on a service
docker service update --publish-add <published-port>:<container-port> <service-name>
docker service update --publish-rm <published-port>:<container-port> <service-name>    # remove a published port
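
# If an update misbehaves, swarm can return a service to its previous spec:
docker service update --rollback web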

High availability

# If the swarm loses quorum it cannot recover automatically. Tasks on worker nodes keep running unaffected, but management operations become impossible, including scaling or updating services and joining or removing nodes.
# The best recovery is to bring the lost leader node back online. If that is impossible, the only option is --force-new-cluster, which strips manager status from every node except the one it runs on.
docker swarm init --force-new-cluster  # strips manager status from all other nodes; must be run on a surviving manager node


docker node promote <worker-hostname>   # promote a worker to manager
docker node demote node130              # demote a manager to worker

Cluster YAML files (stacks)

# Deploy the stack
docker stack deploy -c docker-compose.yml mysql

# Remove the stack (this also stops its services)
docker stack rm mysql
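
# Check on a deployed stack:
docker stack ls                # list stacks
docker stack services mysql    # services in the stack
docker stack ps mysql          # tasks in the stack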
Installing MySQL
# Preparation (these bind-mounted directories must exist on every node that may run the task)
mkdir -p /home/mysql/data
mkdir -p /home/mysql/conf

vim /home/mysql/conf/my.cnf
[mysqld]
# innodb_buffer_pool_size = 128M
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M

default-authentication-plugin=mysql_native_password
skip-host-cache
server_id=6
skip-name-resolve
port=3306
datadir=/var/lib/mysql
socket=/var/run/mysqld/mysqld.sock
secure-file-priv=/var/lib/mysql-files
user=mysql
pid-file=/var/run/mysqld/mysqld.pid
[client]
socket=/var/run/mysqld/mysqld.sock
prompt=3306 [\d]>

# Edit docker-compose.yml
version: "3"
services:
  mysql:
    environment:
      MYSQL_ROOT_PASSWORD: "root"
    image: "mysql:5.7"
    container_name: mysql
    networks:
      - kang
    volumes:
      - "/home/mysql/data:/var/lib/mysql"
      - "/home/mysql/conf/my.cnf:/etc/my.cnf"
    ports:
      - "3306:3306"
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]
networks:
  kang:
    driver: overlay
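
# After docker stack deploy -c docker-compose.yml mysql, verify from any node
# (a minimal check; the root password "root" comes from the compose file above):
docker run --rm -it mysql:5.7 mysql -h10.0.0.11 -P3306 -uroot -proot -e 'select version();'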
Installing RabbitMQ
# Edit docker-compose.yml
version: "3"
services:
  rabbitmq:
    image: "rabbitmq:latest"
    container_name: rabbitmq
    environment:
      - RABBITMQ_DEFAULT_USER=root
      - RABBITMQ_DEFAULT_PASS=root
    restart: always
    networks:
      - kang
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]
    ports:
      - "15672:15672"
      - "5672:5672"
networks:
  kang:
    driver: overlay
Installing Redis
# Preparation
mkdir -p /home/redis/conf/
mkdir -p /home/redis/data/
vi  /home/redis/conf/redis.conf   # see the redis.conf listing under "Configuration files" at the end

# Edit docker-compose.yml
version: "3"
services:
  redis:
    image: "redis:latest"
    container_name: redis
    restart: always
    networks:
      - kang
    ports:
      - 6379:6379
    volumes:
      - /home/redis/conf/redis.conf:/etc/redis/redis.conf:rw
      - /home/redis/data/:/data:rw
    command:
      # command the container runs; --appendonly yes enables AOF persistence
      redis-server /etc/redis/redis.conf --appendonly yes
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]
networks:
  kang:
    driver: overlay
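
# Quick check from any node (the password "food" comes from requirepass in the redis.conf listed under "Configuration files"):
docker run --rm -it redis:latest redis-cli -h 10.0.0.11 -p 6379 -a food ping   # expect PONG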
Installing Tomcat
# Preparation
mkdir -p /home/tomcat/webapps
mkdir -p /home/tomcat/logs
 

# Edit docker-compose.yml
version: "3"
services:
  tomcat:
    image: "tomcat:9.0.0.M10"
    container_name: tomcat
    restart: always
    depends_on:
      - mysql
    volumes:
      - "/home/tomcat/webapps:/usr/local/tomcat/webapps"
      - "tomcat-conf:/usr/local/tomcat/conf"
      - "/home/tomcat/logs:/usr/local/tomcat/logs"
    networks:
      - kang
    ports:
      - "8080:8080"
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]

networks:
  kang:
    driver: overlay
volumes:
  tomcat-conf:
Installing Nginx and PHP
# Preparation
mkdir -p /home/nginx/html
mkdir -p /home/nginx/conf
mkdir -p /home/nginx/logs
 

# Edit docker-compose.yml
version: "3"
services:
  nginx:
    image: "nginx"
    container_name: nginx
    restart: always
    volumes:
      - "/home/nginx/html:/usr/share/nginx/html"
      - "/home/nginx/conf:/etc/nginx/conf.d"
      - "/home/nginx/logs:/var/log/nginx"
    networks:
      - kang
    ports:
      - "80:80"
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]
  php:
    image: "php:7.1.30-fpm"
    container_name: php
    restart: always
    volumes:
      - "/home/nginx/html:/www"
    networks:
      - kang
    ports:
      - "9000:9000"
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]

networks:
  kang:
    driver: overlay
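
# For nginx to hand .php requests to the php-fpm service, drop a server block into
# /home/nginx/conf (a minimal sketch; the file name default.conf and the /www docroot
# are assumptions matching the php volume above; "php" resolves via the kang overlay network):
cat > /home/nginx/conf/default.conf <<'EOF'
server {
    listen 80;
    root /usr/share/nginx/html;
    index index.php index.html;
    location ~ \.php$ {
        fastcgi_pass php:9000;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME /www$fastcgi_script_name;
        include fastcgi_params;
    }
}
EOF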
Installing FastDFS
# Preparation
mkdir -p /home/fdfs/tracker
mkdir -p /home/fdfs/storage

 
# Edit docker-compose.yml
version: "3"
services:
  tracker:
    image: "delron/fastdfs"
    networks:
      - kang
    container_name: tracker
    volumes:
      - "/home/fdfs/tracker:/var/fdfs"
    ports:
      - "22122:22122"
    command:
      tracker
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]
  storage:
    environment:
      - TRACKER_SERVER=tracker:22122
      - GROUP_NAME=group1
    image: "delron/fastdfs"
    networks:
      - kang
    container_name: storage
    volumes:
      - "/home/fdfs/storage:/var/fdfs"
    ports:
      - "23000:23000"
      - "8888:8888"
    command:
      storage
    deploy:
      mode: replicated
      replicas: 2
      placement:
#        constraints: [node.hostname == node131]
#        constraints: [node.role == manager]
        constraints: [node.role == worker]

networks:
  kang:
    driver: overlay

Monitoring and management UIs

visualizer

Web UI: 10.0.0.11:8088

docker service create --name viz --publish=8088:8080/tcp --constraint=node.role==manager --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock dockersamples/visualizer
portainer

Web UI: 10.0.0.11:9000

docker run -d -p 9000:9000 --restart=always  -v /var/run/docker.sock:/var/run/docker.sock  -v /root/public:/public --name portainer  portainer/portainer
docker-ui

Web UI: 10.0.0.11:8999

docker run -d -p8999:9000 --privileged -v /var/run/docker.sock:/var/run/docker.sock uifd/ui-for-docker

Configuration files

redis.conf
bind 0.0.0.0
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile /data/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
requirepass food
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes


