
Deploying a Kubernetes Cluster with an External etcd Cluster

Our production environment runs a Kubernetes cluster backed by an external etcd cluster. To rehearse the certificate-renewal procedure, we need to build a matching environment for validation.

I. Environment Preparation

Host       IP               VIP              Role
master01   192.168.137.20   192.168.137.200  etcd01, master
master02   192.168.137.21   192.168.137.200  etcd02, master
node01     192.168.137.22   -                etcd03, node

II. Environment Initialization

Perform the following steps on all three nodes:

###### Set the hostname (run the matching command on each host) and configure /etc/hosts

hostnamectl set-hostname master01
hostnamectl set-hostname master02
hostnamectl set-hostname node01

Then log in to each of the three hosts and append the entries:

echo '192.168.137.20 master01' >> /etc/hosts
echo '192.168.137.21 master02' >> /etc/hosts
echo '192.168.137.22 node01' >> /etc/hosts
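
Equivalently, all three entries can be appended in one step with a heredoc (a convenience variant of the echo commands above; run on every node):

cat >> /etc/hosts <<EOF
192.168.137.20 master01
192.168.137.21 master02
192.168.137.22 node01
EOF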

###### Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

###### Disable SELinux

setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

###### Disable swap

swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
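
Alternatively (a sketch assuming GNU sed), the swap entry can be commented out in place instead of rewriting the file:

sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab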

###### Install dependency packages

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git telnet nfs-utils

###### Configure the yum base and EPEL repositories

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

yum clean all

yum makecache

###### Configure kernel parameters (/etc/sysctl.d/k8s.conf)

modprobe br_netfilter

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

sysctl -p /etc/sysctl.d/k8s.conf

###### Enable IPVS

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
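
Note: on kernel 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so the modprobe above will fail there. If that happens, load the merged module instead:

uname -r                    # check the kernel version first
modprobe -- nf_conntrack    # replaces nf_conntrack_ipv4 on kernel >= 4.19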

###### Set up the Docker yum repository

yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

###### Install and start Docker

yum install -y docker-ce-18.09.8 docker-ce-cli-18.09.8 containerd.io

Alternatively, to install the latest version:

yum update -y && yum install -y docker-ce

Create the /etc/docker directory and configure daemon.json:

# mkdir /etc/docker

# Configure daemon.json

cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://j2n2rp9s.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
  "max-size": "100m"
  }
}
EOF

Create the docker.service.d directory:

# mkdir -p /etc/systemd/system/docker.service.d

Reload systemd, restart Docker, and enable it at boot:

# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
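
To confirm Docker picked up the systemd cgroup driver from daemon.json:

# docker info | grep -i cgroup

The output should include "Cgroup Driver: systemd".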

III. Install keepalived and Create the VIP

1. Install keepalived
On both master01 and master02:

yum install -y keepalived

2. Configure keepalived
master01 configuration:
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
vrrp_instance VI_1 {
    state MASTER                  # change to BACKUP on the backup node
    interface ens33               # primary network interface
    virtual_router_id 44          # virtual router ID; must match on both nodes
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111            # auth password must match on both nodes
    }
    virtual_ipaddress {           # virtual IP (VIP)
        192.168.137.200
    }
}

virtual_server 192.168.137.200 6443 {    # external virtual IP address
    delay_loop 6                  # health-check interval, in seconds
    lb_algo rr                    # scheduling algorithm: rr = round-robin
    lb_kind DR                    # LVS forwarding mode: DR (direct routing)
    protocol TCP                  # check realserver state over TCP

    real_server 192.168.137.20 6443 {    # first node
        weight 3                  # node weight
        TCP_CHECK {               # health-check method
            connect_timeout 3     # connect timeout (s)
            nb_get_retry 3        # retry count
            delay_before_retry 3  # delay between retries (s)
        }
    }

    real_server 192.168.137.21 6443 {    # second node
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

master02 configuration:
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
vrrp_instance VI_1 {
    state BACKUP                  # BACKUP on the backup node
    interface ens33               # primary network interface
    virtual_router_id 44          # virtual router ID; must match on both nodes
    priority 80                   # lower priority than the MASTER
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111            # auth password must match on both nodes
    }
    virtual_ipaddress {           # virtual IP (VIP)
        192.168.137.200
    }
}

virtual_server 192.168.137.200 6443 {    # external virtual IP address
    delay_loop 6                  # health-check interval, in seconds
    lb_algo rr                    # scheduling algorithm: rr = round-robin
    lb_kind DR                    # LVS forwarding mode: DR (direct routing)
    protocol TCP                  # check realserver state over TCP

    real_server 192.168.137.20 6443 {    # first node
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.137.21 6443 {    # second node
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

Remove the inline comments when writing the actual configuration files.
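
After both nodes are configured, start keepalived and confirm the VIP lands on master01 (a quick check, assuming the ens33 interface from the config above):

systemctl enable keepalived && systemctl start keepalived
ip addr show ens33 | grep 192.168.137.200   # the VIP should be bound on the MASTER node

For a failover test, stop keepalived on master01 and verify the VIP moves to master02.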

IV. Deploying the External etcd Cluster

1. Install the required cfssl tools (only needed on 192.168.137.20)

First, download the binaries:

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

Make them executable:

chmod +x cfssl*

Rename them:

for x in cfssl*; do mv $x ${x%*_linux-amd64};  done

Move them into /usr/bin:

mv cfssl* /usr/bin

2. Configure the CA and Create TLS Certificates

We will use CloudFlare's PKI toolkit, cfssl, to build the PKI infrastructure: first create a Certificate Authority (CA), then use it to issue TLS certificates for etcd.

(1) Create the directories

mkdir /opt/etcd/{bin,cfg,ssl} -p

cd /opt/etcd/ssl/

(2) Create the CA certificate
vim ca-config.json

{
    "signing": {
        "default": {
            "expiry": "438000h"
        },
        "profiles": {
            "server": {
                "expiry": "438000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "client": {
                "expiry": "438000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "438000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

server auth means a client can use this CA to verify certificates presented by servers.
client auth means a server can use this CA to verify certificates presented by clients.

(3) Create the certificate signing request ca-csr.json

vim ca-csr.json

{
    "CN": "etcd",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}

Generate the CA certificate and private key:

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# ls ca*

# ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
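
Before issuing certificates, the CA's validity window can be inspected with cfssl-certinfo (installed earlier alongside cfssl):

cfssl-certinfo -cert ca.pem | grep -E '"not_(before|after)"'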


Generate the client certificate

vim client.json

{
    "CN": "client",
    "key": {
        "algo": "ecdsa",
        "size": 256
    }
}

Generate:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json  | cfssljson -bare client -

# ls 

# ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem client-key.pem client.pem


Generate the server and peer certificates

Create the config file (vim etcd.json):

{
    "CN": "etcd",
    "hosts": [
        "192.168.137.20",
        "192.168.137.21",
        "192.168.137.22"
    ],
    "key": {
        "algo": "ecdsa",
        "size": 256
    },
    "names": [
        {
            "C": "CN",
            "L": "BJ",
            "ST": "BJ"
        }
    ]
}

Generate:

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server etcd.json | cfssljson -bare server

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd.json | cfssljson -bare peer
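
It is worth confirming that all three node IPs made it into the SAN list and that the validity matches the 438000h profile (openssl assumed available):

openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'
openssl x509 -in server.pem -noout -dates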


Sync the /opt/etcd/ssl directory on master01 to master02 and node01 (make sure /opt/etcd already exists on the target hosts):

scp -r /opt/etcd/ssl 192.168.137.21:/opt/etcd

scp -r /opt/etcd/ssl 192.168.137.22:/opt/etcd

3. Install etcd

(1) Download etcd v3.4.13

cd /opt
wget https://storage.googleapis.com/etcd/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
tar -zxvf etcd-v3.4.13-linux-amd64.tar.gz && cd etcd-v3.4.13-linux-amd64 && cp etcd etcdctl /opt/etcd/bin/
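
A quick sanity check that the binaries are in place and report the expected version:

/opt/etcd/bin/etcd --version
/opt/etcd/bin/etcdctl version   # note: the v3 etcdctl uses "version" without dashes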

(2) Configure the main etcd config file

vim /opt/etcd/cfg/etcd.conf

#[Member]
ETCD_NAME="etcd01"   #修改为etcd01
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.137.20:2380"     #master01地址
ETCD_LISTEN_CLIENT_URLS="https://192.168.137.20:2379,http://127.0.0.1:2379"  #master01地址

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.137.20:2380"   #master01地址
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.137.20:2379"           #master01地址
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.137.20:2380,etcd02=https://192.168.137.21:2380,etcd03=https://192.168.137.22:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2="true"

#[Security]
ETCD_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_PEER_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"

Delete the inline comments when actually editing the file.

Note: in etcd 3.4, ETCDCTL_API=3 is the default for etcdctl and the server defaults to --enable-v2=false. To use the v2 API you must set the ETCDCTL_API environment variable when invoking etcdctl, e.g. ETCDCTL_API=2 etcdctl. flannel talks to etcd over the v2 API, while Kubernetes uses v3; to stay compatible with flannel we enable v2 by default, hence ETCD_ENABLE_V2="true" in the config file.
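
For example, once the cluster is running, a v2-API query would look like this (the v2 etcdctl uses the older --ca-file/--cert-file/--key-file flag names):

ETCDCTL_API=2 /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.137.20:2379" ls /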

(3) Configure the etcd systemd unit file

vim /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd 
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Note: etcd 3.4 automatically reads its parameters from environment variables, so a parameter set in EnvironmentFile must not be repeated as an ExecStart flag. Use one or the other; configuring both triggers errors like: etcd: conflicting environment variable "ETCD_NAME" is shadowed by corresponding command-line flag (either unset environment variable or disable flag).

(4) Distribute the files to the other two nodes (192.168.137.21, 192.168.137.22) and adjust them

scp -r /opt/etcd/ 192.168.137.21:/opt/

scp -r /opt/etcd/ 192.168.137.22:/opt/

scp /usr/lib/systemd/system/etcd.service 192.168.137.21:/usr/lib/systemd/system

scp /usr/lib/systemd/system/etcd.service 192.168.137.22:/usr/lib/systemd/system

/opt/etcd/cfg/etcd.conf on 192.168.137.21:

#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.137.21:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.137.21:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.137.21:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.137.21:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.137.20:2380,etcd02=https://192.168.137.21:2380,etcd03=https://192.168.137.22:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2="true"

#[Security]
ETCD_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_PEER_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"

/opt/etcd/cfg/etcd.conf on 192.168.137.22:

#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.137.22:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.137.22:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.137.22:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.137.22:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.137.20:2380,etcd02=https://192.168.137.21:2380,etcd03=https://192.168.137.22:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2="true"

#[Security]
ETCD_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/opt/etcd/ssl/server.pem"
ETCD_PEER_KEY_FILE="/opt/etcd/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/opt/etcd/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
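
Instead of editing the copied files by hand, a sed one-liner can adapt them per node. A sketch for master02 (GNU sed assumed; the address filter deliberately leaves ETCD_INITIAL_CLUSTER untouched):

sed -i -e 's/"etcd01"/"etcd02"/' \
    -e '/^ETCD_\(LISTEN\|INITIAL_ADVERTISE\|ADVERTISE\)/s/192.168.137.20/192.168.137.21/g' \
    /opt/etcd/cfg/etcd.conf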

4. Start the etcd Service (on all three nodes)

systemctl daemon-reload

systemctl restart etcd

systemctl status etcd


5. Verify etcd Status

Check endpoint status:

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.137.20:2379,https://192.168.137.21:2379,https://192.168.137.22:2379" endpoint status --write-out=table


Check endpoint health:

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.137.20:2379,https://192.168.137.21:2379,https://192.168.137.22:2379" endpoint health


List the cluster members:

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.137.20:2379,https://192.168.137.21:2379,https://192.168.137.22:2379" member list
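
To avoid repeating the TLS flags, etcdctl 3.4 also reads them from environment variables:

export ETCDCTL_CACERT=/opt/etcd/ssl/ca.pem
export ETCDCTL_CERT=/opt/etcd/ssl/server.pem
export ETCDCTL_KEY=/opt/etcd/ssl/server-key.pem
export ETCDCTL_ENDPOINTS="https://192.168.137.20:2379,https://192.168.137.21:2379,https://192.168.137.22:2379"
/opt/etcd/bin/etcdctl endpoint status --write-out=table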
