Environment Preparation

Cluster plan. The test environment in this article follows the multi-master, multi-node layout below; the hosts file names k8s01-k8s05 map to the roles in the table, and per-node parameter changes are called out along the way.

Hostname IP address Role
k8s01 (master01) 192.168.200.51 master node
k8s02 (master02) 192.168.200.52 master node
k8s03 (master03) 192.168.200.53 master node
k8s04 (node01) 192.168.200.54 worker node
k8s05 (node02) 192.168.200.55 worker node
master-lb 192.168.200.50:6443 nginx proxy listen address for kube-apiserver

Basic environment configuration

Configure /etc/hosts

[root@k8s01 ~]# cat >>/etc/hosts<<EOF
192.168.200.51 k8s01 k8s01.op.local
192.168.200.52 k8s02 k8s02.op.local
192.168.200.53 k8s03 k8s03.op.local
192.168.200.54 k8s04 k8s04.op.local
192.168.200.55 k8s05 k8s05.op.local
EOF

Disable firewalld, dnsmasq, postfix, NetworkManager, SELinux, and swap on all nodes

#Disable the firewall
[root@k8s01 ~]# systemctl disable --now firewalld
#Disable dnsmasq
[root@k8s01 ~]# systemctl disable --now dnsmasq
#Disable postfix
[root@k8s01 ~]# systemctl disable --now postfix
#Disable NetworkManager
[root@k8s01 ~]# systemctl disable --now NetworkManager
#Disable SELinux (the config change takes effect after reboot; setenforce applies it now)
[root@k8s01 ~]# sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0
#Disable swap
[root@k8s01 ~]# sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a
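#Verify swap is off: the Swap line from free should read all zeros
[root@k8s01 ~]# free -h | grep -i swap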

Raise resource limits on all nodes

[root@k8s01 ~]# cat > /etc/security/limits.conf <<EOF
*       soft        core        unlimited
*       hard        core        unlimited
*       soft        nproc       1000000
*       hard        nproc       1000000
*       soft        nofile      1000000
*       hard        nofile      1000000
*       soft        memlock     32000
*       hard        memlock     32000
*       soft        msgqueue    8192000
EOF
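#The limits apply to new sessions only; re-login and verify
[root@k8s01 ~]# ulimit -n -u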

Passwordless SSH login

[root@k8s01 ~]# cat ssh.sh
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="192.168.200.51 192.168.200.52 192.168.200.53 192.168.200.54 192.168.200.55"
export SSHPASS=123456
for HOST in $IP;do
     sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
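#Run the script, then spot-check key-based login on every host (same IP list as in the script)
[root@k8s01 ~]# bash ssh.sh
[root@k8s01 ~]# for HOST in 192.168.200.{51..55}; do ssh -o BatchMode=yes $HOST hostname; done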

Upgrade the system and kernel

Install the new kernel version

#Import the ELRepo GPG key
[root@k8s01 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

#Install the latest ELRepo release package
[root@k8s01 ~]# yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm

#List available kernel packages: lt = long-term support, ml = mainline (latest stable)
[root@k8s01 ~]# yum list available --disablerepo=* --enablerepo=elrepo-kernel

#Install a specific kernel version
[root@k8s01 ~]# yum install -y kernel-lt-5.4.199-1.el7.elrepo --enablerepo=elrepo-kernel

Set the default kernel used at boot

#List the kernels known to GRUB
[root@k8s01 ~]# cat /boot/grub2/grub.cfg | grep menuentry
...
menuentry 'CentOS Linux 7 Rescue 033d258d6ff74053b1fc22f512e9431d (5.4.199-1.el7.elrepo.x86_64)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-1160.62.1.el7.x86_64-advanced-2c04c946-7fee-41c2-a99f-f53e2532e4f7' {
menuentry 'CentOS Linux (5.4.199-1.el7.elrepo.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-1160.62.1.el7.x86_64-advanced-2c04c946-7fee-41c2-a99f-f53e2532e4f7' {
menuentry 'CentOS Linux (3.10.0-1160.62.1.el7.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-1160.62.1.el7.x86_64-advanced-2c04c946-7fee-41c2-a99f-f53e2532e4f7' {
menuentry 'CentOS Linux (0-rescue-073ab377b18d4451b42261c98789bc31) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-0-rescue-073ab377b18d4451b42261c98789bc31-advanced-2c04c946-7fee-41c2-a99f-f53e2532e4f7' {
    
#Set the new kernel as the default boot entry
[root@k8s01 ~]# grub2-set-default "CentOS Linux (5.4.199-1.el7.elrepo.x86_64) 7 (Core)"

#Check the default boot entry
[root@k8s01 ~]# grub2-editenv list
saved_entry=CentOS Linux (5.4.199-1.el7.elrepo.x86_64) 7 (Core)

Tune kernel parameters

#Write kernel parameters (comments go on their own lines; sysctl would treat a trailing "# ..." as part of the value)
[root@k8s01 ~]# cat >/etc/sysctl.conf<<EOF
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv4.neigh.default.gc_stale_time=120
# rp_filter defaults to 1; strict reverse-path validation can drop packets
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
# per-device network backlog queue length
net.core.netdev_max_backlog=16384
# max socket read/write buffer sizes for all protocols
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# SYN backlog (first queue) length
net.ipv4.tcp_max_syn_backlog = 8096
# accept backlog (second queue) length
net.core.somaxconn = 32768
# max inotify instances per real user ID (default 128)
fs.inotify.max_user_instances=8192
# max inotify watches per user (default 8192)
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
# avoid swap; use it only when the system is about to OOM
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
vm.max_map_count = 262144
EOF
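#br_netfilter must be loaded before the net.bridge.* keys can be applied
[root@k8s01 ~]# modprobe br_netfilter
[root@k8s01 ~]# sysctl -p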

#Load the IPVS modules at boot
[root@k8s01 ~]# cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
EOF
[root@k8s01 ~]# systemctl enable --now systemd-modules-load.service
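#Confirm the IPVS and conntrack modules are loaded
[root@k8s01 ~]# lsmod | grep -e ip_vs -e nf_conntrack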

Reboot and verify the kernel version

[root@k8s01 ~]# shutdown -r now
#or
[root@k8s01 ~]# reboot

#Check the kernel version
[root@k8s01 ~]# uname -r
5.4.199-1.el7.elrepo.x86_64

Install base utilities

#Install base utilities
[root@k8s01 ~]# yum install curl conntrack ipvsadm ipset iptables jq sysstat libseccomp rsync wget psmisc vim net-tools telnet -y

Tune journald logging

[root@k8s01 ~]# mkdir -p /var/log/journal && mkdir -p /etc/systemd/journald.conf.d
[root@k8s01 ~]# cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress archived logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Cap total disk usage at 3G
SystemMaxUse=3G
# Cap a single journal file at 100M
SystemMaxFileSize=100M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF
[root@k8s01 ~]# systemctl restart systemd-journald && systemctl enable systemd-journald
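#Verify journald disk usage stays under the new cap
[root@k8s01 ~]# journalctl --disk-usage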

Install Docker

Docker (static binaries here, version 19.03.15) must be installed on every node that runs kubelet.

[root@k8s01 ~]# wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.15.tgz
#Unpack
[root@k8s01 ~]# tar xf docker-19.03.15.tgz
#Copy the binaries
[root@k8s01 ~]# cp docker/* /usr/bin/

#Create the containerd service unit and start it
[root@k8s01 ~]# cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
[root@k8s01 ~]# systemctl enable --now containerd.service

#Create the docker service unit
[root@k8s01 ~]# cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target
EOF

#Create the docker socket unit
[root@k8s01 ~]# cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

#Create the docker group
[root@k8s01 ~]# groupadd docker
#Start docker
[root@k8s01 ~]# systemctl enable --now docker.socket && systemctl enable --now docker.service
#Verify
[root@k8s01 ~]# docker info
#Create the docker daemon config (the systemd cgroup driver matches the kubelet configuration below)
[root@k8s01 ~]# mkdir -p /etc/docker
[root@k8s01 ~]# cat >/etc/docker/daemon.json <<EOF
{
   "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s01 ~]# systemctl restart docker
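#Verify docker now uses the systemd cgroup driver
[root@k8s01 ~]# docker info 2>/dev/null | grep -i 'cgroup driver'
 Cgroup Driver: systemd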

Install the CNI plugins

#Create the directories required by the CNI plugins
[root@k8s01 ~]# mkdir -p /etc/cni/net.d /opt/cni/bin
[root@k8s01 ~]# wget https://github.com/containernetworking/plugins/releases/download/v1.1.0/cni-plugins-linux-amd64-v1.1.0.tgz
[root@k8s01 ~]# tar zxvf cni-plugins-linux-amd64-v1.1.0.tgz -C /opt/cni/bin
#Verify
[root@k8s01 ~]# ls /opt/cni/bin/
bandwidth  dhcp      host-device  ipvlan    macvlan  ptp  static  vlan
bridge     firewall  host-local   loopback  portmap  sbr  tuning  vrf

Deploy Nginx to proxy kube-apiserver

#Unpack (this assumes a prebuilt nginx binary tarball)
[root@k8s-nginx ~]# tar xf nginx.tar.gz -C /usr/bin/
#Generate the config file
[root@k8s-nginx ~]# mkdir /etc/nginx -p
[root@k8s-nginx ~]# mkdir /var/log/nginx -p
[root@k8s-nginx ~]# cat >/etc/nginx/nginx.conf<<EOF 
user root;
worker_processes 4;

error_log  /var/log/nginx/error.log warn;
pid /var/log/nginx/nginx.pid;

events {
    worker_connections  3000;
}

stream {
    upstream apiservers {
        server 192.168.200.51:6443  max_fails=2 fail_timeout=3s;
        server 192.168.200.52:6443  max_fails=2 fail_timeout=3s;
        server 192.168.200.53:6443  max_fails=2 fail_timeout=3s;
    }

    server {
        listen 6443;
        proxy_connect_timeout 1s;
        proxy_pass apiservers;
    }
}
EOF
#Generate the service unit
[root@k8s-nginx ~]# cat >/etc/systemd/system/nginx.service <<EOF
[Unit]
Description=nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx -t
ExecStart=/usr/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx
ExecReload=/usr/bin/nginx -c /etc/nginx/nginx.conf -p /etc/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=15
StartLimitInterval=0
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
#Start
[root@k8s-nginx ~]# systemctl enable --now nginx.service
#Verify it is listening
[root@k8s-nginx ~]# ss -ntl
State       Recv-Q Send-Q Local Address:Port               Peer Address:Port                             
LISTEN      0      511    0.0.0.0:6443                   *:*

Generate Kubernetes cluster certificates

Run this on master01 (k8s01) only; once generated, the files are simply copied to the other nodes.

Install the cfssl tools

[root@k8s01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s01 ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
[root@k8s01 ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@k8s01 ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@k8s01 ~]# mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
#Create the certificate directories
[root@k8s01 ~]# mkdir /opt/{etcd,kubernetes}/{ssl,cfg,bin,logs} -p

Generate the etcd certificates

Create the CA for etcd

[root@k8s01 ~]# cd /opt/etcd/
#Create the etcd CA config file
[root@k8s01 etcd]# cat > ca-config.json <<EOF
{
    "signing": {
         "default": {
             "expiry": "87600h"
        },
         "profiles": {
             "etcd": {
                 "expiry": "87600h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
}
EOF

#Create the CA CSR file
[root@k8s01 etcd]# cat > ca-csr.json <<EOF
{
  "CN": "etcd-cluster",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "etcd-cluster",
      "OU": "System"
    }
  ]
}
EOF

#Generate the CA certificate
[root@k8s01 etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
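#Sanity check: inspect the new CA's subject and validity window
[root@k8s01 etcd]# openssl x509 -in ca.pem -noout -subject -dates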

Generate the etcd server certificate

#Create the etcd server CSR
[root@k8s01 etcd]# cat > etcd-server-csr.json << EOF
{
  "CN": "etcd-server",
  "hosts": [
     "192.168.200.51",
     "192.168.200.52",
     "192.168.200.53",
     "127.0.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "etcd-server",
      "OU": "System"
    }
  ]
}
EOF
#Generate the certificate
[root@k8s01 etcd]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=etcd \
  etcd-server-csr.json | cfssljson -bare etcd-server

Generate the etcd client certificate

[root@k8s01 etcd]# cat > etcd-client-csr.json << EOF
{
  "CN": "etcd-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "etcd-client",
      "OU": "System"
    }
  ]
}
EOF
#Generate the certificate
[root@k8s01 etcd]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=etcd \
  etcd-client-csr.json | cfssljson -bare etcd-client

Verify

#Move the certificates into the ssl directory
[root@k8s01 etcd]# mv *.pem ssl/
[root@k8s01 etcd]# ls ssl/
ca-key.pem  ca.pem  etcd-client-key.pem  etcd-client.pem  etcd-server-key.pem  etcd-server.pem
[root@k8s01 etcd]# tree 
.
|-- ca-config.json
|-- ca.csr
|-- ca-csr.json
|-- cfg
|-- etcd-client.csr
|-- etcd-client-csr.json
|-- etcd-server.csr
|-- etcd-server-csr.json
`-- ssl
    |-- ca-key.pem
    |-- ca.pem
    |-- etcd-client-key.pem  #client private key
    |-- etcd-client.pem      #client certificate
    |-- etcd-server-key.pem  #server private key
    `-- etcd-server.pem      #server certificate

Copy the certificates to the other master nodes

master="k8s02 k8s03"
node="k8s04,k8s05"
for i in $master;do
  ssh $i "mkdir /opt/etcd/ssl -p"
  scp -r /opt/etcd/ssl/* $i:/opt/etcd/ssl
done

Generate certificates for the Kubernetes components

Create the Kubernetes CA

[root@k8s01 etcd]# cd /opt/kubernetes/
#Create the CA config file and CSR
[root@k8s01 kubernetes]# cat > ca-config.json <<EOF
{
    "signing": {
         "default": {
             "expiry": "87600h"
        },
         "profiles": {
             "kubernetes": {
                 "expiry": "87600h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
}
EOF
#Create the CSR file
[root@k8s01 kubernetes]# cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "kubernetes",
      "OU": "System"
    }
  ]
}
EOF
#Generate the CA certificate
[root@k8s01 kubernetes]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca

Create the kube-apiserver certificate

[root@k8s01 kubernetes]# cat > kube-apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "192.168.200.51",
    "192.168.200.52",
    "192.168.200.53",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
   ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "kube-apiserver",
      "OU": "System"
    }
  ]
}
EOF

#Generate the certificate
[root@k8s01 kubernetes]# cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-apiserver-csr.json | cfssljson -bare  kube-apiserver

Create the front-proxy CA and client certificate

#Create the front-proxy CA CSR
[root@k8s01 kubernetes]# cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
#Generate the CA
[root@k8s01 kubernetes]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
#Create the client CSR
[root@k8s01 kubernetes]# cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
#Generate the certificate
[root@k8s01 kubernetes]# cfssl gencert \
-ca=front-proxy-ca.pem \
-ca-key=front-proxy-ca-key.pem  \
-config=ca-config.json   \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client

Create the kube-controller-manager certificate and kubeconfig

#Create the CSR file
[root@k8s01 kubernetes]# cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF

#Generate the certificate
[root@k8s01 kubernetes]# cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#Generate the kubeconfig (the server address is the nginx proxy in front of the apiservers)
[root@k8s01 kubernetes]# kubectl config set-cluster kubernetes \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://192.168.200.50:6443 \
    --kubeconfig=kube-controller-manager.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=kube-controller-manager.pem \
    --client-key=kube-controller-manager-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-controller-manager.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-context default \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=kube-controller-manager.kubeconfig

[root@k8s01 kubernetes]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig

Generate the kube-scheduler certificate and kubeconfig

[root@k8s01 kubernetes]# cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
#Generate the certificate
[root@k8s01 kubernetes]# cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#Generate the kubeconfig
[root@k8s01 kubernetes]# kubectl config set-cluster kubernetes \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://192.168.200.50:6443 \
    --kubeconfig=kube-scheduler.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-credentials system:kube-scheduler \
    --client-certificate=kube-scheduler.pem \
    --client-key=kube-scheduler-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-scheduler.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-context default \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube-scheduler.kubeconfig

[root@k8s01 kubernetes]# kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig

Generate the cluster administrator certificate

[root@k8s01 kubernetes]# cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
#Generate the certificate
[root@k8s01 kubernetes]# cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare admin
#Generate the kubeconfig
[root@k8s01 kubernetes]# kubectl config set-cluster kubernetes \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://192.168.200.50:6443 \
    --kubeconfig=admin.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-credentials admin \
    --client-certificate=admin.pem \
    --client-key=admin-key.pem \
    --embed-certs=true \
    --kubeconfig=admin.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-context default \
    --cluster=kubernetes \
    --user=admin \
    --kubeconfig=admin.kubeconfig

[root@k8s01 kubernetes]# kubectl config use-context default --kubeconfig=admin.kubeconfig

Verify and distribute the certificates

#Move the certificates and kubeconfigs into the ssl directory
[root@k8s01 kubernetes]# mv *.pem ssl/ 
[root@k8s01 kubernetes]# mv *.kubeconfig ssl/
[root@k8s01 kubernetes]# tree 
.
|-- admin.csr
|-- admin-csr.json
|-- ca-config.json
|-- ca.csr
|-- ca-csr.json
|-- cfg
|-- front-proxy-ca.csr
|-- front-proxy-ca-csr.json
|-- front-proxy-client.csr
|-- front-proxy-client-csr.json
|-- kube-apiserver.csr
|-- kube-apiserver-csr.json
|-- kube-controller-manager.csr
|-- kube-controller-manager-csr.json
|-- kube-scheduler.csr
|-- kube-scheduler-csr.json
`-- ssl
    |-- admin-key.pem
    |-- admin.kubeconfig
    |-- admin.pem
    |-- ca-key.pem
    |-- ca.pem
    |-- front-proxy-ca-key.pem
    |-- front-proxy-ca.pem
    |-- front-proxy-client-key.pem
    |-- front-proxy-client.pem
    |-- kube-apiserver-key.pem
    |-- kube-apiserver.pem
    |-- kube-controller-manager-key.pem
    |-- kube-controller-manager.kubeconfig
    |-- kube-controller-manager.pem
    |-- kube-scheduler-key.pem
    |-- kube-scheduler.kubeconfig
    `-- kube-scheduler.pem
    
#Copy the certificates and kubeconfigs to the other master nodes
master="k8s02 k8s03"
node="k8s04 k8s05"
for i in $master;do
  ssh $i "mkdir /opt/kubernetes/ssl -p"
  scp -r /opt/kubernetes/ssl/* $i:/opt/kubernetes/ssl
done    

etcd cluster

Install etcd on the three master nodes. The following configures one node; on the other two, change the IPs and ETCD_NAME in the config file.

[root@k8s01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
[root@k8s01 ~]# tar xf etcd-v3.4.9-linux-amd64.tar.gz
[root@k8s01 ~]# mkdir -p /opt/etcd/bin
[root@k8s01 ~]# mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
[root@k8s01 ~]# rm -fr etcd-v3.4.9-linux-amd64*

Create the config file

[root@k8s01 ~]# mkdir -p /opt/etcd/cfg
[root@k8s01 ~]# cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.200.51:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.200.51:2379"
 
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.200.51:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.200.51:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.200.51:2380,etcd-2=https://192.168.200.52:2380,etcd-3=https://192.168.200.53:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS: client address advertised to the cluster
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: "new" for a fresh cluster, "existing" when joining one

Manage etcd with systemd

[root@k8s01 ~]# cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
 
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/etcd-server.pem \
--key-file=/opt/etcd/ssl/etcd-server-key.pem \
--peer-cert-file=/opt/etcd/ssl/etcd-server.pem \
--peer-key-file=/opt/etcd/ssl/etcd-server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

Start etcd and enable it at boot

[root@k8s01 ~]# systemctl daemon-reload
[root@k8s01 ~]# systemctl start etcd && systemctl enable etcd

Deploy etcd-2 and etcd-3 the same way; only the IPs and ETCD_NAME change.

Check cluster status

[root@k8s01 etcd]# ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/etcd-server.pem --key=/opt/etcd/ssl/etcd-server-key.pem --endpoints="https://192.168.200.51:2379,https://192.168.200.52:2379,https://192.168.200.53:2379" endpoint health
https://192.168.200.51:2379 is healthy: successfully committed proposal: took = 10.166838ms
https://192.168.200.52:2379 is healthy: successfully committed proposal: took = 9.949652ms
https://192.168.200.53:2379 is healthy: successfully committed proposal: took = 11.876573ms
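#endpoint status additionally shows which member is the current leader
[root@k8s01 etcd]# ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/etcd-server.pem --key=/opt/etcd/ssl/etcd-server-key.pem --endpoints="https://192.168.200.51:2379,https://192.168.200.52:2379,https://192.168.200.53:2379" endpoint status --write-out=table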

Deploy the Kubernetes cluster

Distribute the binaries

master="k8s01 k8s02 k8s03"
node="k8s04 k8s05"
tar xf kubernetes-server-linux-amd64.tar.gz
#Distribute the master components
for i in $master;do
  scp kubernetes/server/bin/{kubeadm,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubelet,kubectl} $i:/opt/kubernetes/bin
done
#Distribute the node components
for i in $node;do
  scp kubernetes/server/bin/{kube-proxy,kubelet} $i:/opt/kubernetes/bin
done

Install kube-apiserver

Create the ServiceAccount key pair

[root@k8s01 ~]# openssl genrsa -out /opt/kubernetes/ssl/sa.key 2048
[root@k8s01 ~]# openssl rsa -in /opt/kubernetes/ssl/sa.key -pubout -out /opt/kubernetes/ssl/sa.pub

Copy it to the other master nodes

master="k8s01 k8s02 k8s03"
#Distribute to the master nodes
for i in $master;do
  scp /opt/kubernetes/ssl/{sa.pub,sa.key} $i:/opt/kubernetes/ssl/
done

Create the config file

[root@k8s01 ~]#  cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--v=2  \\
--logtostderr=false  \\
--log-dir=/opt/kubernetes/logs \\
--allow-privileged=true  \\
--bind-address=0.0.0.0  \\
--secure-port=6443  \\
--insecure-port=0  \\
--advertise-address=192.168.200.51 \\
--service-cluster-ip-range=10.96.0.0/16  \\
--service-node-port-range=8000-42767  \\
--etcd-servers=https://192.168.200.51:2379,https://192.168.200.52:2379,https://192.168.200.53:2379 \\
--etcd-cafile=/opt/etcd/ssl/ca.pem  \\
--etcd-certfile=/opt/etcd/ssl/etcd-client.pem  \\
--etcd-keyfile=/opt/etcd/ssl/etcd-client-key.pem  \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem  \\
--tls-cert-file=/opt/kubernetes/ssl/kube-apiserver.pem  \\
--tls-private-key-file=/opt/kubernetes/ssl/kube-apiserver-key.pem  \\
--kubelet-client-certificate=/opt/kubernetes/ssl/kube-apiserver.pem  \\
--kubelet-client-key=/opt/kubernetes/ssl/kube-apiserver-key.pem  \\
--service-account-key-file=/opt/kubernetes/ssl/sa.pub  \\
--service-account-signing-key-file=/opt/kubernetes/ssl/sa.key  \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
--authorization-mode=Node,RBAC  \\
--enable-bootstrap-token-auth=true  \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/front-proxy-ca.pem  \\
--proxy-client-cert-file=/opt/kubernetes/ssl/front-proxy-client.pem  \\
--proxy-client-key-file=/opt/kubernetes/ssl/front-proxy-client-key.pem  \\
--requestheader-allowed-names=aggregator  \\
--requestheader-group-headers=X-Remote-Group  \\
--requestheader-extra-headers-prefix=X-Remote-Extra-  \\
--requestheader-username-headers=X-Remote-User"
EOF

--logtostderr: log to stderr (false here, so logs go to --log-dir)
--v: log verbosity
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS port
--advertise-address: address advertised to the rest of the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: virtual IP range for Services
--enable-admission-plugins: admission control plugins
--authorization-mode: authorization modes; enables RBAC and Node authorization
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file (not used here; tokens come from Secrets)
--service-node-port-range: port range assigned to NodePort Services
--kubelet-client-xxx: client certificate the apiserver uses to reach kubelet
--tls-xxx-file: apiserver HTTPS certificates
--etcd-xxxfile: certificates for connecting to the etcd cluster
--audit-log-xxx: audit log settings (not enabled here)

Manage kube-apiserver with systemd

[root@k8s01 ~]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start and enable at boot

[root@k8s01 ~]# systemctl daemon-reload
[root@k8s01 ~]# systemctl start kube-apiserver.service
[root@k8s01 ~]# systemctl enable kube-apiserver.service
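#Verify the apiserver is active and listening on the secure port
[root@k8s01 ~]# systemctl status kube-apiserver.service --no-pager | grep Active
[root@k8s01 ~]# ss -ntl | grep 6443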

Install kube-controller-manager

Create the config file

[root@k8s01 ~]# cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--v=2 \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/logs \\
--address=0.0.0.0 \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/sa.key \\
--kubeconfig=/opt/kubernetes/ssl/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=2m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/front-proxy-ca.pem \\
--node-cidr-mask-size=24"
EOF

--leader-elect: enable leader election when running multiple replicas (HA)
--cluster-signing-cert-file / --cluster-signing-key-file: CA used to sign kubelet certificates; must match the apiserver's CA
--cluster-cidr: Pod network CIDR, set to 10.244.0.0/16 to match kube-proxy and Calico (it must not overlap the 10.96.0.0/16 Service range)

Manage kube-controller-manager with systemd

[root@k8s01 ~]# cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start and enable at boot

[root@k8s01 ~]# systemctl daemon-reload
[root@k8s01 ~]# systemctl start kube-controller-manager
[root@k8s01 ~]# systemctl enable kube-controller-manager

Install kube-scheduler

Create the config file

[root@k8s01 ~]# cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--address=127.0.0.1 \\
--kubeconfig=/opt/kubernetes/ssl/kube-scheduler.kubeconfig"
EOF

--address: address the scheduler binds for its insecure/health endpoint (loopback only here)
--leader-elect: enable leader election when running multiple replicas (HA)

Manage kube-scheduler with systemd

[root@k8s01 ~]# cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start and enable at boot

[root@k8s01 ~]# systemctl daemon-reload
[root@k8s01 ~]# systemctl start kube-scheduler.service && systemctl enable kube-scheduler.service

Configure kubectl on the master nodes and verify

#Copy admin.kubeconfig to ~/.kube/config
[root@k8s01 ~]# mkdir /root/.kube/ -p
[root@k8s01 ~]# cp /opt/kubernetes/ssl/admin.kubeconfig  /root/.kube/config
#Check cluster status; output like the following means all master components are healthy
[root@k8s01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

Deploy kubelet

Authenticate kubelets automatically with TLS bootstrapping

Create the TLS bootstrapping credentials (running this on one master node is enough). Only the master node is shown here; for the other master and worker nodes, change address in the kubelet parameter file and --hostname-override in the kubelet options to match each host.

[root@k8s01 ~]# cd /opt/kubernetes/
[root@k8s01 kubernetes]# a=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6`
[root@k8s01 kubernetes]# b=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16`
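#The two strings form a standard bootstrap token in the <6-char id>.<16-char secret> format
[root@k8s01 kubernetes]# echo "$a.$b"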
#Generate the bootstrap token Secret and RBAC bindings
[root@k8s01 kubernetes]# cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-$a
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: $a
  token-secret: $b
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

#Generate the bootstrap kubeconfig
[root@k8s01 kubernetes]# kubectl config set-cluster kubernetes  \
--certificate-authority=ssl/ca.pem   \
--embed-certs=true   \
--server=https://192.168.200.50:6443   \
--kubeconfig=bootstrap-kubelet.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-credentials tls-bootstrap-token-user  \
--token=$a.$b \
--kubeconfig=bootstrap-kubelet.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes   \
--user=tls-bootstrap-token-user  \
--kubeconfig=bootstrap-kubelet.kubeconfig

[root@k8s01 kubernetes]# kubectl config use-context tls-bootstrap-token-user@kubernetes  \
--kubeconfig=bootstrap-kubelet.kubeconfig

#Apply the RBAC objects
[root@k8s01 kubernetes]# kubectl apply -f bootstrap.secret.yaml
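#Verify the token Secret now exists in kube-system
[root@k8s01 kubernetes]# kubectl -n kube-system get secret bootstrap-token-$a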

#Move the generated kubeconfig into ssl/
[root@k8s01 kubernetes]# mv bootstrap-kubelet.kubeconfig ssl/

Distribute the file to all other nodes

node="k8s02 k8s03 k8s04 k8s05"
for i in $node;do
  ssh $i "mkdir -p /opt/kubernetes/ssl"
  scp -r /opt/kubernetes/ssl/bootstrap-kubelet.kubeconfig $i:/opt/kubernetes/ssl
done

Generate the kubelet options file

[root@k8s01 kubernetes]# cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--bootstrap-kubeconfig=/opt/kubernetes/ssl/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/opt/kubernetes/ssl/kubelet.kubeconfig \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--hostname-override=k8s01.op.local \\
--container-runtime=docker \\
--container-runtime-endpoint=unix:///var/run/dockershim.sock \\
--config=/opt/kubernetes/cfg/kubelet-conf.yml \\
--node-labels=node.kubernetes.io/node='' \\
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF

--hostname-override: node display name, unique within the cluster
--network-plugin: enable CNI
--kubeconfig: generated automatically on first bootstrap; used afterwards to talk to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: parameter file
--cert-dir: directory where kubelet certificates are generated
--pod-infra-container-image: image for the pause container that holds each Pod's network namespace

Generate the parameter file

[root@k8s01 kubernetes]# cat > /opt/kubernetes/cfg/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 192.168.200.51
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

Manage kubelet with systemd

[root@k8s01 kubernetes]# cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
 
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

Start the service

[root@k8s01 kubernetes]# systemctl enable --now kubelet.service
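#With the auto-approve bindings, the CSR is approved automatically and the node registers shortly after
[root@k8s01 kubernetes]# kubectl get csr
[root@k8s01 kubernetes]# kubectl get node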

Deploy kube-proxy

Only the master node is shown here; on the other master and worker nodes, change bindAddress and hostnameOverride in the parameter file to match each host.

Generate the kubeconfig for kube-proxy

[root@k8s01 kubernetes]# cd /opt/kubernetes
#Create the kube-proxy ServiceAccount and bind it to system:node-proxier
[root@k8s01 kubernetes]# kubectl -n kube-system create serviceaccount kube-proxy
[root@k8s01 kubernetes]# kubectl create clusterrolebinding  system:kube-proxy  --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy

[root@k8s01 kubernetes]# SECRET=$(kubectl -n kube-system get sa/kube-proxy \
--output=jsonpath='{.secrets[0].name}')
	
[root@k8s01 kubernetes]# JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
--output=jsonpath='{.data.token}' | base64 -d)

[root@k8s01 kubernetes]# kubectl config set-cluster kubernetes   \
--certificate-authority=/opt/kubernetes/ssl/ca.pem    \
--embed-certs=true    \
--server=https://192.168.200.50:6443    \
--kubeconfig=kube-proxy.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-credentials kubernetes    \
--token=${JWT_TOKEN}   \
--kubeconfig=kube-proxy.kubeconfig

[root@k8s01 kubernetes]# kubectl config set-context kubernetes    \
--cluster=kubernetes   \
--user=kubernetes   \
--kubeconfig=kube-proxy.kubeconfig

[root@k8s01 kubernetes]# kubectl config use-context kubernetes   \
--kubeconfig=kube-proxy.kubeconfig

[root@k8s01 kubernetes]# mv kube-proxy.kubeconfig ssl/

Copy the kubeconfig to the other nodes

node="k8s02 k8s03 k8s04 k8s05 "
for i in $node;do
  scp  /opt/kubernetes/ssl/kube-proxy.kubeconfig $i:/opt/kubernetes/ssl
done

Create the options file

[root@k8s01 kubernetes]# cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF

Generate the parameter file

[root@k8s01 kubernetes]# cat > /opt/kubernetes/cfg/kube-proxy-config.yml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.200.51
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /opt/kubernetes/ssl/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "k8s01.op.local"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 0.0.0.0:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF

Manage kube-proxy with systemd

[root@k8s01 kubernetes]# cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
 
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

Start the service

[root@k8s01 kubernetes]# systemctl enable --now kube-proxy.service
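#With mode "ipvs", the Service virtual IPs should appear as IPVS virtual servers
[root@k8s01 kubernetes]# ipvsadm -Ln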

Add-on installation

Install Calico v3.15 (etcd datastore)

YAML download: https://docs.projectcalico.org/v3.15/manifests/calico-etcd.yaml

[root@k8s01 calico]# cd /home/deploy/calico/
#Download the YAML file and make the following changes
#Base64-encode the certificate files and paste the output; this cluster keeps its etcd certs under /opt/etcd/ssl
  etcd_endpoints: "https://192.168.200.51:2379,https://192.168.200.52:2379,https://192.168.200.53:2379"
  etcd-key: (cat /opt/etcd/ssl/etcd-client-key.pem | base64 -w 0)
  etcd-cert: (cat /opt/etcd/ssl/etcd-client.pem | base64 -w 0)
  etcd-ca: (cat /opt/etcd/ssl/ca.pem | base64 -w 0)
#Set the following to their default values
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"
#Uncomment the following and set it to your cluster's Pod CIDR
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
#Apply
[root@k8s01 calico]# kubectl apply -f calico-etcd.yaml
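#The calico-node pods (label k8s-app=calico-node in the stock manifest) should reach Running on every node
[root@k8s01 calico]# kubectl -n kube-system get pods -l k8s-app=calico-node -o wide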

Cluster status

Deploy the remaining master and worker nodes following the steps above, changing the per-node parameters where noted (only the three nodes joined so far are shown below).

[root@k8s01 ~]# kubectl get node
NAME             STATUS   ROLES    AGE   VERSION
k8s01.op.local   Ready    <none>   34m   v1.23.8
k8s02.op.local   Ready    <none>   33m   v1.23.8
k8s03.op.local   Ready    <none>   32m   v1.23.8