Node Maintenance

Mark the node unschedulable

[root@master ~]# kubectl cordon node02
node/node02 cordoned
[root@master ~]# kubectl get node
NAME     STATUS                     ROLES    AGE    VERSION
master   Ready                      master   184d   v1.18.4
node01   Ready                      <none>   183d   v1.18.4
node02   Ready,SchedulingDisabled   <none>   182d   v1.18.4
[root@master ~]# kubectl get po -o wide
NAME                           READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nginx-master-9d4cf4f77-47vfj   1/1     Running   0          30m   10.244.0.129   master   <none>           <none>
nginx-master-9d4cf4f77-69jn6   1/1     Running   0          30m   10.244.2.206   node02   <none>           <none>
nginx-master-9d4cf4f77-6drhg   1/1     Running   0          30m   10.244.1.218   node01   <none>           <none>
nginx-master-9d4cf4f77-b7zfd   1/1     Running   0          30m   10.244.1.219   node01   <none>           <none>
nginx-master-9d4cf4f77-fxsjd   1/1     Running   0          30m   10.244.2.204   node02   <none>           <none>
nginx-master-9d4cf4f77-ktnvk   1/1     Running   0          30m   10.244.0.128   master   <none>           <none>
nginx-master-9d4cf4f77-mzrx7   1/1     Running   0          30m   10.244.1.217   node01   <none>           <none>
nginx-master-9d4cf4f77-pcznk   1/1     Running   0          30m   10.244.2.203   node02   <none>           <none>
nginx-master-9d4cf4f77-px98b   1/1     Running   0          30m   10.244.2.205   node02   <none>           <none>
nginx-master-9d4cf4f77-wtcwt   1/1     Running   0          30m   10.244.1.220   node01   <none>           <none>

After cordoning node02, the node listing shows it as SchedulingDisabled: the cluster will no longer schedule new pods onto this node, but the pods already running on node02 keep running normally.
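
For reference, kubectl cordon simply sets the node's spec.unschedulable field; the same effect can be achieved by patching the node directly:

[root@master ~]# kubectl patch node node02 -p '{"spec":{"unschedulable":true}}'
node/node02 patched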

Evict the pods on the node

[root@master ~]# kubectl drain node02 --delete-local-data --ignore-daemonsets --force 
node/node02 already cordoned

Flag descriptions:

  • --delete-local-data: evict a pod even if it uses emptyDir volumes, deleting the local data (renamed in newer kubectl releases; see the note after this list)
  • --ignore-daemonsets: skip pods managed by a DaemonSet controller; evicting them is pointless, because the DaemonSet controller would immediately recreate each deleted pod on this same node, an endless loop
  • --force: without this flag, drain only deletes pods on the node that were created by a ReplicationController, ReplicaSet, DaemonSet, StatefulSet or Job; with it, drain also deletes "bare" pods that are not bound to any controller
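
Note that in newer kubectl releases (v1.20 and later) --delete-local-data has been deprecated in favor of --delete-emptydir-data, so the equivalent drain command there would be:

[root@master ~]# kubectl drain node02 --delete-emptydir-data --ignore-daemonsets --force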

During the drain only one pod is migrated at a time, so there are always 9 pods serving traffic.
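
To make that guarantee explicit rather than incidental, a PodDisruptionBudget tells the eviction API how many replicas must stay available during a drain. A minimal sketch, assuming the Deployment's pods carry a label app=nginx-master (the label is not shown in the output above, so adjust the selector to your own):

[root@master ~]# kubectl create poddisruptionbudget nginx-master-pdb --selector=app=nginx-master --min-available=9
poddisruptionbudget.policy/nginx-master-pdb created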

Maintenance complete

Once maintenance is complete, mark node02 schedulable again.

[root@master ~]# kubectl uncordon node02
node/node02 uncordoned
[root@master ~]# kubectl get node
NAME     STATUS                     ROLES    AGE    VERSION
master   Ready                      master   184d   v1.18.4
node01   Ready                      <none>   183d   v1.18.4
node02   Ready                      <none>   182d   v1.18.4

Migrate pods back

Migrate the pods back by deleting them and letting the Deployment controller recreate them.

[root@master ~]# kubectl get po -o wide
NAME                           READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nginx-master-9d4cf4f77-2vnvk   1/1     Running   0          33m   10.244.1.222   node01   <none>           <none>
nginx-master-9d4cf4f77-47vfj   1/1     Running   0          73m   10.244.0.129   master   <none>           <none>
nginx-master-9d4cf4f77-6drhg   1/1     Running   0          73m   10.244.1.218   node01   <none>           <none>
nginx-master-9d4cf4f77-7n7pt   1/1     Running   0          32m   10.244.0.131   master   <none>           <none>
nginx-master-9d4cf4f77-b7zfd   1/1     Running   0          73m   10.244.1.219   node01   <none>           <none>
nginx-master-9d4cf4f77-ktnvk   1/1     Running   0          73m   10.244.0.128   master   <none>           <none>
nginx-master-9d4cf4f77-mzrx7   1/1     Running   0          73m   10.244.1.217   node01   <none>           <none>
nginx-master-9d4cf4f77-pdkst   1/1     Running   0          32m   10.244.0.130   master   <none>           <none>
nginx-master-9d4cf4f77-pskmp   1/1     Running   0          32m   10.244.0.132   master   <none>           <none>
nginx-master-9d4cf4f77-wtcwt   1/1     Running   0          73m   10.244.1.220   node01   <none>           <none>

[root@master ~]# kubectl delete po nginx-master-9d4cf4f77-47vfj
pod "nginx-master-9d4cf4f77-47vfj" deleted
[root@master ~]# kubectl delete po nginx-master-9d4cf4f77-2vnvk
pod "nginx-master-9d4cf4f77-2vnvk" deleted

[root@master ~]# kubectl get po -o wide
NAME                           READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nginx-master-9d4cf4f77-6drhg   1/1     Running   0          76m   10.244.1.218   node01   <none>           <none>
nginx-master-9d4cf4f77-7n7pt   1/1     Running   0          35m   10.244.0.131   master   <none>           <none>
nginx-master-9d4cf4f77-b7zfd   1/1     Running   0          76m   10.244.1.219   node01   <none>           <none>
nginx-master-9d4cf4f77-f92hp   1/1     Running   0          44s   10.244.2.207   node02   <none>           <none>
nginx-master-9d4cf4f77-ktnvk   1/1     Running   0          76m   10.244.0.128   master   <none>           <none>
nginx-master-9d4cf4f77-mzrx7   1/1     Running   0          76m   10.244.1.217   node01   <none>           <none>
nginx-master-9d4cf4f77-pdkst   1/1     Running   0          35m   10.244.0.130   master   <none>           <none>
nginx-master-9d4cf4f77-pskmp   1/1     Running   0          35m   10.244.0.132   master   <none>           <none>
nginx-master-9d4cf4f77-tdghn   1/1     Running   0          15s   10.244.2.208   node02   <none>           <none>
nginx-master-9d4cf4f77-wtcwt   1/1     Running   0          76m   10.244.1.220   node01   <none>           <none>

Delete pods nginx-master-9d4cf4f77-47vfj and nginx-master-9d4cf4f77-2vnvk during off-peak hours. Since all the pods on node02 were evicted earlier, that node now has the lowest resource utilization, so the scheduler places the recreated pods on it, completing the migration back.
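
Instead of deleting pods one by one, a rolling restart of the whole Deployment gradually recreates every pod and lets the scheduler rebalance them; a sketch, assuming the Deployment is named nginx-master (kubectl v1.15+):

[root@master ~]# kubectl rollout restart deployment nginx-master
deployment.apps/nginx-master restarted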

Node Removal

Delete the node

In day-to-day operations you may occasionally need to remove a node from the cluster entirely.

[root@master ~]# kubectl cordon node02
[root@master ~]# kubectl drain node02 --delete-local-data --ignore-daemonsets --force 
[root@master ~]# kubectl delete node node02

# Log in to node02 and reset it
[root@node02 ~]# kubeadm reset
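
As the kubeadm reset output itself warns, the reset does not clean up CNI configuration or iptables rules; a typical manual cleanup on the node looks like this (assuming an iptables-based setup such as this cluster's flannel network):

[root@node02 ~]# rm -rf /etc/cni/net.d
[root@node02 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X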

Rejoin the node

[root@master ~]# kubeadm token create --print-join-command
kubeadm join 10.0.20.151:6443 --token kpz40z.tuxb4t4m1q37vwl1     --discovery-token-ca-cert-hash sha256:5f656ae26b5e7d4641a979cbfdffeb7845cc5962bbfcd1d5435f00a25c02ea50 
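
Bootstrap tokens created this way expire after 24 hours by default; if the node will rejoin later than that, create the token with a longer TTL, for example:

[root@master ~]# kubeadm token create --ttl 48h --print-join-command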

# On node02
[root@node02 ~]# kubeadm join 10.0.20.151:6443 --token kpz40z.tuxb4t4m1q37vwl1     --discovery-token-ca-cert-hash sha256:5f656ae26b5e7d4641a979cbfdffeb7845cc5962bbfcd1d5435f00a25c02ea50

# Check the nodes
[root@master ~]# kubectl get node
NAME     STATUS                     ROLES    AGE    VERSION
master   Ready                      master   184d   v1.18.4
node01   Ready                      <none>   183d   v1.18.4
node02   Ready                      <none>   182d   v1.18.4