Thursday, November 27, 2025

Kubernetes : Remove Nodes

Remove nodes from an existing Kubernetes cluster.

[1] Only when removing a Control Plane node, it is necessary to remove the node's Etcd member and its load-balancer entry beforehand, as follows.
root@ctrl:~# 
kubectl get nodes

NAME               STATUS   ROLES           AGE     VERSION
dlp-1.srv.world    Ready    control-plane   12m     v1.33.4
dlp.srv.world      Ready    control-plane   3h25m   v1.33.4
node01.srv.world   Ready    <none>          3h18m   v1.33.4
node02.srv.world   Ready    <none>          3h16m   v1.33.4
node03.srv.world   Ready    <none>          3m42s   v1.33.4

root@ctrl:~# 
kubectl get pods -n kube-system | grep etcd

etcd-dlp-1.srv.world                      1/1     Running   0             12m
etcd-dlp.srv.world                        1/1     Running   1 (26m ago)   3h25m

# as an example, delete the [dlp-1.srv.world] node
# access Etcd on a Control Plane node that is not the deletion target and
# remove the member entry for the deletion target

root@ctrl:~# 
kubectl -n kube-system exec -it etcd-dlp.srv.world -- sh

sh-5.2#
sh-5.2# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member list 
2000d81d53d2d7c5, started, dlp-1.srv.world, https://10.0.0.31:2380, https://10.0.0.31:2379, false
dd4b95995dc266b1, started, dlp.srv.world, https://10.0.0.30:2380, https://10.0.0.30:2379, false

# remove the member you want to delete
sh-5.2# etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member remove 2000d81d53d2d7c5 
Member 2000d81d53d2d7c5 removed from cluster 63678238411c70a3

sh-5.2# exit 
exit
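
# to verify the removal, you can re-run [member list] from outside the pod;
# only the remaining member should be listed

root@ctrl:~# 
kubectl -n kube-system exec etcd-dlp.srv.world -- etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key member list

dd4b95995dc266b1, started, dlp.srv.world, https://10.0.0.30:2380, https://10.0.0.30:2379, false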

root@ctrl:~# 
vi /etc/nginx/nginx.conf
# comment out or remove the target Control Plane from the upstream servers
stream {
    upstream k8s-api {
        server 10.0.0.30:6443;
        ###server 10.0.0.31:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-api;
    }
}
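
# before reloading, the new configuration can be checked with [nginx -t]
# (the lines below are the expected output for a valid configuration)

root@ctrl:~# 
nginx -t

nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful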

root@ctrl:~# 
systemctl reload nginx
[2] Remove a node from the cluster.
From this point, the procedure is the same for both Control Plane and Worker nodes.
root@ctrl:~# 
kubectl get nodes

NAME               STATUS   ROLES           AGE     VERSION
dlp-1.srv.world    Ready    control-plane   14m     v1.33.4
dlp.srv.world      Ready    control-plane   3h27m   v1.33.4
node01.srv.world   Ready    <none>          3h21m   v1.33.4
node02.srv.world   Ready    <none>          3h18m   v1.33.4
node03.srv.world   Ready    <none>          6m6s    v1.33.4

# prepare to remove the target node
# --ignore-daemonsets ⇒ ignore DaemonSet-managed pods
# --delete-emptydir-data ⇒ continue even if pods use emptyDir volumes (their local data is deleted)
# --force ⇒ also delete standalone pods that are not managed by a controller such as a Deployment

root@ctrl:~# 
kubectl drain dlp-1.srv.world --ignore-daemonsets --delete-emptydir-data --force

node/dlp-1.srv.world cordoned
Warning: ignoring DaemonSet-managed Pods: calico-system/calico-node-7sb9c, calico-system/csi-node-driver-5875w, kube-system/kube-proxy-bhdv8
node/dlp-1.srv.world drained

# verify the node status a few minutes later

root@ctrl:~# 
kubectl get nodes dlp-1.srv.world

NAME              STATUS                        ROLES           AGE   VERSION
dlp-1.srv.world   NotReady,SchedulingDisabled   control-plane   16m   v1.33.4

# delete the node from the cluster

root@ctrl:~# 
kubectl delete node dlp-1.srv.world

node "dlp-1.srv.world" deleted

root@ctrl:~# 
kubectl get nodes

NAME               STATUS   ROLES           AGE     VERSION
dlp.srv.world      Ready    control-plane   3h30m   v1.33.4
node01.srv.world   Ready    <none>          3h23m   v1.33.4
node02.srv.world   Ready    <none>          3h21m   v1.33.4
node03.srv.world   Ready    <none>          8m34s   v1.33.4
[3] On the removed node, reset the kubeadm settings.
root@dlp-1:~# 
kubeadm reset

[reset] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[reset] Use 'kubeadm init phase upload-config --config your-config-file' to re-upload it.
W0822 22:48:51.733080    6850 reset.go:137] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get node registration: failed to get corresponding node: nodes "dlp-1.srv.world" not found
W0822 22:48:51.733402    6850 preflight.go:56] [reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W0822 22:48:52.972691    6850 removeetcdmember.go:106] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] Deleted contents of the etcd data directory: /var/lib/etcd
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]

The reset process does not perform cleanup of CNI plugin configuration,
network filtering rules and kubeconfig files.

For information on how to perform this cleanup manually, please see:
    https://k8s.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
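
# as the message above notes, CNI configuration, network filtering rules and
# kubeconfig files are not cleaned up by [kubeadm reset];
# a minimal manual cleanup sketch, assuming the default CNI config path and
# iptables-based rules (adjust paths and commands for your environment)

root@dlp-1:~# 
rm -rf /etc/cni/net.d
root@dlp-1:~# 
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
root@dlp-1:~# 
rm -f $HOME/.kube/config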
