Thursday, November 27, 2025

 

Kubernetes : Configure Control Plane Node

 

Configure a multi-node Kubernetes cluster.

This example is based on the following environment.

+----------------------+   +----------------------+
|  [ ctrl.srv.world ]  |   |   [ dlp.srv.world ]  |
|     Manager Node     |   |     Control Plane    |
+-----------+----------+   +-----------+----------+
        eth0|10.0.0.25             eth0|10.0.0.30
            |                          |
------------+--------------------------+-----------
            |                          |
        eth0|10.0.0.51             eth0|10.0.0.52
+-----------+----------+   +-----------+----------+
| [ node01.srv.world ] |   | [ node02.srv.world ] |
|     Worker Node#1    |   |     Worker Node#2    |
+----------------------+   +----------------------+

[1]

Configure the prerequisites on all nodes; refer to here.
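
If DNS does not resolve the hostnames in this example, register them in /etc/hosts on every node. The entries below are an example matching the addresses in the diagram above.

root@dlp:~# 
vi /etc/hosts
# add entries like follows on every node
10.0.0.25   ctrl.srv.world ctrl
10.0.0.30   dlp.srv.world dlp
10.0.0.51   node01.srv.world node01
10.0.0.52   node02.srv.world node02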

[2] Configure the initial setup on the Control Plane Node.
root@dlp:~# 
kubeadm config print init-defaults > config.yaml

root@dlp:~# 
vi config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # change to specify Control Plane Node IP address
  advertiseAddress: 10.0.0.30
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  # change to specify Control Plane Node Hostname
  name: dlp.srv.world
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
# add to specify the hostname or IP address that is shared across the Kubernetes cluster;
# when proxying to the cluster through a Manager Node as in this example,
# specify the Manager Node IP address
controlPlaneEndpoint: "10.0.0.25:6443"
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.33.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # add to specify the pod network
  # the Calico default is as follows
  podSubnet: 192.168.0.0/16
proxy: {}
scheduler: {}
# add the following to enable nftables mode for kube-proxy
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: nftables
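
# optionally, before running [kubeadm init], validate the edited file and pull the
# required images beforehand (the init output below also suggests the pre-pull)

root@dlp:~# 
kubeadm config validate --config config.yaml

root@dlp:~# 
kubeadm config images pull --config config.yaml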

root@dlp:~# 
kubeadm init --config=config.yaml

[init] Using Kubernetes version: v1.33.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [dlp.srv.world kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.30 10.0.0.25]
[certs] Generating "apiserver-kubelet-client" certificate and key

.....
.....

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 10.0.0.25:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:16a2b904a92ce82b3096969e8f68c9050014f6f40ad742c1ff4050af1e504e65 \
        --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.25:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:16a2b904a92ce82b3096969e8f68c9050014f6f40ad742c1ff4050af1e504e65
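
# the token [abcdef.0123456789abcdef] expires after 24 hours ([ttl: 24h0m0s] in config.yaml);
# if it has expired when a node joins later, print a fresh join command like follows

root@dlp:~# 
kubeadm token create --print-join-command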

# transfer the cluster admin authentication file to the Manager Node with any user

root@dlp:~# 
scp /etc/kubernetes/admin.conf debian@10.0.0.25:/tmp

debian@10.0.0.25's password:
admin.conf                     100% 5649    10.4MB/s   00:00
[3] Work on the Manager Node. Configure the Pod Network with Calico.
# set up the cluster admin user with the file you transferred from the Control Plane Node
# if you set a common user as the cluster admin, log in as that user and run [sudo cp/chown ***]

root@ctrl:~# 
mkdir -p $HOME/.kube

root@ctrl:~# 
mv /tmp/admin.conf $HOME/.kube/config

root@ctrl:~# 
chown $(id -u):$(id -g) $HOME/.kube/config
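
# verify the Manager Node can reach the API server with the transferred credentials

root@ctrl:~# 
kubectl cluster-info
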
root@ctrl:~# 
wget https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/operator-crds.yaml

root@ctrl:~# 
wget https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/tigera-operator.yaml

root@ctrl:~# 
kubectl apply -f operator-crds.yaml

root@ctrl:~# 
kubectl apply -f tigera-operator.yaml

root@ctrl:~# 
cat > custom-resources.yaml <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    linuxDataplane: Nftables
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      cidr: 192.168.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
EOF

root@ctrl:~# 
kubectl apply -f custom-resources.yaml

installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
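
# it may take a minute or two until all Calico components are available;
# the Tigera operator installed above reports progress like follows

root@ctrl:~# 
kubectl get tigerastatus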

# show node state : OK if STATUS is Ready

root@ctrl:~# 
kubectl get nodes

NAME            STATUS   ROLES           AGE     VERSION
dlp.srv.world   Ready    control-plane   2m19s   v1.33.4

# show pod state : OK if all pods are Running

root@ctrl:~# 
kubectl get pods -A

NAMESPACE          NAME                                       READY   STATUS    RESTARTS   AGE
calico-apiserver   calico-apiserver-558d99c869-gqcx9          1/1     Running   0          61s
calico-apiserver   calico-apiserver-558d99c869-jsnjr          1/1     Running   0          61s
calico-system      calico-kube-controllers-5bbbf54fc7-jcj8r   1/1     Running   0          59s
calico-system      calico-node-gchx7                          1/1     Running   0          59s
calico-system      calico-typha-6cd5496bcb-lg594              1/1     Running   0          59s
calico-system      csi-node-driver-wfzrs                      2/2     Running   0          59s
kube-system        coredns-674b8bbfcf-v5696                   1/1     Running   0          2m35s
kube-system        coredns-674b8bbfcf-xsmfg                   1/1     Running   0          2m35s
kube-system        etcd-dlp.srv.world                         1/1     Running   0          2m42s
kube-system        kube-apiserver-dlp.srv.world               1/1     Running   0          2m42s
kube-system        kube-controller-manager-dlp.srv.world      1/1     Running   0          2m42s
kube-system        kube-proxy-xqdss                           1/1     Running   0          2m35s
kube-system        kube-scheduler-dlp.srv.world               1/1     Running   0          2m42s
tigera-operator    tigera-operator-747864d56d-bh2hl           1/1     Running   0          75s
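
# additionally, confirm kube-proxy runs in nftables mode as configured;
# kubeadm stores the KubeProxyConfiguration in the [kube-proxy] ConfigMap,
# so the output should include [mode: nftables]

root@ctrl:~# 
kubectl -n kube-system get configmap kube-proxy -o yaml | grep 'mode:'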
