kubeadm deploys highly available K8S clusters (v1.14.0)

I. Cluster Planning

host name IP role Main Plugins
VIP 172.16.1.10 Implement master high availability and load balancing
k8s-master01 172.16.1.11 master kube-apiserver,kube-controller,kube-scheduler,kubelet,kube-proxy,kube-flannel,etcd
k8s-master02 172.16.1.12 master kube-apiserver,kube-controller,kube-scheduler,kubelet,kube-proxy,kube-flannel,etcd
k8s-master03 172.16.1.13 master kube-apiserver,kube-controller,kube-scheduler,kubelet,kube-proxy,kube-flannel,etcd
k8s-node01 172.16.1.21 node kubelet,kube-proxy,kube-flannel
k8s-node02 172.16.1.22 node kubelet,kube-proxy,kube-flannel

The master node achieves high availability and load balancing through keepalived and haproxy. For cloud hosts, related cloud products such as slb in Ali Cloud or clb in Tencent Cloud can be used directly.

II. Preparations

Prepare the following on all nodes

1. Hardware Configuration

At least 2 CPUs and 2 GB of memory are recommended. This is not a hard requirement — a cluster can also be set up with 1 CPU and 1 GB, but WARNING prompts will appear during deployment:

#Reported when initializing a master with a single CPU
 [WARNING NumCPU]: the number of available CPUs 1 is less than the required 2
#May be reported when deploying plug-ins or pods
warning: FailedScheduling: Insufficient cpu, Insufficient memory

2. Modify Kernel Parameters

# Persist the kernel parameters required/recommended for Kubernetes nodes.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
# Prefer physical memory; only fall back to swap space when unavoidable.
# (The comment must be on its own line: sysctl treats everything after
# '=' as the value, so a trailing comment would corrupt the setting.)
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
# NOTE(review): the net.bridge.* keys only exist once the br_netfilter
# module is loaded — confirm it is loaded before applying.
sysctl --system

3. Close Swap

After version k8s1.8, swap is required to be turned off, otherwise kubelet will not start under the default configuration.

#Temporarily disable swap (effective until reboot)
swapoff -a
#Permanently disable: comment out all swap entries in fstab
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

4. Turn on ipvs

# Load the kernel modules needed by kube-proxy's IPVS mode.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
#Check whether the modules are loaded
lsmod | grep ip_vs
#Reload the modules automatically at boot via rc.local
cat <<EOF>> /etc/rc.local
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# rc.local is only executed at boot when it is marked executable
chmod +x /etc/rc.d/rc.local

5. Disable selinux

#Temporarily disable (runtime only, until reboot)
setenforce 0
#Permanently disable (original used "diabled" — a typo that would leave
#SELinux enforcing after reboot — and ERE groups without sed's -r flag)
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

6. Close the firewall

# Stop firewalld now and keep it disabled across reboots on every node.
systemctl stop firewalld
systemctl disable firewalld

7. Install docker

#Fetch the docker-ce yum repository (Aliyun mirror)
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
#Replace the base repo with the Aliyun CentOS 7 mirror, then add EPEL
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo 
yum -y install epel-release
#Install docker-ce (latest version by default) and verify
yum -y install docker-ce
docker version
#Start docker now and enable it at boot
systemctl start docker
systemctl enable docker

#Attention
There is no docker-ce version specified for the installation. The latest version is installed by default. If you need to install the specified version, you can:

#List docker-ce versions
yum list docker-ce --showduplicates
#Install the specified version
yum -y install docker-ce-<VERSION_STRING>

8. Other

SSH passwordless login, hosts file entries, NTP time synchronization

III. Install and configure keepalived and haproxy

master node execution

1. Installation

yum install -y socat keepalived haproxy ipvsadm
systemctl enable haproxy
systemctl enable keepalived

2. Configuration

haproxy profile:

#/etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local3
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     32768
    user        haproxy
    group       haproxy
    daemon
    nbproc      1
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s

listen stats
    mode   http
    bind :8888
    stats   enable
    stats   uri     /admin?stats
    stats   auth    admin:admin
    stats   admin   if TRUE

frontend  k8s_https *:8443
    mode      tcp
    maxconn      2000
    default_backend     https_sri

backend https_sri
    balance      roundrobin
    server master1-api 172.16.1.11:6443  check inter 10000 fall 2 rise 2 weight 1
    server master2-api 172.16.1.12:6443  check inter 10000 fall 2 rise 2 weight 1
    server master3-api 172.16.1.13:6443  check inter 10000 fall 2 rise 2 weight 1

keepalived profile:

#/etc/keepalived/keepalived.conf
global_defs {
   router_id master01
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.16.1.10
    }
    track_script {   
        check_haproxy
    }
}
#/etc/keepalived/check_haproxy.sh
#!/bin/bash
# keepalived VRRP health check: if haproxy is no longer running on this
# node, stop keepalived so the VIP fails over to another master whose
# haproxy is still alive.
NUM=$(ps -C haproxy --no-header | wc -l)
if [ "$NUM" -eq 0 ]; then
    systemctl stop keepalived
fi

Note that there are differences among the three keepalived configuration files:
router_id is master 01, master 02, master 03, respectively
state is MASTER, BACKUP, BACKUP
priority is 100, 90, 80

IV. K8S Cluster Deployment

1. Install kubeadm, kubelet, kubectl

All nodes have kubeadm, kubelet, and kubectl installed. Note that kubectl for a node is not required.

#Configure the Kubernetes yum repository (Aliyun mirror).
#Note: gpgkey accepts multiple URLs, but they must be space-separated —
#the original had the two URLs fused into one invalid URL.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Install versions pinned to the cluster version (v1.14.0)
yum -y install kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0
systemctl enable kubelet

2. Initialize master

The default configuration file is available through kubeadm config print init-defaults > kubeadm.conf.

#View required mirrors
kubeadm config images list --config kubeadm.conf
#Pull the desired image
kubeadm config images pull --config kubeadm.conf
#Initialization
kubeadm init --config kubeadm.conf

(1) master 01 node

Configuration file kubeadm_master01.conf

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.11
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.11:2379"
      advertise-client-urls: "https://172.16.1.11:2379"
      listen-peer-urls: "https://172.16.1.11:2380"
      initial-advertise-peer-urls: "https://172.16.1.11:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380"
      initial-cluster-state: new
    serverCertSANs:
      - k8s-master01
      - 172.16.1.11
    peerCertSANs:
      - k8s-master01
      - 172.16.1.11
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
#Initialize master01
kubeadm init --config kubeadm_master01.conf
#Configure kubectl to manage the cluster
#(-p makes this idempotent; explicit $HOME avoids depending on the cwd)
mkdir -p "$HOME/.kube"
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"

kubectl commands can now be executed, such as listing the current pods. You can see that only the coredns pods are in the Pending state, because the network plug-in has not been installed yet.

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               0/1     Pending   0          40m
coredns-8686dcc4fd-xk9st               0/1     Pending   0          40m
etcd-k8s-master01                      1/1     Running   0          39m
kube-apiserver-k8s-master01            1/1     Running   0          39m
kube-controller-manager-k8s-master01   1/1     Running   0          39m
kube-proxy-2cb7r                       1/1     Running   0          40m
kube-scheduler-k8s-master01            1/1     Running   0          39m
#Install Flannel Network Plugin
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

#Bind Network Card
Flannel uses the host's first network card by default. If you have multiple network cards and need to specify one, you can modify the following sections in kube-flannel.yml

      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192        #Add the row
#Then apply the configuration file (note that only in master01)
kubectl apply -f kube-flannel.yml

#At this point, we look at the pod and find that coredns is no longer Pending

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               1/1     Running   0          54m
coredns-8686dcc4fd-xk9st               1/1     Running   0          54m
etcd-k8s-master01                      1/1     Running   0          53m
kube-apiserver-k8s-master01            1/1     Running   0          53m
kube-controller-manager-k8s-master01   1/1     Running   0          53m
kube-flannel-ds-amd64-4vg2s            1/1     Running   0          50s
kube-proxy-2cb7r                       1/1     Running   0          54m
kube-scheduler-k8s-master01            1/1     Running   0          53m

(2) Distributing certificates

Execute the following script

#!/bin/bash
# Distribute the cluster-wide certificates and kubeconfig generated on
# master01 to the other masters (.12 and .13) so they can join the
# control plane sharing the same CA / service-account / front-proxy keys.
for index in 12 13; do
  ip=172.16.1.${index}
  # Create the target directories first
  ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
  # Cluster CA pair
  scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
  scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
  # Service-account token signing keys
  scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
  scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
  # Front-proxy CA pair
  scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
  scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
  # etcd CA pair
  scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
  scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
  # Admin kubeconfig, both for kubeadm and for kubectl on the target node
  scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
  scp /etc/kubernetes/admin.conf $ip:~/.kube/config
done

(3) master 02 node

Configuration file kubeadm_master02.conf

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.12
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.12:2379"
      advertise-client-urls: "https://172.16.1.12:2379"
      listen-peer-urls: "https://172.16.1.12:2380"
      initial-advertise-peer-urls: "https://172.16.1.12:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master02
      - 172.16.1.12
    peerCertSANs:
      - k8s-master02
      - 172.16.1.12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
#Configure Certificate
kubeadm init phase certs all --config kubeadm_master02.conf
#Configure etcd
kubeadm init phase etcd local --config kubeadm_master02.conf
#Generate kubelet configuration file
kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
#Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master02.conf
#Add etcd of master 02 to cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master2 https://172.16.1.12:2380
#Start kube-apiserver, kube-controller-manager, kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master02.conf
kubeadm init phase control-plane all --config kubeadm_master02.conf

#View node status

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h2m   v1.14.0
k8s-master02   Ready    <none>   14m    v1.14.0
#Mark node as master
kubeadm init phase mark-control-plane --config kubeadm_master02.conf

#Check again

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h3m   v1.14.0
k8s-master02   Ready    master   16m    v1.14.0

(4) master 03 node

Configuration file kubeadm_master03.conf

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.13
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.13:2379"
      advertise-client-urls: "https://172.16.1.13:2379"
      listen-peer-urls: "https://172.16.1.13:2380"
      initial-advertise-peer-urls: "https://172.16.1.13:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380,k8s-master03=https://172.16.1.13:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master03
      - 172.16.1.13
    peerCertSANs:
      - k8s-master03
      - 172.16.1.13
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
#Configure Certificate
kubeadm init phase certs all --config kubeadm_master03.conf
#Configure etcd
kubeadm init phase etcd local --config kubeadm_master03.conf
#Generate kubelet configuration file
kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
#Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master03.conf
#Add etcd of master 03 to cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master3 https://172.16.1.13:2380
#Start kube-apiserver, kube-controller-manager, kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master03.conf
kubeadm init phase control-plane all --config kubeadm_master03.conf
#Mark node as master
kubeadm init phase mark-control-plane --config kubeadm_master03.conf

Through the above steps, the three masters have been initialized.

3. Worker Nodes Join the Cluster

#The following commands are prompted when initializing master01:
kubeadm join 172.16.1.10:8443 --token 8j5lga.y2cei06i6cfxbxmo \
--discovery-token-ca-cert-hash sha256:9eff14803a65631b74e4db6dfa9e7362eb1dd62cd76d56e840d33b1f5a3aa93b

4. Status Check

#View node information

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   3h19m   v1.14.0
k8s-master02   Ready    master   114m    v1.14.0
k8s-master03   Ready    master   95m     v1.14.0
k8s-node01     Ready    <none>   64m     v1.14.0
k8s-node02     Ready    <none>   50m     v1.14.0

#View cluster information

[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://172.16.1.10:8443
KubeDNS is running at https://172.16.1.10:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

#View Controller Status

[root@k8s-master01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

#View etcd cluster member information

[root@k8s-master01 ~]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member list 
2cd4d60db6db4371: name=k8s-master01 peerURLs=https://172.16.1.11:2380 clientURLs=https://172.16.1.11:2379 isLeader=true
707da0ac9cb69832: name=k8s-master02 peerURLs=https://172.16.1.12:2380 clientURLs=https://172.16.1.12:2379 isLeader=false
c702920d32ced638: name=k8s-master03 peerURLs=https://172.16.1.13:2380 clientURLs=https://172.16.1.13:2379 isLeader=false

#Check if ipvs are enabled
Rules can be seen through ipvsadm

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 172.16.1.11:6443             Masq    1      0          0         
  -> 172.16.1.12:6443             Masq    1      0          0         
  -> 172.16.1.13:6443             Masq    1      1          0         
TCP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.244.3.2:9153              Masq    1      0          0         
  -> 10.244.4.2:9153              Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0       

Looking at the pod log via kubectl logs --tail=10 kube-proxy-tqxlq -n kube-system, you can see: Using ipvs Proxier.
If the ipvsadm fails to check the rules and is found in the kube-proxy log:

can't determine whether to use ipvs proxy, error: IPVS proxier will not be used because the following required kernel modules are not loaded: [ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh]
Using iptables Proxier.

Indicates that ipvs enablement failed.

Tags: Linux Kubernetes kubelet yum Docker

Posted on Wed, 01 May 2019 18:20:36 -0700 by samdennis