Kubernetes Cluster Installation and Deployment

1. Server preparation

Role             Hostname              Server IP         Specs
ETCD Cluster     kubernetes-etcd01     192.168.110.96    4c/3g
                 kubernetes-etcd02     192.168.110.97    4c/3g
                 kubernetes-etcd03     192.168.110.98    4c/3g
Master Cluster   kubernetes-master01   192.168.110.128   4c/3g
                 kubernetes-master02   192.168.110.130   4c/3g
                 kubernetes-master03   192.168.110.131   4c/3g
Worker Cluster   kubernetes-worker01   192.168.110.106   4c/3g
                 kubernetes-worker02   192.168.110.107   4c/3g
                 kubernetes-worker03   192.168.110.108   4c/3g
Cluster Proxy    kubernetes-nginx01    192.168.110.104   4c/3g
                 kubernetes-nginx02    192.168.110.105   4c/3g


2. Cluster Architecture


3. Proxy Cluster Installation

Note: Because each kubernetes component can only be configured with a single IP for communication, that single address is a single point of failure. Nginx is therefore used for load balancing and Keepalived for high availability to eliminate the single point.


1. Install nginx service

[root@jumpserver ~]# ansible test_k8s_ng -m yum -a "name=nginx"
[root@jumpserver ~]# ansible test_k8s_ng -m shell -a "systemctl enable nginx"
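
The proxy configuration below relies on the nginx stream module; if you want to confirm the packaged nginx was built with it before proceeding, an optional check is:

ansible test_k8s_ng -m shell -a "nginx -V 2>&1 | grep -o with-stream"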

1.1. Edit the nginx configuration file

# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 65535;
    use epoll;
}

stream {
  upstream etcd_cluster {
    server 192.168.110.96:2379 max_fails=1 fail_timeout=2s;
    server 192.168.110.97:2379 max_fails=1 fail_timeout=2s;
    server 192.168.110.98:2379 max_fails=1 fail_timeout=2s;
  }

 upstream apiserver_cluster {
    server 192.168.110.128:6443 max_fails=1 fail_timeout=2s;
    server 192.168.110.130:6443 max_fails=1 fail_timeout=2s;
    server 192.168.110.131:6443 max_fails=1 fail_timeout=2s;
  }

  server {
    listen 2379;
    proxy_connect_timeout 1s;
    proxy_pass etcd_cluster;
  }

 server {
    listen 6443;
    proxy_connect_timeout 1s;
    proxy_pass apiserver_cluster;
  }
}

1.2. Distribute the nginx configuration file to the nginx nodes

[root@jumpserver json]# ansible test_k8s_ng -m copy -a "src=./nginx.conf dest=/etc/nginx/"
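
After distributing the file, the configuration can be checked for syntax errors on each nginx node (optional check):

ansible test_k8s_ng -m shell -a "nginx -t"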


2. Install keepalived service

[root@jumpserver json]# ansible test_k8s_ng -m yum -a "name=keepalived"
[root@jumpserver json]# ansible test_k8s_ng -m shell -a "systemctl enable keepalived"

2.1. Edit nginx monitoring script

[root@jumpserver json]# vim nginx_check.sh
#!/bin/bash
# If nginx is not running, try to start it; if it still fails to come up,
# stop keepalived so the VIP fails over to the backup node.
counter=$(ps -C nginx --no-heading|wc -l)
if [ "${counter}" = "0" ]; then
  /usr/sbin/nginx
  sleep 2
  counter=$(ps -C nginx --no-heading|wc -l)
  if [ "${counter}" = "0" ]; then
    systemctl stop keepalived
  fi
fi

[root@jumpserver json]# ansible test_k8s_ng -m copy -a "src=./nginx_check.sh dest=/etc/keepalived/ mode=755"
[root@jumpserver json]# ansible test_k8s_ng -m shell -a "ls -l /etc/keepalived/"
192.168.110.104 | CHANGED | rc=0 >>
total 8
-rw-r--r-- 1 root root 3598 Aug 13  2019 keepalived.conf
-rw-r--r-- 1 root root  232 Apr  8 10:47 nginx_check.sh

192.168.110.105 | CHANGED | rc=0 >>
total 8
-rw-r--r-- 1 root root 3598 Aug 13  2019 keepalived.conf
-rw-r--r-- 1 root root  232 Apr  8 10:47 nginx_check.sh

2.2. Edit the keepalived configuration file

! Configuration File for keepalived

global_defs {
    router_id lb01
  }

vrrp_script chk_nginx {
    script "/etc/keepalived/nginx_check.sh" #path of the nginx status check script
    interval 2
    weight -20
  }

vrrp_instance VI_1 {
    state MASTER #this node is the MASTER; the backup node uses BACKUP
    interface eth0
    virtual_router_id 51
    priority 100 #priority 100; the backup node uses a lower value
    advert_int 1

  authentication {
      auth_type PASS
      auth_pass 1111
    }

  track_script {
      chk_nginx #run the check script defined above
    }

  virtual_ipaddress {
      192.168.110.230/24 dev eth0 label eth0:0 #VIP bound to interface eth0
    }
  }


2.3. Distribute the keepalived configuration file to the nginx node

[root@jumpserver json]# ansible test_k8s_ng -m copy -a "src=keepalived.conf dest=/etc/keepalived/"

2.4. Modify the keepalived configuration file on the backup nginx node

[root@jumpserver json]# ansible 192.168.110.105 -m shell -a "sed -i 's/state MASTER/state BACKUP/g' /etc/keepalived/keepalived.conf"
[root@jumpserver json]# ansible 192.168.110.105 -m shell -a "sed -i 's/priority 100/priority 50/g' /etc/keepalived/keepalived.conf"
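
To confirm that the backup node picked up both changes, an optional check is:

ansible 192.168.110.105 -m shell -a "egrep 'state|priority' /etc/keepalived/keepalived.conf"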


3. Start nginx service

[root@jumpserver json]# ansible test_k8s_ng -m service -a "name=nginx state=started"
[root@jumpserver json]# ansible test_k8s_ng -m shell -a "netstat -tlunp |egrep '6443|2379'"
192.168.110.104 | CHANGED | rc=0 >>
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      18174/nginx: master 
tcp        0      0 0.0.0.0:2379            0.0.0.0:*               LISTEN      18174/nginx: master 

192.168.110.105 | CHANGED | rc=0 >>
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      18200/nginx: master 
tcp        0      0 0.0.0.0:2379            0.0.0.0:*               LISTEN      18200/nginx: master



4. Start the keepalived service

4.1. Start keepalived on the primary node first

[root@jumpserver json]# ansible 192.168.110.104 -m service -a "name=keepalived state=started"
[root@jumpserver json]# ansible 192.168.110.104 -m shell -a "ip addr"
192.168.110.104 | CHANGED | rc=0 >>
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:be:3d:18 brd ff:ff:ff:ff:ff:ff
    inet 192.168.110.104/24 brd 192.168.110.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 192.168.110.230/24 scope global secondary eth0:0
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:febe:3d18/64 scope link 
       valid_lft forever preferred_lft forever
    
[root@jumpserver json]# ping 192.168.110.230
PING 192.168.110.230 (192.168.110.230) 56(84) bytes of data.
64 bytes from 192.168.110.230: icmp_seq=1 ttl=64 time=0.393 ms
64 bytes from 192.168.110.230: icmp_seq=2 ttl=64 time=0.343 ms
64 bytes from 192.168.110.230: icmp_seq=3 ttl=64 time=0.383 ms
64 bytes from 192.168.110.230: icmp_seq=4 ttl=64 time=0.435 ms

Note: You can see that the virtual IP is active and reachable.

4.2. Start keepalived on the backup node

[root@jumpserver json]# ansible 192.168.110.105 -m service -a "name=keepalived state=started"
[root@jumpserver json]# ansible test_k8s_ng -m shell -a "ps -ef |grep keepalived"
192.168.110.105 | CHANGED | rc=0 >>
root     18482     1  0 11:16 ?        00:00:00 /usr/sbin/keepalived -D
root     18483 18482  0 11:16 ?        00:00:00 /usr/sbin/keepalived -D
root     18484 18482  0 11:16 ?        00:00:00 /usr/sbin/keepalived -D
root     18535 18534  0 11:19 pts/1    00:00:00 /bin/sh -c ps -ef |grep keepalived
root     18537 18535  0 11:19 pts/1    00:00:00 grep keepalived

192.168.110.104 | CHANGED | rc=0 >>
root     18463     1  0 11:14 ?        00:00:00 /usr/sbin/keepalived -D
root     18464 18463  0 11:14 ?        00:00:00 /usr/sbin/keepalived -D
root     18465 18463  0 11:14 ?        00:00:00 /usr/sbin/keepalived -D
root     18562 18561  0 11:19 pts/0    00:00:00 /bin/sh -c ps -ef |grep keepalived
root     18564 18562  0 11:19 pts/0    00:00:00 grep keepalived


5. Use etcdctl to test proxy address

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.104:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" member list
71b29764d8cc5f1a, started, kubernetes-etcd03, https://192.168.110.98:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
8d3877faaee1ab32, started, kubernetes-etcd01, https://192.168.110.96:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
c9ef20e9b8fa5e73, started, kubernetes-etcd02, https://192.168.110.97:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
[root@kubernetes-etcd01 ~]# 
[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.105:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" member list
71b29764d8cc5f1a, started, kubernetes-etcd03, https://192.168.110.98:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
8d3877faaee1ab32, started, kubernetes-etcd01, https://192.168.110.96:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
c9ef20e9b8fa5e73, started, kubernetes-etcd02, https://192.168.110.97:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
[root@kubernetes-etcd01 ~]# 
[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.230:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" member list
71b29764d8cc5f1a, started, kubernetes-etcd03, https://192.168.110.98:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
8d3877faaee1ab32, started, kubernetes-etcd01, https://192.168.110.96:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
c9ef20e9b8fa5e73, started, kubernetes-etcd02, https://192.168.110.97:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379


6. Testing keepalived virtual IP drift

6.1. Stop the Keepalived service on the primary node

[root@kubernetes-nginx01 ~]# service keepalived stop
Redirecting to /bin/systemctl stop  keepalived.service
[root@kubernetes-nginx01 ~]# 
[root@kubernetes-nginx01 ~]# 
[root@kubernetes-nginx01 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:be:3d:18 brd ff:ff:ff:ff:ff:ff
    inet 192.168.110.104/24 brd 192.168.110.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:febe:3d18/64 scope link 
       valid_lft forever preferred_lft forever

[root@kubernetes-etcd02 ~]# etcdctl --endpoints "https://192.168.110.230:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" member list
71b29764d8cc5f1a, started, kubernetes-etcd03, https://192.168.110.98:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
8d3877faaee1ab32, started, kubernetes-etcd01, https://192.168.110.96:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379
c9ef20e9b8fa5e73, started, kubernetes-etcd02, https://192.168.110.97:2380, https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379

Note: You can see that the virtual IP has drifted to the backup nginx node. The etcd cluster can still be accessed through the virtual IP.


6.2. Restart keepalived on the primary node to see if the virtual IP moves back

[root@kubernetes-nginx01 ~]# service keepalived start
Redirecting to /bin/systemctl start  keepalived.service
[root@kubernetes-nginx01 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:be:3d:18 brd ff:ff:ff:ff:ff:ff
    inet 192.168.110.104/24 brd 192.168.110.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 192.168.110.230/24 scope global secondary eth0:0
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:febe:3d18/64 scope link 
       valid_lft forever preferred_lft forever

Note: You can see that the virtual IP has drifted back to the primary nginx node.


4. ETCD Cluster Installation

Note: Because cluster installation involves a lot of repetitive work, I use ansible to run the commands.


1. Install etcd service

[root@jumpserver ~]# ansible test_k8s_etcd -m yum -a "name=etcd"

1.1. Configure hosts resolution

[root@jumpserver ~]# ansible test_k8s_etcd -m shell -a "echo -e '192.168.110.96 kubernetes-etcd01\n192.168.110.97 kubernetes-etcd02\n192.168.110.98 kubernetes-etcd03' >>/etc/hosts"

1.2. Configure an environment variable to set the etcdctl API version

[root@jumpserver ~]# ansible test_k8s_etcd -m shell -a "echo 'export ETCDCTL_API=3' >> /etc/profile"
[root@jumpserver ~]# ansible test_k8s_etcd -m shell -a "source /etc/profile"

1.3. Add etcd to system services

[root@jumpserver ~]# ansible test_k8s_etcd -m shell -a "systemctl enable etcd"


2. Generate etcd cluster certificate file

2.1. Generate ca root certificate

Edit the ca-config certificate configuration file

[root@jumpserver json]# mkdir /root/create_cert/{cert,json} -p && cd /root/create_cert/json
[root@jumpserver json]# cat ca-config.json
{
    "signing": {
        "default": {
            "expiry": "43800h"
        },
        "profiles": {
            "server": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "client": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

Edit the ca-csr configuration file

[root@jumpserver json]# cat ca-csr.json 
{
    "CN": "etcd",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}

Generate a ca certificate

cfssl gencert -initca /root/create_cert/json/ca-csr.json |cfssljson -bare ca -
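
To sanity-check the CA that was just generated, an optional inspection with openssl is:

openssl x509 -in ca.pem -noout -subject -dates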


2.2. Generate the client certificate

Edit the client certificate configuration file

[root@jumpserver json]# cat client.json 
{
    "CN": "client",
    "key": {
        "algo": "ecdsa",
        "size": 256
    }
}

Generate client certificate

cfssl gencert -ca=../cert/ca.pem -ca-key=../cert/ca-key.pem -config=./ca-config.json -profile=client ./client.json | cfssljson -bare client


2.3. Generate the server and peer certificates

Edit config.json configuration file

[root@jumpserver json]# cat config.json 
{
    "CN": "citylife",
    "hosts": [
        "kubernetes-etcd01",
        "kubernetes-etcd02",
        "kubernetes-etcd03",
        "192.168.110.96",
        "192.168.110.97",
        "192.168.110.98",
        "192.168.110.128",
        "192.168.110.130",
        "192.168.110.131",
        "192.168.110.104",
        "192.168.110.230",
    ],
    "key": {
        "algo": "ecdsa",
        "size": 256
    },
    "names": [
        {
            "C": "US",
            "ST": "CA",
            "L": "San Francisco"
        }
    ]
}

Note: hosts must list the hostnames and IP addresses of every component that will communicate with the etcd cluster, including the proxy node and the virtual IP.

Generate server and peer certificates

[root@jumpserver json]# cfssl gencert -ca=../cert/ca.pem -ca-key=../cert/ca-key.pem -config=ca-config.json -profile=server config.json |cfssljson -bare server
[root@jumpserver json]# cfssl gencert -ca=../cert/ca.pem -ca-key=../cert/ca-key.pem -config=ca-config.json -profile=peer config.json |cfssljson -bare peer
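
It is worth confirming that every host from config.json made it into the certificate's SAN list; an optional check is:

openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"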


2.4. Copy the generated certificate to other etcd nodes

[root@jumpserver json]# ls -l ../cert/
total 48
-rw-r--r-- 1 root root  883 Apr  3 16:02 ca.csr
-rw------- 1 root root 1679 Apr  3 16:02 ca-key.pem
-rw-r--r-- 1 root root 1078 Apr  3 16:02 ca.pem
-rw-r--r-- 1 root root  351 Apr  3 16:08 client.csr
-rw------- 1 root root  227 Apr  3 16:08 client-key.pem
-rw-r--r-- 1 root root  875 Apr  3 16:08 client.pem
-rw-r--r-- 1 root root  590 Apr  3 18:33 peer.csr
-rw------- 1 root root  227 Apr  3 18:33 peer-key.pem
-rw-r--r-- 1 root root 1103 Apr  3 18:33 peer.pem
-rw-r--r-- 1 root root  590 Apr  3 18:33 server.csr
-rw------- 1 root root  227 Apr  3 18:33 server-key.pem
-rw-r--r-- 1 root root 1103 Apr  3 18:33 server.pem
[root@jumpserver json]# ansible test_k8s_etcd -m shell -a "mkdir -p /application/etcd/pki"
[root@jumpserver json]# ansible test_k8s_etcd -m copy -a "src=/root/create_cert/cert/ dest=/application/etcd/pki/ owner=etcd mode=600"


2.5. Edit the etcd configuration file

[root@jumpserver json]# ansible test_k8s_etcd -m shell -a "mkdir -p /application/etcd/default.etcd"

[root@kubernetes-etcd01 ~]# cat /etc/etcd/etcd.conf 
#[Member]
ETCD_DATA_DIR="/application/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.110.96:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.110.96:2379"
ETCD_NAME="kubernetes-etcd01"                              #hostname of etcd node

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="    #The address the current node is listening on
ETCD_ADVERTISE_CLIENT_URLS=" 

ETCD_INITIAL_CLUSTER="kubernetes-etcd01=https://192.168.110.96:2380,kubernetes-etcd02=https://192.168.110.97:2380,kubernetes-etcd03=https://192.168.110.98:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

#[Security]
ETCD_CERT_FILE="/application/etcd/pki/server.pem"
ETCD_KEY_FILE="/application/etcd/pki/server-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/application/etcd/pki/ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/application/etcd/pki/peer.pem"
ETCD_PEER_KEY_FILE="/application/etcd/pki/peer-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/application/etcd/pki/ca.pem"
ETCD_PEER_AUTO_TLS="true"
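
The same file is used on kubernetes-etcd02 and kubernetes-etcd03 with only the node-specific fields changed; for example, the lines that differ on kubernetes-etcd02 would look like this (a sketch, adapt per node):

ETCD_LISTEN_PEER_URLS="https://192.168.110.97:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.110.97:2379"
ETCD_NAME="kubernetes-etcd02"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.110.97:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.110.97:2379"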


2.6. Start the etcd cluster

[root@jumpserver json]# ansible test_k8s_etcd -m service -a "name=etcd state=started"

Note: when starting the cluster again later, change the ETCD_INITIAL_CLUSTER_STATE field from "new" to "existing" in the configuration file, or the start will not succeed.
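
If that field needs to be flipped on all three nodes at once, one way to do it (adjust to your setup) is:

ansible test_k8s_etcd -m shell -a "sed -i 's/ETCD_INITIAL_CLUSTER_STATE=\"new\"/ETCD_INITIAL_CLUSTER_STATE=\"existing\"/' /etc/etcd/etcd.conf"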


2.7. Verify the cluster state

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem"  endpoint health
https://192.168.110.96:2379 is healthy: successfully committed proposal: took = 4.662674ms
https://192.168.110.97:2379 is healthy: successfully committed proposal: took = 2.713563ms
https://192.168.110.98:2379 is healthy: successfully committed proposal: took = 4.360699ms

If the above information is returned, the cluster is installed and healthy.
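
A more detailed view, including which member is currently the leader, is also available (optional check):

etcdctl --endpoints "https://192.168.110.96:2379,https://192.168.110.97:2379,https://192.168.110.98:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" endpoint status --write-out=table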


Adding data to a cluster

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.96:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" put etcd-cluster ok
OK
[root@kubernetes-etcd01 ~]#

View data at other nodes

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.97:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" get etcd-cluster
etcd-cluster
ok


Delete cluster data

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.98:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" del etcd-cluster ok
1
[root@kubernetes-etcd01 ~]#

View the data again

[root@kubernetes-etcd01 ~]# etcdctl --endpoints "https://192.168.110.97:2379" --cert "/application/etcd/pki/server.pem" --key "/application/etcd/pki/server-key.pem" --cacert "/application/etcd/pki/ca.pem" get etcd-cluster
[root@kubernetes-etcd01 ~]#

No output means the key has been deleted.


5. Install and deploy the kubernetes cluster

1. Preparations

1.1. Set hosts resolution

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "echo -e '192.168.110.128 kubernetes-master01\n192.168.110.130 kubernetes-master02\n192.168.110.131 kubernetes-master03\n192.168.110.106 kubernetes-worker01\n192.168.110.107 kubernetes-worker02\n192.168.110.108 kubernetes-worker03' >>/etc/hosts"

1.2. Setting system parameters

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "echo -e 'net.bridge.bridge-nf-call-iptables = 1\nnet.ipv4.ip_forward = 1' >>/etc/sysctl.conf"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "sysctl -p"
192.168.110.107 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

192.168.110.106 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

192.168.110.131 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

192.168.110.130 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

192.168.110.128 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

192.168.110.108 | CHANGED | rc=0 >>
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

1.3. Turn off swap, the firewall, and SELinux

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "systemctl disable firewalld"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "systemctl stop firewalld"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "setenforce 0"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "swapoff -a"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "sed -i '/swap/d' /etc/fstab"


2. Install docker (executed on all master&worker nodes)

2.1. Install the yum repository management tool

[root@jumpserver json]# ansible test_k8s_cluster -m yum -a "name=yum-utils"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "yum-config-manager --add-repo

2.2. Install docker and start it

[root@jumpserver json]# ansible test_k8s_cluster -m yum -a "name=docker-ce"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "systemctl enable docker"
[root@kubernetes-master01 ~]# cat /etc/docker/daemon.json
{
        "data-root": "/application/docker",
        "registry-mirrors": ["https://registry.cn-beijing.aliyuncs.com"],
        "exec-opts": ["native.cgroupdriver=systemd"]
}

[root@jumpserver json]# ansible test_k8s_cluster -m copy -a "src=./daemon.json dest=/etc/docker/"
[root@jumpserver json]# ansible test_k8s_cluster -m service -a "name=docker state=restarted"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "docker info" |grep cgroup
Cgroup Driver: cgroupfs
Cgroup Driver: cgroupfs
Cgroup Driver: cgroupfs
Cgroup Driver: cgroupfs
Cgroup Driver: cgroupfs
Cgroup Driver: cgroupfs
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "docker info" |grep overlay2
 Storage Driver: overlay2
 Storage Driver: overlay2
 Storage Driver: overlay2
 Storage Driver: overlay2
 Storage Driver: overlay2
 Storage Driver: overlay2


3. Install the kubernetes client tool (kubectl), the cluster initialization tool (kubeadm), and the kubelet

[root@jumpserver json]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@jumpserver json]# ansible test_k8s_cluster -m copy -a "src=/etc/yum.repos.d/kubernetes.repo  dest=/etc/yum.repos.d/"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "yum install kubeadm-1.16.8 kubectl-1.16.8 kubelet-1.16.8 -y"


4. Configure the kubelet cgroup driver (must match the driver shown by docker info)

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "sed -i '4a Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"' /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf"
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "cat  /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf" |grep cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
Environment=KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs
[root@jumpserver json]# ansible test_k8s_cluster -m shell -a "systemctl daemon-reload"


5. Download the kubernetes images

View the required images

[root@kubernetes-master01 ~]# kubeadm config images list
I0407 16:34:47.889136    8134 version.go:251] remote version is much newer: v1.18.0; falling back to: stable-1.16
k8s.gcr.io/kube-apiserver:v1.16.8
k8s.gcr.io/kube-controller-manager:v1.16.8
k8s.gcr.io/kube-scheduler:v1.16.8
k8s.gcr.io/kube-proxy:v1.16.8
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.15-0
k8s.gcr.io/coredns:1.6.2

Download the images

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a 'images=(kube-apiserver:v1.16.8 kube-controller-manager:v1.16.8 kube-scheduler:v1.16.8 kube-proxy:v1.16.8 pause:3.1 coredns:1.6.2);\
for image in ${images[@]};do \
docker pull registry.cn-beijing.aliyuncs.com/citylife-k8s/${image};\
docker tag registry.cn-beijing.aliyuncs.com/citylife-k8s/${image} k8s.gcr.io/${image};\
docker image rm registry.cn-beijing.aliyuncs.com/citylife-k8s/${image}; done'

[root@jumpserver json]# ansible test_k8s_cluster -m shell -a 'docker images'
192.168.110.107 | CHANGED | rc=0 >>
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver            v1.16.8             48db9392345b        3 weeks ago         160MB
k8s.gcr.io/kube-controller-manager            v1.16.8             01aec835c89f        3 weeks ago         151MB
k8s.gcr.io/kube-scheduler            v1.16.8             133a50b2b327        3 weeks ago         83.6MB
k8s.gcr.io/kube-proxy              v1.16.8             3b8ffbdbcca3        3 weeks ago         82.8MB
k8s.gcr.io/coredns                1.6.2              bf261d157914        7 months ago        44.1MB
k8s.gcr.io/pause                 3.1               da86e6ba6ca1        2 years ago         742kB

192.168.110.106 | CHANGED | rc=0 >>
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver            v1.16.8             48db9392345b        3 weeks ago         160MB
k8s.gcr.io/kube-controller-manager            v1.16.8             01aec835c89f        3 weeks ago         151MB
k8s.gcr.io/kube-proxy              v1.16.8             3b8ffbdbcca3        3 weeks ago         82.8MB
k8s.gcr.io/kube-scheduler            v1.16.8             133a50b2b327        3 weeks ago         83.6MB
k8s.gcr.io/coredns                1.6.2              bf261d157914        7 months ago        44.1MB
k8s.gcr.io/pause                 3.1               da86e6ba6ca1        2 years ago         742kB
...
...


6. Initialize Master

The following steps are performed on all three masters.

6.1. Copy the etcd certificates to the master nodes (the masters need to communicate with the etcd cluster, so the certificate files are required)

[root@jumpserver create_cert]# ansible test_k8s_master -m copy -a "src=./cert dest=/application/kubernetes/pki/"
[root@jumpserver create_cert]# ansible test_k8s_master -m shell -a "mv /application/kubernetes/pki/cert/* /application/kubernetes/pki/"
[root@jumpserver create_cert]# ansible test_k8s_master -m shell -a "rm -rf /application/kubernetes/pki/cert"

6.2. Edit the initialization configuration file

[root@jumpserver json]# cat init_kubernetes.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.110.128 #IP of this node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubernetes-master01 #hostname of this node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  #list the hostnames and IPs of all master nodes to include in the apiserver certificate, plus the load-balanced virtual IP and the proxy node IPs
  - kubernetes-master01
  - kubernetes-master02
  - kubernetes-master03
  - 192.168.110.128
  - 192.168.110.130
  - 192.168.110.131
  - 192.168.110.230
  - 192.168.110.104
  - 192.168.110.105
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes #The name of the cluster
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  external:
    endpoints:
    #load-balanced virtual IP of the external etcd cluster
    - https://192.168.110.230:2379
    #certificate files used to access the etcd cluster
    caFile: /application/kubernetes/pki/ca.pem
    certFile: /application/kubernetes/pki/client.pem
    keyFile: /application/kubernetes/pki/client-key.pem
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.16.8 #kubernetes image version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 #pod CIDR; must match the flannel network
scheduler: {}

6.3. Distribute the configuration file to the master nodes

[root@jumpserver json]# ansible test_k8s_master -m copy -a "src=./init_kubernetes.yaml dest=/application/kubernetes/"
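
Once the file is on a master, the preflight checks can be run on their own before the real initialization (optional):

kubeadm init phase preflight --config=/application/kubernetes/init_kubernetes.yaml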

6.4, Initialize the first master

[root@kubernetes-master01 kubernetes]# kubeadm init --config=./init_kubernetes.yaml
...
...
[bootstrap-token] Using token: 83k4wo.uirwmdd3tajnkg23
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.110.128:6443 --token 83k4wo.uirwmdd3tajnkg23 \
    --discovery-token-ca-cert-hash sha256:424baf048926a2a42ea651443ff3803da3366135f76b75aa843970abc1bf009e

6.4.1. Create the kubectl configuration file

[root@kubernetes-master01 kubernetes]# mkdir -p $HOME/.kube
[root@kubernetes-master01 kubernetes]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
cp: overwrite '/root/.kube/config'? y
[root@kubernetes-master01 kubernetes]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

6.4.2. Change the apiserver address in the kubectl configuration file to the load-balanced VIP address

[root@kubernetes-master01 kubernetes]# vim /root/.kube/config 

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EUXdPREE0TWprek9Wb1hEVE13TURRd05qQTRNamt6T1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT1ZhClZpYU9WZTlKZmpTTHpuV08rVld6bmFLMFpUYnpCWk9HZ0hTRWNENitYZTFBODhKVHR5aCtWSGtGd1M2dFk2bU8KQXl1TnRibUhMaWlnREVlWUREbUlyMUtVa24wdFg0RmV6TzFaYzIzV1NwMG91Wnprbk1XQXVxTmYzRHBhL0x2Rgovc3VsMzd1RkJHOW1KVitJUFQ5MUNxSFRlbHpmMnhXUlhBVFhBYWhKL2xCSzdQaXVORzdvVW5OR2xVOEpSZFRvClBjQ1ozcytpSEFUamxSSWE3ajdvUmVzYUgxUUxvSFRmeVFEbWdWNnl3TzljTVc4VEt5a0FKbk53MDcyR3gra2YKSGJmMnlPUm5ieXYzNVRDeW9nQk8rYkM5OEpCNFVGQ1JvbFNYS1QxN25oR2pDVkd1cTNlV05TRnBQWWozWHZNdgo5N3VEelRyTk9nK1dPSitzL29jQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFMaGJPZ2kvWjRaLytuMC93Y3ltRktQQzI0SHoKUGtBcUIrNzd4ZmVWQUEvWCtzMFA2WThhZnVyb0oyL2VZY3NHbHhHUG15NGd5ell2TkhvTi9SVjNHc1dhVS9yZwpaK2xwZm0yOWJEUjNodXEyQnArNHl4czRnKzl5N1JrOGNRYzlWQlFmZmJhblk3N1kzclIzNGJFZ2FlL1FjbVd3CitLbFdrdFJKUDIrNTU3Vjl0VjdwRnBwbjVjekZqTE9xMXhaaUhObmRQRVhSNVNiZk9yQVFkbkRIVThrSG1BV1kKWU8zYjBjYk9yL05CeG9zVTNqUnRyK01oTE5SWDQ3OTdxcXN1bmNxbWF5VGErYjBlNy8wTU5mQS8vZEZsL0s0bgoyaUhmN2wzbHkzSUVaQUNaOW1RaFBNME9QN2dwa1pKTjhSbXJYS21sTzYzak1QdWl2Nmc5Rk95SGdUOD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://192.168.110.230:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
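
A quick way to confirm that kubectl now talks to the load-balanced VIP (optional check):

kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
kubectl cluster-info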

6.4.3. Install the flannel network plugin

[root@kubernetes-master01 kubernetes]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@kubernetes-master01 kubernetes]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created

6.4.4. View the pod status of k8s after installation

[root@kubernetes-master01 kubernetes]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                          READY   STATUS    RESTARTS   AGE
kube-system   coredns-5644d7b6d9-sxhm2                      1/1     Running   0          14m
kube-system   coredns-5644d7b6d9-v67nr                      1/1     Running   0          14m
kube-system   kube-apiserver-kubernetes-master01            1/1     Running   0          13m
kube-system   kube-controller-manager-kubernetes-master01   1/1     Running   0          12m
kube-system   kube-flannel-ds-amd64-zr82z                   1/1     Running   0          72s
kube-system   kube-proxy-hgsxr                              1/1     Running   0          14m
kube-system   kube-scheduler-kubernetes-master01            1/1     Running   0          13m
[root@kubernetes-master01 kubernetes]#
[root@kubernetes-master01 kubernetes]# kubectl get nodes
NAME                  STATUS   ROLES    AGE   VERSION
kubernetes-master01   Ready    master   15m   v1.16.8

Note: All pods are Running and the node is Ready, which means the first master has been initialized successfully.

6.4.5. Copy the certificates generated on master01 to the other two masters

[root@kubernetes-master01 kubernetes]# scp /etc/kubernetes/pki/* 192.168.110.130:/etc/kubernetes/pki/                                                                                                                                                                                    100%  451   408.5KB/s   00:00    
[root@kubernetes-master01 kubernetes]# 
[root@kubernetes-master01 kubernetes]# scp /etc/kubernetes/pki/* 192.168.110.131:/etc/kubernetes/pki/

6.5, Initialize the second master

Since the initialization configuration file copied earlier was written for master01, change advertiseAddress to master02's IP and name to master02's hostname.

[root@kubernetes-master02 ~]# vim /application/kubernetes/init_kubernetes.yaml 

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.110.130 #IP of this node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubernetes-master02 #hostname of this node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
...
...

6.5.1, Initialization

[root@kubernetes-master02 ~]# kubeadm init --config=/application/kubernetes/init_kubernetes.yaml
...
...
[bootstrap-token] Using token: 7d99tg.l61o827mk9oqz6qu
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-check] Initial timeout of 40s passed.
[addons]: Migrating CoreDNS Corefile
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.110.130:6443 --token 7d99tg.l61o827mk9oqz6qu \
    --discovery-token-ca-cert-hash sha256:424baf048926a2a42ea651443ff3803da3366135f76b75aa843970abc1bf009e

6.5.2, Create a kubectl configuration file

[root@kubernetes-master02 ~]# mkdir -p $HOME/.kube
[root@kubernetes-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@kubernetes-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

6.5.3. Change the apiserver address to the nginx load-balanced VIP address

[root@kubernetes-master02 ~]# vim .kube/config 

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EUXdPREE1TURFd05Wb1hEVE13TURRd05qQTVNREV3TlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTTh6CnJPZ1JNbmwyWUhTRXFkQmdyOXhqZWpGNEYxUGhnNlRxTC9OVlNCSDBCSVhHMjRXTUczS3lLb1o5US9QdElXTHEKUmZjdklVaVcwR1ZSTSttSGJJN1IxTWNRa3ovY2lPZTh2Rm5JV2RDTVhGNmtnenRTSVhiTnJORGZrOTMxQWgvbApjWW55bEYwaU1Id1FrOHl4V1QvTWh2dEh3QVR3MTJFM25NZTBrd1plY2puYnpYSGpqVmJZYVpZUHhEc2pWM3I3CmFIYVo0d0llZ2tSVTRYYi9VbkFGcE9TVkhFRnExMGltNGpwNGtaV3dzckhRenRkYVhhelBWU0hEdnM1bUFkeDgKcjcybGhhQnZyUytGeGY4elI5UTdncGtzeW5zMUZqNmpmSTFlR1Fjamt3eEkwdTU3TUVoOVZDbWFVSWRCU1g2UAo1S1JGVkVQRS94azllMWg0RlNzQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFdDVwNjVzZm5VUWdGaUJDN043RjVLSmZvaFkKZCs3WGxUalByYUVhT0x4SFIvMTJYdmRRVStGTmZxUVU4WTNqZVU2SEtiYjBjMEVYeFA3SzBPbGZoV0N3SHpJOApmbHJYR2hRUEt3eDN5V1haUktlV1N5b2kvdW15eFRrUytMQTBBaUVWYURETlBBVDM0NHdoZmlRYjJpS0NSZ2x3ClRCVFBhTlVmNURpeXZrQ0lLaDBqYkpjazMyYVFoYzVXbU94c05yR0ZuZFBqRm83d0IyM3hWOVVFNGlFY0hNN2UKOUZ4VWdZb0NZbVJjdVEzNkpZYi9HTkVxUTNaTjdUYzg4UWlua3J0RHpac211dUVHR3ZGKzVBMWNZRTNSYnVhZgpFU2xLUlVqRjRvaE81dUV5Rjc2ZlVzdS9rb2NadDRiTU9aNUFwYTlPWVNYbVRZbVRXWVR3dm9vYkd6cz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://192.168.110.230:6443
  name: kubernetes
contexts:

6.5.4, View node and pod information

[root@kubernetes-master02 ~]# kubectl get nodes
NAME                  STATUS   ROLES    AGE   VERSION
kubernetes-master01   Ready    master   79m   v1.16.8
kubernetes-master02   Ready    master   17m   v1.16.8
[root@kubernetes-master02 ~]# 
[root@kubernetes-master02 ~]# kubectl get pods -A -o wide
NAMESPACE     NAME                                          READY   STATUS    RESTARTS   AGE   IP                NODE                  NOMINATED NODE   READINESS GATES
kube-system   coredns-5644d7b6d9-4p799                      1/1     Running   0          47m   10.244.0.6        kubernetes-master01   <none>           <none>
kube-system   coredns-5644d7b6d9-rzvcn                      1/1     Running   0          11m   10.244.0.14       kubernetes-master01   <none>           <none>
kube-system   kube-apiserver-kubernetes-master01            1/1     Running   0          18m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-apiserver-kubernetes-master02            1/1     Running   0          10m   192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-controller-manager-kubernetes-master01   1/1     Running   0          78m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-controller-manager-kubernetes-master02   1/1     Running   0          10m   192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-flannel-ds-amd64-5jsd9                   1/1     Running   0          17m   192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-flannel-ds-amd64-zr82z                   1/1     Running   0          66m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-proxy-hgsxr                              1/1     Running   0          79m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-proxy-txmxc                              1/1     Running   0          17m   192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-scheduler-kubernetes-master01            1/1     Running   0          78m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-scheduler-kubernetes-master02            1/1     Running   0          10m   192.168.110.130   kubernetes-master02   <none>           <none>

6.5.5. Summary of initialization errors:

1. Error: initialization of the second master succeeded, but its pods did not appear and kubectl get nodes did not show master02.

Solution steps:

1. Check the initialization configuration file: because it was copied from master01, the name field had not been changed and still used master01's hostname.

2. Error: initialization of the second master succeeded and kubectl get nodes showed master02, but kubectl get pods did not show master02's pods.

Solution steps:

1. A previous failed attempt had been cleaned up with kubeadm reset, which removed the certificate files. Every time you re-run initialization, remember to re-copy the master01 certificate files (the sa.* and ca.* files under /etc/kubernetes/pki/) to the other master nodes.
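
For reference, that copy step could look like this (adjust the file list to whatever the failed attempt removed):

scp /etc/kubernetes/pki/{sa.*,ca.*} 192.168.110.130:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/{sa.*,ca.*} 192.168.110.131:/etc/kubernetes/pki/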


6.6. Initialize the third master

6.6.1. Change the advertiseAddress and name fields of the initialization configuration file

[root@kubernetes-master03 ~]# vim /application/kubernetes/init_kubernetes.yaml 

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.110.131 #IP of this node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubernetes-master03 #hostname of this node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---

6.6.2, Initialization

[root@kubernetes-master03 ~]# kubeadm init --config=/application/kubernetes/init_kubernetes.yaml
...
...
[bootstrap-token] Using token: wzsclp.2puihpckhg9otv4n
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons]: Migrating CoreDNS Corefile
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.110.131:6443 --token wzsclp.2puihpckhg9otv4n \
    --discovery-token-ca-cert-hash sha256:424baf048926a2a42ea651443ff3803da3366135f76b75aa843970abc1bf009e

6.6.3, Create a kubectl configuration file

[root@kubernetes-master03 ~]# mkdir -p $HOME/.kube
[root@kubernetes-master03 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@kubernetes-master03 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

6.6.4. Change the apiserver address to the nginx load-balanced VIP address

[root@kubernetes-master03 ~]# vim .kube/config 

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EUXdPREE0TWprek9Wb1hEVE13TURRd05qQTRNamt6T1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT1ZhClZpYU9WZTlKZmpTTHpuV08rVld6bmFLMFpUYnpCWk9HZ0hTRWNENitYZTFBODhKVHR5aCtWSGtGd1M2dFk2bU8KQXl1TnRibUhMaWlnREVlWUREbUlyMUtVa24wdFg0RmV6TzFaYzIzV1NwMG91Wnprbk1XQXVxTmYzRHBhL0x2Rgovc3VsMzd1RkJHOW1KVitJUFQ5MUNxSFRlbHpmMnhXUlhBVFhBYWhKL2xCSzdQaXVORzdvVW5OR2xVOEpSZFRvClBjQ1ozcytpSEFUamxSSWE3ajdvUmVzYUgxUUxvSFRmeVFEbWdWNnl3TzljTVc4VEt5a0FKbk53MDcyR3gra2YKSGJmMnlPUm5ieXYzNVRDeW9nQk8rYkM5OEpCNFVGQ1JvbFNYS1QxN25oR2pDVkd1cTNlV05TRnBQWWozWHZNdgo5N3VEelRyTk9nK1dPSitzL29jQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFMaGJPZ2kvWjRaLytuMC93Y3ltRktQQzI0SHoKUGtBcUIrNzd4ZmVWQUEvWCtzMFA2WThhZnVyb0oyL2VZY3NHbHhHUG15NGd5ell2TkhvTi9SVjNHc1dhVS9yZwpaK2xwZm0yOWJEUjNodXEyQnArNHl4czRnKzl5N1JrOGNRYzlWQlFmZmJhblk3N1kzclIzNGJFZ2FlL1FjbVd3CitLbFdrdFJKUDIrNTU3Vjl0VjdwRnBwbjVjekZqTE9xMXhaaUhObmRQRVhSNVNiZk9yQVFkbkRIVThrSG1BV1kKWU8zYjBjYk9yL05CeG9zVTNqUnRyK01oTE5SWDQ3OTdxcXN1bmNxbWF5VGErYjBlNy8wTU5mQS8vZEZsL0s0bgoyaUhmN2wzbHkzSUVaQUNaOW1RaFBNME9QN2dwa1pKTjhSbXJYS21sTzYzak1QdWl2Nmc5Rk95SGdUOD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://192.168.110.230:6443
  name: kubernetes
contexts:

6.6.5, View pod and node status

[root@kubernetes-master03 ~]# kubectl get nodes
NAME                  STATUS   ROLES    AGE    VERSION
kubernetes-master01   Ready    master   118m   v1.16.8
kubernetes-master02   Ready    master   56m    v1.16.8
kubernetes-master03   Ready    master   30m    v1.16.8
[root@kubernetes-master03 ~]# 
[root@kubernetes-master03 ~]# kubectl get pods -A -o wide
NAMESPACE     NAME                                          READY   STATUS    RESTARTS   AGE    IP                NODE                  NOMINATED NODE   READINESS GATES
kube-system   coredns-5644d7b6d9-4p799                      1/1     Running   0          87m    10.244.0.6        kubernetes-master01   <none>           <none>
kube-system   coredns-5644d7b6d9-s259f                      1/1     Running   0          71s    10.244.2.2        kubernetes-master03   <none>           <none>
kube-system   kube-apiserver-kubernetes-master01            1/1     Running   0          57m    192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-apiserver-kubernetes-master02            1/1     Running   0          50m    192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-apiserver-kubernetes-master03            1/1     Running   0          28m    192.168.110.131   kubernetes-master03   <none>           <none>
kube-system   kube-controller-manager-kubernetes-master01   1/1     Running   0          117m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-controller-manager-kubernetes-master02   1/1     Running   0          49m    192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-controller-manager-kubernetes-master03   1/1     Running   0          28m    192.168.110.131   kubernetes-master03   <none>           <none>
kube-system   kube-flannel-ds-amd64-58m8x                   1/1     Running   0          30m    192.168.110.131   kubernetes-master03   <none>           <none>
kube-system   kube-flannel-ds-amd64-5jsd9                   1/1     Running   0          56m    192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-flannel-ds-amd64-zr82z                   1/1     Running   0          105m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-proxy-hgsxr                              1/1     Running   0          118m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-proxy-lf9vn                              1/1     Running   0          30m    192.168.110.131   kubernetes-master03   <none>           <none>
kube-system   kube-proxy-txmxc                              1/1     Running   0          56m    192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-scheduler-kubernetes-master01            1/1     Running   0          117m   192.168.110.128   kubernetes-master01   <none>           <none>
kube-system   kube-scheduler-kubernetes-master02            1/1     Running   0          49m    192.168.110.130   kubernetes-master02   <none>           <none>
kube-system   kube-scheduler-kubernetes-master03            1/1     Running   0          29m    192.168.110.131   kubernetes-master03   <none>           <none>


7. Initialize worker nodes

7.1. Use ansible to run the join command on all worker nodes

[root@jumpserver ~]# ansible test_k8s_worker -m shell -a "kubeadm join 192.168.110.131:6443 --token wzsclp.2puihpckhg9otv4n --discovery-token-ca-cert-hash sha256:424baf048926a2a42ea651443ff3803da3366135f76b75aa843970abc1bf009e"

7.2. Modify the kubelet configuration file to change the apiserver address to the load-balanced VIP address

[root@jumpserver ~]# ansible test_k8s_worker -m shell -a "sed -i 's#server: https://192.168.110.131:6443#server: https://192.168.110.230:6443#g' /etc/kubernetes/kubelet.conf"
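
To confirm the change landed on every worker, an optional check is:

ansible test_k8s_worker -m shell -a "grep 'server:' /etc/kubernetes/kubelet.conf"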

7.3. Restart the kubelet service

[root@jumpserver ~]# ansible test_k8s_worker -m service -a "name=kubelet state=restarted"

7.4. Label the worker nodes from a master node

[root@kubernetes-master03 ~]# kubectl label node kubernetes-node01 node-role.kubernetes.io/worker=''
node/kubernetes-node01 labeled
[root@kubernetes-master03 ~]# kubectl label node kubernetes-node02 node-role.kubernetes.io/worker=''
node/kubernetes-node02 labeled
[root@kubernetes-master03 ~]# kubectl label node kubernetes-node03 node-role.kubernetes.io/worker=''
node/kubernetes-node03 labeled
[root@kubernetes-master03 ~]#

7.5. View the cluster status

[root@kubernetes-master03 ~]# kubectl get node
NAME                  STATUS   ROLES    AGE   VERSION
kubernetes-master01   Ready    master   17h   v1.16.8
kubernetes-master02   Ready    master   16h   v1.16.8
kubernetes-master03   Ready    master   16h   v1.16.8
kubernetes-node01     Ready    worker   14m   v1.16.8
kubernetes-node02     Ready    worker   14m   v1.16.8
kubernetes-node03     Ready    worker   14m   v1.16.8
[root@kubernetes-master03 ~]#


At this point, the highly available kubernetes cluster deployment is complete.

