# Etcd cluster deployment
# NOTE(review): this line precedes the shebang, so the shebang on the next
# non-blank line is not honored by the kernel — run the script with `bash`.

#!/bin/bash
#k8s cluster quick deployment script
#The packages are located in /nfs/k8s, and the local k8s yum repo points to /nfs/k8s
#Server side of nfs share
##Package description:
#docker: the container runtime itself
#kubernetes-master: kubernetes server components (apiserver, controller-manager, scheduler)
#kubernetes-node: kubernetes client/worker components (kubelet, kube-proxy)
#etcd: key-value store used by the cluster for service discovery
#flannel: overlay network connecting docker containers across multiple servers

NFS_SERVER(){
# Configure this host as the NFS server exporting /nfs (the k8s package share).
yum install -y nfs-utils rpcbind
mkdir -pv /nfs
# Write the export before (re)starting so the share is live on first start;
# exportfs -rav below re-reads /etc/exports either way.
echo '/nfs  *(rw,no_root_squash)' > /etc/exports
# rpcbind must be running before the NFS server registers with it.
systemctl restart rpcbind nfs
systemctl enable rpcbind nfs
exportfs -rav
# Guard the cd so a failure doesn't leave callers running in the wrong dir.
cd /nfs || return
}

#nfs shared client

NFS_CLIENT(){
# Mount the cluster NFS share at /nfs.
# $1 (optional): NFS server address; defaults to 192.168.11.11 for
# backward compatibility with existing callers.
local server=${1:-192.168.11.11}
yum install -y nfs-utils rpcbind
# rpcbind first: the nfs client-side services register with it.
systemctl restart rpcbind nfs
systemctl enable rpcbind nfs
mkdir -pv /nfs
# Show the server's exports, then mount the share.
showmount -e "$server"
mount.nfs "${server}:/nfs" /nfs
df -hT
}

#Host name settings

HOST_NAME(){
# Prompt for this node's hostname, apply it, and make sure the cluster
# name/IP map is present in /etc/hosts (appended at most once).
local NAME
# -r: keep backslashes literal in the typed name (SC2162).
read -r -p 'please inpute hostname(eg.srv11):' NAME
# Deliberately a global: ETCD_INST / K8S_MASTER / K8S_NODE reuse it later.
HOSTNAME=${NAME}
hostnamectl --static set-hostname "$HOSTNAME"
hostname
# Append the cluster host map only if no entry is present yet (idempotent).
grep -E 'node|srv|dk.io' /etc/hosts || echo '
192.168.11.11   node1  k8s-master  srv11
192.168.11.12   node2  k8s-node1   srv12
192.168.11.13   node3  k8s-node2   srv13
192.168.11.14   node4  k8s-node3   srv14
192.168.11.11   dk.io
' >> /etc/hosts
cat /etc/hosts
}

#etcd, flannel installation configuration

ETCD_INST(){
\cp -fv /nfs/k8s/k8s.repo /etc/yum.repos.d/
yum install -y etcd flannel
[ -f /etc/etcd/etcd.conf.bak ] || cp -v /etc/etcd/etcd.conf{,.bak}
cd /etc/etcd
#Remove lines 5, 20, 26, 27, 28#Number
sed -i 's/localhost/0.0.0.0/' etcd.conf
sed -i '5s/^#//;20s/^#//;26s/^#//;27s/^#//;28s/^#//;' etcd.conf
egrep -nv '^#|^$' etcd.conf

sed -i "9,26s/default/${HOSTNAME}/;" etcd.conf
sed -i "20,21s/0.0.0.0/${HOSTNAME}/;" etcd.conf
egrep -nv '^#|^$' etcd.conf

Cluster='ETCD_INITIAL_CLUSTER="srv11=http://srv11:2380,srv12=http://srv12:2380,srv13=http://srv13:2380"'
sed -i "26c ${Cluster}" etcd.conf
egrep -nv '^#|^$' etcd.conf

sed -i '4s/127.0.0.1/srv11/' /etc/sysconfig/flanneld
cat /etc/sysconfig/flanneld

#Start etcd service

systemctl enable etcd
systemctl restart etcd
etcdctl get /atomic.io/network/config
[ $? -eq 0 ] || etcdctl mk /atomic.io/network/config '{ "Network": "10.0.0.0/16" }'
systemctl enable flanneld
systemctl restart flanneld
}

#Install k8s master server

K8S_MASTER(){
# Install and configure the Kubernetes master (apiserver, controller-manager,
# scheduler). Relies on the global HOSTNAME set earlier by HOST_NAME.
yum install -y docker kubernetes
systemctl enable docker
systemctl restart docker
docker info

# apiserver config tweaks.
# NOTE(review): sed line numbers assume the stock /etc/kubernetes/apiserver
# layout shipped by this distro's kubernetes package — verify on upgrade.
cd /etc/kubernetes/ || return
sed -i '11s/^# //;14s/^# //' apiserver
# Bind on all interfaces; point at the local etcd member.
sed -i '8s/127.0.0.1/0.0.0.0/' apiserver
sed -i "17s/127.0.0.1/${HOSTNAME}/" apiserver
# Drop admission plugins that require service-account signing keys.
sed -i "23s/SecurityContextDeny,ServiceAccount,//" apiserver
grep -Ev '^$|^#' apiserver
# Point the shared config at the master.
sed -i "22s/127.0.0.1/srv11/" config
grep -Ev '^$|^#' config

# Enable and start the master services (docker is already handled above;
# the original duplicated its enable/restart here).
local svc
for svc in kube-apiserver kube-controller-manager kube-scheduler; do
  systemctl enable "${svc}.service"
  systemctl restart "${svc}.service"
done
kubectl get nodes

# Smoke-test the apiserver REST endpoint.
curl http://192.168.11.11:8080
curl http://192.168.11.11:8080/healthz/ping
}

#K8s node deployment
#Install docker and kubernets (refer to k8s master installation method)

K8S_NODE(){
# Install and configure a Kubernetes worker node (kubelet + kube-proxy).
# Relies on the global HOSTNAME set earlier by HOST_NAME.
yum install -y docker kubernetes
systemctl enable docker
systemctl restart docker
docker info

# Point the shared config at the master.
# NOTE(review): sed line numbers assume the stock config/kubelet layout
# shipped by this distro's kubernetes package — verify on upgrade.
cd /etc/kubernetes/ || return
MASTER='srv11'
sed -i "22s/127.0.0.1/${MASTER}/" config
grep -Ev '^$|^#' config

# kubelet: bind all interfaces, uncomment the port line, register under
# this host's name, and point at the master apiserver (use $MASTER instead
# of the hard-coded srv11 the original had on one line).
sed -i "5s/127.0.0.1/0.0.0.0/" kubelet
sed -i '8s/^# //' kubelet
sed -i "11s/127.0.0.1/${HOSTNAME}/" kubelet
sed -i "14s/127.0.0.1/${MASTER}/" kubelet
grep -Ev '^$|^#' kubelet

# Enable and start the node services (docker is already handled above;
# the original duplicated its enable/restart here).
local svc
for svc in kubelet kube-proxy; do
  systemctl enable "${svc}.service"
  systemctl restart "${svc}.service"
done
kubectl -s http://srv11:8080 get nodes
}

#k8s cluster state detection

K8S_STAT(){
# Report etcd cluster membership/health and the nodes registered with k8s.
etcdctl member list
etcdctl cluster-health
# 'get nodes' for consistency with the other functions in this script.
kubectl -s http://srv11:8080 get nodes
}

# Interactive entry point: choose the role to deploy on this host.
cat <<-EOF
---K8S_Cluster_install--
1.k8s-master install
2.k8s-node install
3.k8s-status query
EOF
# -r keeps backslashes literal (SC2162); quote the selector in case.
read -r -p 'please input choice:' I
case "$I" in
1)
    # Master: serve the NFS share, then hostname, etcd and the master bits.
    NFS_SERVER
    read -r -p 'press enter to continue.'
    HOST_NAME
    ETCD_INST
    read -r -p 'press enter to continue.'
    K8S_MASTER
;;
2)
    # Node: mount the NFS share, then hostname, etcd and the node bits.
    NFS_CLIENT
    read -r -p 'press enter to continue.'
    HOST_NAME
    ETCD_INST
    read -r -p 'press enter to continue.'
    K8S_NODE
;;
3)
    K8S_STAT
;;
*)
    echo 'input error choice.'
    exit 127
;;
esac

# Tags: Docker Kubernetes kubelet yum

# Posted on Sat, 02 Nov 2019 00:29:16 -0700 by Iconoclast