k8s uses Harbor warehouse

title: kubeadm installation k8s version 1.15
image: /images/theme/k8s.jpg

Premise:
1. Every node where k8s is installed must have more than 1 CPU core (kubeadm requires at least 2 CPUs)
2. Alibaba cloud's repo and base sources

# Replace the default repos with Alibaba Cloud (Aliyun) mirrors for CentOS 7 base and EPEL
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

1. Set the system host name and host resolution

# Set this node's host name (use k8s-node01 / k8s-node02 on the workers)
hostnamectl  set-hostname  k8s-master
# Append cluster name resolution to /etc/hosts on every node
# (the bare IP/name lines are not runnable on their own)
cat >> /etc/hosts <<EOF
10.0.0.10 k8s-master
10.0.0.20 k8s-node01
10.0.0.21 k8s-node02
EOF

2. Install dependency package

yum install -y  conntrack ntpdate  ntp  ipvsadmin ipset  jq iptables  curl  sysstat  libseccomp wget  vim   net-tools git

3. Set firewall to Iptables and set empty rules

# Stop and disable firewalld (k8s CNI plugins manage iptables themselves)
systemctl  stop firewalld  &&  systemctl  disable firewalld
# Switch to iptables-services, flush all rules, and persist the empty ruleset
 yum -y install iptables-services  &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save

4. Close selinux close swap
If swap (virtual memory) is left enabled, Kubernetes reports an error: the kubelet refuses to run pods in swap.

   # Turn swap off now, and comment out every swap entry in /etc/fstab so it stays off after reboot
   swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
   # Put SELinux in permissive mode now, and disable it permanently in the config
   setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

5. Adjust kernel parameters for K8S

   cat > kubernetes.conf <<EOF
# NOTE: sysctl comments must be whole lines — a trailing "# ..." after a value
# becomes part of the value and makes sysctl fail to apply the setting.
# Let bridged traffic traverse iptables (required by kube-proxy)
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Avoid using swap space; only swap when the system is about to OOM
vm.swappiness=0
# Do not check whether physical memory is sufficient before allocating
vm.overcommit_memory=1
# Do not kernel-panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
# Turn off IPv6
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
   # Install into sysctl.d so the settings are applied on every boot
   cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
   # Apply immediately
   sysctl -p /etc/sysctl.d/kubernetes.conf

6. Adjust system time zone

   # Set the system time zone to Asia/Shanghai (China Standard Time)
   timedatectl set-timezone Asia/Shanghai
   # Keep the hardware clock (RTC) in UTC
   timedatectl set-local-rtc 0
   # Restart services that depend on the system time
   systemctl restart rsyslog 
   systemctl restart crond

7. No service required to shut down the system

   systemctl stop postfix && systemctl disable postfix

8. Set rsyslogd and SYSTEMd Journal
Use systemd-journald as the logging service

   mkdir -p /var/log/journal             # presence of this directory makes journald persist logs to disk
   mkdir -p /etc/systemd/journald.conf.d # drop-in directory for journald config snippets

Create journal's log profile

  # Journald drop-in: persistent, compressed logs with size and retention caps.
  # The heredoc delimiter must be at the start of the line (an indented EOF
  # never terminates the heredoc).
  cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk (/var/log/journal)
Storage=persistent

# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Maximum disk usage: 10G
SystemMaxUse=10G

# Maximum size of a single journal file: 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF

9. Restart service

  systemctl restart systemd-journald

10. Upgrade the system kernel to 4.4.x

The 3.10.x kernel that CentOS 7.x ships with has some bugs that make Docker and
Kubernetes unstable. Fix this by upgrading the kernel from the ELRepo repository:

upgrade

   rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
   # After installation, check that the new kernel's menu entry in
   # /boot/grub2/grub.cfg contains an initrd16 line; if it does not,
   # run the install once more.
   yum --enablerepo=elrepo-kernel install -y kernel-lt
   # Boot from the new kernel by default, then reboot
   grub2-set-default 'CentOS Linux (4.4.226-1.el7.elrepo.x86_64) 7 (Core)' && reboot

   # After the reboot, verify the running kernel version
   uname -r

11. Preconditions for Kube proxy to enable ipvs

Modify the scheduling mode to ipvs to solve the scheduling mode of pod and service

  # Load br_netfilter so bridged traffic is visible to netfilter
  # (extra words after the module name would be passed as bogus module parameters)
  modprobe br_netfilter
  # Script that loads every kernel module kube-proxy's ipvs mode needs
  cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
  # Make it executable, run it once, and verify the modules are loaded
  chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

12. Install Docker software

  # Docker's install prerequisites
  yum install -y yum-utils device-mapper-persistent-data lvm2

# Add the Aliyun mirror of the docker-ce repo
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

 yum update -y && yum install -y docker-ce
 systemctl restart docker && systemctl enable docker
 uname -r
   # Create /etc/docker (-p makes this safe to re-run)
   mkdir -p /etc/docker

   # Configure the daemon: systemd cgroup driver (must match the kubelet) and log rotation.
   # The heredoc delimiter must sit at the start of the line to terminate the heredoc.
   cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF

   # Drop-in directory for docker systemd unit overrides
   mkdir -p /etc/systemd/system/docker.service.d

13. Restart the docker service

   systemctl daemon-reload && systemctl restart docker && systemctl enable docker

Modify / etc/docker/daemon.json And send to other nodes

{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries":["https://hub.atguigu.com"]
}

 # Copy daemon.json to every node so all of them trust the Harbor registry
 scp  /etc/docker/daemon.json   10.0.0.10:/etc/docker/daemon.json
  scp  /etc/docker/daemon.json   10.0.0.20:/etc/docker/daemon.json
  scp  /etc/docker/daemon.json   10.0.0.21:/etc/docker/daemon.json 

Restart docker (all nodes)

systemctl restart docker systemctl restart docker

Import docker compose file and give execution permission

  # Install docker-compose: put the binary on PATH and make it executable.
  # (No need to move it into /usr/local/ first and then into bin/.)
  mv docker-compose /usr/local/bin/
  chmod a+x /usr/local/bin/docker-compose

Link: https://pan.baidu.com/s/1H5zIMIoFCzip9T34mEe1mQ
Extraction code: xdfg
After copying this content, open Baidu online mobile App, which is more convenient

Import package, extract and modify file
Link: https://pan.baidu.com/s/12fmXkfSV7Ar-bNAlHyLINQ
Extraction code: 85q0

tar zxvf harbor-offline-installer-v1.2.0.tgz
vim harbor.cfg
# In harbor.cfg change these two settings:
#   hostname = hub.atguigu.com
#   ui_url_protocol = https

# Create a directory to hold the certificates
mkdir -p /data/cert/
cd /data/cert/

# Generate a passphrase-protected 2048-bit RSA private key
openssl genrsa -des3 -out server.key 2048

# Generate a certificate signing request from the key
# (the correct flag is -out; "-oout" is a typo that makes openssl fail)
openssl req -new -key server.key -out server.csr

# Strip the passphrase so services can load the key unattended
cp server.key server.key.org
openssl rsa -in server.key.org -out server.key

# Self-sign the certificate, valid for 365 days
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
chmod a+x *

# Run the Harbor installer
./install.sh

Wait until the script execution is completed to add resolution in the local win

10.0.0.100 hub.atguigu.com

docker login test

[root@localhost harbor]# docker login  https://hub.atguigu.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
//password:
Harbor12345


(a blog written by Billy when watching video learning in Silicon Valley)

docker pull wangyanglinux/myapp:v1
Download an image
push to harbor

# Re-tag the image into Harbor's "library" project, then push it
docker tag  wangyanglinux/myapp:v1  hub.atguigu.com/library/myapp:v1 
docker push   hub.atguigu.com/library/myapp:v1

Start a deployment in kubernetes to test whether the image can be called
Start a deployment on the command line

kubectl   run nginx-deployment  --image=hub.atguigu.com/library/myapp:v1 --port=80 --replicas=1
kubectl run                 creates the deployment
nginx-deployment            specifies the deployment's name
--image=hub.atguigu.com/library/myapp:v1   specifies the image to use
--port=80                   specifies the exposed container port (optional, since the pod network is flat)
--replicas=1                specifies 1 replica
see deployment
[root@k8s-master ~]# kubectl  get deployment
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   0/1     1            0           16s
//View rs (deployment controls pod through rs)
[root@k8s-master ~]# kubectl  get rs
NAME                         DESIRED   CURRENT   READY   AGE
nginx-deployment-85756b779   1         1         1       23s
[root@k8s-master ~]# 
//View pod
[root@k8s-master ~]# kubectl  get pod
NAME                               READY   STATUS    RESTARTS   AGE
nginx-deployment-85756b779-bqjqg   1/1     Running   0          29s
[root@k8s-master ~]# kubectl  get pod -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
nginx-deployment-85756b779-bqjqg   1/1     Running   0          41s   10.244.2.2   k8s-node02   <none>           <none>
//Visit (through a flat network)
[root@k8s-master ~]# curl 10.244.2.2
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ~]# curl 10.244.2.2/hostname.html
nginx-deployment-85756b779-bqjqg
[root@k8s-master ~]# 
Number of copies of deployment added by command line method
kubectl  scale --replicas=3 deployment/nginx-deployment

How to access the pod of these three nginx
Through svc
Create an svc

kubectl   expose  deployment  nginx-deployment  --port=30000 --target-port=80
kubectl expose
deployment                  the resource type being exposed is a deployment
nginx-deployment            specifies the deployment's name
--port=30000                specifies the Service's port (a ClusterIP port, not a host port)
--target-port=80            specifies the pod's container port
establish svc
[root@k8s-master ~]# kubectl   expose  deployment  nginx-deployment  --port=30000 --target-port=80
service/nginx-deployment exposed
//View the address of the svc
[root@k8s-master ~]# kubectl  get  svc  -o wide
NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)     AGE    SELECTOR
kubernetes         ClusterIP   10.96.0.1       <none>        443/TCP     7h6m   <none>
nginx-deployment   ClusterIP   10.105.52.122   <none>        30000/TCP   2m4s   run=nginx-deployment
//Access test
[root@k8s-master ~]# curl 10.105.52.122:30000
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ~]# 
//Load by default polling

View address rules

yum install -y  ipvsadm-1.27-8.el7.x86_64
[root@k8s-master ~]# ipvsadm -Ln |grep  10.105.52.122 
TCP  10.105.52.122:30000 rr

The scheduling mechanism is to use scheduling ipvs module to achieve load balancing

This is an internal address that cannot be accessed externally

[root@k8s-master ~]# kubectl  get  svc
NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)     AGE
kubernetes         ClusterIP   10.96.0.1       <none>        443/TCP     7h14m
nginx-deployment   ClusterIP   10.105.52.122   <none>        30000/TCP   9m50s

Modify svc by

kubectl   edit  svc  nginx-deployment
//Modify the ClusterIP mode to NodePort
  type: NodePort
[root@k8s-master ~]# kubectl  get  svc
NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)           AGE
kubernetes         ClusterIP   10.96.0.1       <none>        443/TCP           7h17m
nginx-deployment   NodePort    10.105.52.122   <none>        30000:30048/TCP   12m

View port

[root@k8s-master ~]# netstat  -anpt |grep  :30048
tcp6       0      0 :::30048                :::*                    LISTEN      100907/kube-proxy   
[root@k8s-master ~]# 

Every node in the cluster exposes this NodePort (30048)

Tags: Docker Nginx yum JSON

Posted on Sun, 07 Jun 2020 03:02:58 -0700 by williamg