Cluster deployment - Kubernetes installation

4. Install keepalived on the master nodes

#master01
yum install -y keepalived
cat >/etc/keepalived/keepalived.conf <<EOF
global_defs {
   router_id KUB_LVS
}

vrrp_script CheckMaster {
    script "curl -k https://192.168.68.1:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 111111
    }
    virtual_ipaddress {
        192.168.68.1/24 dev ens160
    }
    track_script {
        CheckMaster
    }
}
EOF

#master02/03
yum install -y keepalived
cat >/etc/keepalived/keepalived.conf <<EOF
global_defs {
   router_id KUB_LVS
}

vrrp_script CheckMaster {
    script "curl -k https://192.168.68.1:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 111111
    }
    virtual_ipaddress {
        192.168.68.1/24 dev ens160
    }
    track_script {
        CheckMaster
    }
}
EOF

systemctl enable keepalived && systemctl restart keepalived
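
To verify failover, check which node holds the VIP and watch it move when keepalived stops on the MASTER. A minimal check, using the ens160 interface and the 192.168.68.1 VIP from the configuration above:

#On master01 the VIP should be bound
ip addr show ens160 | grep 192.168.68.1
#Simulate a failure: the VIP should move to master02 (priority 90)
systemctl stop keepalived
#On master02 the VIP should now appear
ip addr show ens160 | grep 192.168.68.1
#Restore master01
systemctl start keepalived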

5. Configure certificates

5.1 download the self-signed certificate generation tools

Operate on the distribution machine master01.
Certificates can be generated with either openssl or cfssl; cfssl is used here.
The script cfssl.sh downloads the cfssl tools:

mkdir cfssl && cd cfssl
cat > cfssl.sh <<EOF
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/bin/cfssl
mv cfssljson_linux-amd64 /usr/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
EOF
chmod +x cfssl.sh
sh cfssl.sh
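
A quick sanity check that the tools landed in /usr/bin and run:

which cfssl cfssljson cfssl-certinfo
cfssl version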

5.2 generating Etcd certificate

Create directory
mkdir -p /root/etcd && cd /root/etcd

Certificate configuration
Etcd certificate configuration

#CA certificate configuration
cat > ca-config.json <<EOF
{
    "signing": {
      "default": {
        "expiry": "87600h"
      },
      "profiles": {
        "www": {
          "expiry": "89600h",
          "usages": [
             "signing",
             "key encipherment",
             "server auth",
             "client auth"
          ]
        }
      }  
    }
}
EOF

#Create CA certificate request file
cat > ca-csr.json <<EOF
{
    "CN": "etcd CA",
    "key": {
          "algo": "rsa",
          "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

#Create the etcd certificate request file and add all master IPs to the csr file  
cat > service-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "master01",
    "master02",
    "master03",
    "192.168.68.146",
    "192.168.68.147",
    "192.168.68.148"
    ],
    "key": {
       "algo": "rsa",
       "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "L": "Beijing",
        "ST": "Beijing"
      }
    ]
}
EOF

#Generate CA certificate  
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#Generate Etcd certificate  
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www service-csr.json | cfssljson -bare server

[root@master01 etcd]# ll
total 36
-rw-r--r-- 1 root root  315 Aug  8 01:25 ca-config.json
-rw-r--r-- 1 root root  956 Aug  8 01:31 ca.csr
-rw-r--r-- 1 root root  213 Aug  8 01:26 ca-csr.json
-rw------- 1 root root 1679 Aug  8 01:31 ca-key.pem
-rw-r--r-- 1 root root 1265 Aug  8 01:31 ca.pem
-rw-r--r-- 1 root root 1054 Aug  8 01:40 server.csr
-rw------- 1 root root 1675 Aug  8 01:40 server-key.pem #etcd client usage
-rw-r--r-- 1 root root 1379 Aug  8 01:40 server.pem
-rw-r--r-- 1 root root  323 Aug  8 01:29 service-csr.json
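
Optionally inspect a generated certificate to confirm that the hosts (SANs) and expiry took effect; cfssl-certinfo prints the certificate as JSON. The same check applies to the kubernetes certificates generated below:

cfssl-certinfo -cert server.pem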

kubernetes certificate configuration

mkdir -p /root/kubernetes && cd /root/kubernetes
#CA certificate configuration
cat > ca-config.json <<EOF
{
    "signing": {
      "default": {
        "expiry": "87600h"
      },
      "profiles": {
        "kubernetes": {
          "expiry": "89600h",
          "usages": [
             "signing",
             "key encipherment",
             "server auth",
             "client auth"
          ]
        }
      }  
    }
}
EOF

#Create CA certificate request file
cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
          "algo": "rsa",
          "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

#Create the API server certificate request file and add all master/node IPs and service names to the csr file  
cat > service-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
    "master01",
    "master02",
    "master03",
    "node01",
    "node02",
    "192.168.68.146",
    "192.168.68.147",
    "192.168.68.148",
    "192.168.68.149",
    "192.168.68.151",
    "192.168.68.1",
    "10.0.0.1",
    "10.0.0.2",
    "127.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
    ],
    "key": {
       "algo": "rsa",
       "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "L": "Beijing",
        "ST": "Beijing",
        "O": "k8s",
        "OU": "System"
      }
    ]
}
EOF

#Create kubernetes proxy certificate request
cat > kube-proxy-csr.json <<EOF
{
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
          "algo": "rsa",
          "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

#Generate CA certificate  
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#Generate API server certificate  
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-csr.json | cfssljson -bare server
#Generate Kube proxy certificate  
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

[root@master01 kubernetes]# ll
total 52
-rw-r--r-- 1 root root  322 Aug  8 01:43 ca-config.json
-rw-r--r-- 1 root root 1001 Aug  8 01:55 ca.csr
-rw-r--r-- 1 root root  268 Aug  8 01:53 ca-csr.json
-rw------- 1 root root 1675 Aug  8 01:55 ca-key.pem
-rw-r--r-- 1 root root 1359 Aug  8 01:55 ca.pem
-rw-r--r-- 1 root root 1009 Aug  8 01:57 kube-proxy.csr
-rw-r--r-- 1 root root  292 Aug  8 01:54 kube-proxy-csr.json
-rw------- 1 root root 1675 Aug  8 01:57 kube-proxy-key.pem
-rw-r--r-- 1 root root 1403 Aug  8 01:57 kube-proxy.pem
-rw-r--r-- 1 root root 1358 Aug  8 01:56 server.csr
-rw------- 1 root root 1675 Aug  8 01:56 server-key.pem
-rw-r--r-- 1 root root 1724 Aug  8 01:56 server.pem
-rw-r--r-- 1 root root  670 Aug  8 01:51 service-csr.json

6. Install Etcd

Download etcd binaries

mkdir /root/soft && cd /root/soft
wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
cd etcd-v3.3.10-linux-amd64
cp etcd etcdctl /usr/local/bin/
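
The binaries were only installed on master01 here; they also need to exist on master02 and master03. They can be distributed the same way the certificates are later, assuming the passwordless ssh used throughout this tutorial:

for i in master02 master03;do scp /usr/local/bin/etcd /usr/local/bin/etcdctl $i:/usr/local/bin/;done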

6.1 edit etcd configuration file

#master01
mkdir -p /etc/etcd/{cfg,ssl}
cat >/etc/etcd/cfg/etcd.conf<<EOF
#{Member}
ETCD_NAME="master01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.68.146:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.68.146:2379,http://192.168.68.146:2390"

#{Clustering}
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.146:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.146:2379"
ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#master02
mkdir -p /etc/etcd/{cfg,ssl}
cat >/etc/etcd/cfg/etcd.conf<<EOF
#{Member}
ETCD_NAME="master02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.68.147:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.68.147:2379,http://192.168.68.147:2390"

#{Clustering}
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.147:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.147:2379"
ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

#master03
mkdir -p /etc/etcd/{cfg,ssl}
cat >/etc/etcd/cfg/etcd.conf<<EOF
#{Member}
ETCD_NAME="master03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.68.148:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.68.148:2379,http://192.168.68.148:2390"

#{Clustering}
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.148:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.148:2379"
ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

Parameter Description:

Field name — description
ETCD_NAME Node name. If there are multiple nodes, each node should be modified to the name of this node.
ETCD_DATA_DIR Data directory.
ETCD_LISTEN_PEER_URLS Cluster communication listening address.
ETCD_LISTEN_CLIENT_URLS Client access listening address.
ETCD_INITIAL_ADVERTISE_PEER_URLS Cluster notification address.
ETCD_ADVERTISE_CLIENT_URLS Client notification address.
ETCD_INITIAL_CLUSTER Cluster node addresses are separated by commas if there are multiple nodes.
ETCD_INITIAL_CLUSTER_TOKEN Cluster token.
ETCD_INITIAL_CLUSTER_STATE The current status of joining a cluster. New is a new cluster, and existing means joining an existing cluster.

6.2 create the etcd systemd startup file

Create etcd system startup files on master01/02/03 respectively

cat >/usr/lib/systemd/system/etcd.service<<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/etcd/cfg/etcd.conf
ExecStart=/usr/local/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
--peer-cert-file=/etc/etcd/ssl/server.pem \
--peer-key-file=/etc/etcd/ssl/server-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

6.3 copy etcd certificates to the specified directory

This directory must match the certificate paths in the etcd startup file above.
With multiple master nodes, copy the certificates to every master.

mkdir -p /etc/etcd/ssl
\cp /root/etcd/*pem /etc/etcd/ssl/ -rf
#Copy the etcd certificate to each etcd node (three master nodes this time)
for i in master02 master03;do ssh $i mkdir -p /etc/etcd/{cfg,ssl};done
for i in master02 master03;do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl/;done

6.4 start etcd

Start etcd on all three masters; the first member started will block until enough peers join to form a quorum.

systemctl enable etcd
systemctl start etcd
systemctl status etcd

6.5 check whether the etcd cluster is healthy

[root@master01 system]# etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
--endpoint="https://192.168.68.146:2379" \
cluster-health
member 518905a4e1408b4a is healthy: got healthy result from https://192.168.68.148:2379
member 9affe5eacb47bb95 is healthy: got healthy result from https://192.168.68.147:2379
member d040d1696a38da95 is healthy: got healthy result from https://192.168.68.146:2379
cluster is healthy
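
etcd v3.3 also ships the v3 API. An equivalent health check with the v3 client, assuming the same certificates are used, looks like this:

ETCDCTL_API=3 etcdctl --endpoints="https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379" \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/server.pem \
--key=/etc/etcd/ssl/server-key.pem \
endpoint health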

6.6 assign the pod network segment for docker

Write the cluster pod segment information into etcd.
172.17.0.0/16 is the IP address segment for kubernetes pods.
The segment must be consistent with the --cluster-cidr parameter of kube-controller-manager.

 etcdctl --endpoint="https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379" \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/server.pem \
 --key-file=/etc/etcd/ssl/server-key.pem \
 set /coreos.com/network/config \
 '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'

Check whether the network segment is established

etcdctl --endpoint="https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379" \
 --ca-file=/etc/etcd/ssl/ca.pem \
 --cert-file=/etc/etcd/ssl/server.pem \
 --key-file=/etc/etcd/ssl/server-key.pem \
 get /coreos.com/network/config
 {"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}

7. Install docker

Install docker on all node nodes.
Run the system initialization script from earlier to deploy docker.
Note: the docker startup file is configured as follows:

[root@node02 ~]# more /usr/lib/systemd/system/docker.service 
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd --data-root=/data/docker $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target

8. Install flannel

8.1 download flannel binary package

All nodes

mkdir soft && cd soft
#Download link: https://pan.baidu.com/s/1M-3tgKkA0Pl0qMtlyT3G8Q Extraction code: drtr
#(flannel v0.10.0 is also published on the flannel GitHub releases page)
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /usr/local/bin/
#Copy to other nodes
for i in master02 master03 node01 node02;do scp /usr/local/bin/flanneld $i:/usr/local/bin/;done
for i in master02 master03 node01 node02;do scp /usr/local/bin/mk-docker-opts.sh $i:/usr/local/bin/;done

8.2 configuring flannel

mkdir -p /etc/flannel
cat >/etc/flannel/flannel.cfg<<EOF
FLANNEL_OPTIONS="-etcd-endpoints=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379  -etcd-cafile=/etc/etcd/ssl/ca.pem  -etcd-certfile=/etc/etcd/ssl/server.pem -etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

8.3 configuring the flanneld system startup file

cat >/usr/lib/systemd/system/flanneld.service<<EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/flannel/flannel.cfg
ExecStart=/usr/local/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

for i in master02 master03 node01 node02;do scp /usr/lib/systemd/system/flanneld.service $i:/usr/lib/systemd/system/;done

Startup script notes:
The mk-docker-opts.sh script writes the pod subnet segment information assigned by flannel into /run/flannel/subnet.env; when docker starts later, it uses the environment variables in this file to configure the docker0 bridge.
flanneld uses the interface of the system default route to communicate with other nodes. For nodes with multiple interfaces (such as public and internal networks), the -iface parameter can specify the communication interface, such as eth0 or ens160.

8.4 start flanneld and check the status

systemctl enable flanneld
systemctl start flanneld

All nodes should now have a flannel.1 interface with an IP in the 172.17.0.0/16 segment (check with ip addr show flannel.1):

4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
    link/ether 26:c8:e8:41:78:d4 brd ff:ff:ff:ff:ff:ff
    inet 172.17.39.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::24c8:e8ff:fe41:78d4/64 scope link 
       valid_lft forever preferred_lft forever

Stop flanneld on the node nodes before modifying the docker startup file:
systemctl stop flanneld

8.5 modify the docker startup file in node01 and node02

cat >/usr/lib/systemd/system/docker.service<<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
#docker uses flannel's subnet file
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd --data-root=/data/docker \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

8.6 restart docker

systemctl daemon-reload
systemctl restart docker

Check that the docker0 and flannel.1 IP addresses are in the same network segment (ip -4 addr show):

3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:22:f5:c8:4a brd ff:ff:ff:ff:ff:ff
    inet 172.17.49.1/24 brd 172.17.49.255 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
    link/ether e2:18:c3:93:cb:92 brd ff:ff:ff:ff:ff:ff
    inet 172.17.49.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::e018:c3ff:fe93:cb92/64 scope link 
       valid_lft forever preferred_lft forever

8.7 verify that docker0 on other nodes is reachable from each node

From each node node, ping the docker0 address of the other nodes to confirm the segments are routed.

#node02's docker0 IP is 172.17.49.1; pinging it from node01 succeeds
[root@node01 soft]# ping 172.17.49.1
PING 172.17.49.1 (172.17.49.1) 56(84) bytes of data.
64 bytes from 172.17.49.1: icmp_seq=1 ttl=64 time=0.299 ms
64 bytes from 172.17.49.1: icmp_seq=2 ttl=64 time=0.234 ms
^C
--- 172.17.49.1 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.234/0.266/0.299/0.036 ms

9. Install the master components

The components to be installed at the Master end are as follows:
kube-apiserver
kube-scheduler
kube-controller-manager

9.1 install Api Server service (all master nodes)

9.1.1 download kubernetes binary package

The kubernetes-server-linux-amd64.tar.gz package (v1.15.1 here, matching the node versions reported later) should already be downloaded into /root/soft.

cd /root/soft
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/
#Copy execution files to other master nodes
for i in master02 master03;do scp /usr/local/bin/kube* $i:/usr/local/bin/;done

9.1.2 configuring kubernetes certificates

Certificates are required for communication between components of kubernetes and need to be copied to each master node

mkdir -p /etc/kubernetes/{cfg,ssl}
cp /root/kubernetes/*.pem /etc/kubernetes/ssl/

#Copy to all nodes
for i in master02 master03 node01 node02;do ssh $i mkdir -p /etc/kubernetes/{cfg,ssl};done
for i in master02 master03 node01 node02;do scp /root/kubernetes/*.pem $i:/etc/kubernetes/ssl/;done

9.1.3 create TLS Bootstrapping Token

TLS bootstrapping lets the kubelet first connect to the apiserver as a predefined low-privilege user and then request a certificate from the apiserver; the kubelet's certificate is signed dynamically by the apiserver.
The token can be any 128-bit string and can be generated with a secure random number generator:

[root@master01 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
a37e9d743248a4589728d60cd35c159c

9.1.4 edit Token file

master01 operation. The fields in token.csv are:
a37e9d743248a4589728d60cd35c159c — the random token generated above
kubelet-bootstrap — user name
10001 — UID
system:kubelet-bootstrap — user group

cat >/etc/kubernetes/cfg/token.csv<<EOF
a37e9d743248a4589728d60cd35c159c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
#Transfer the token file to master02 and master03.
for i in master02 master03;do scp /etc/kubernetes/cfg/token.csv $i:/etc/kubernetes/cfg;done

9.1.5 create apiserver configuration file

The configuration file is essentially the same on every master; only node-specific addresses (if used) need to be modified.
master01

cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<EOF
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

On master02 and master03 the bind address and advertise address are simply set to 0.0.0.0 as well; alternatively, use each node's own IP (e.g. 192.168.68.147 on master02).
master02

cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<EOF
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

master03

cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<EOF
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--insecure-bind-address=0.0.0.0 \
--etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/etc/kubernetes/ssl/server.pem \
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/server.pem \
--etcd-keyfile=/etc/etcd/ssl/server-key.pem"
EOF

9.1.6 create kube-apiserver startup file

cat >/usr/lib/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

9.1.7 start kube-apiserver service

systemctl start kube-apiserver
systemctl status kube-apiserver
systemctl enable kube-apiserver

Check that the secure port 6443 is listening:

[root@master01 ~]# netstat -lntup | grep 6443   
tcp        0      0 192.168.68.146:6443     0.0.0.0:*               LISTEN      32470/kube-apiserve 
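
Since the insecure port is also enabled (--insecure-bind-address=0.0.0.0, default port 8080), a simple liveness probe is possible from the master itself; this is the same port the scheduler and controller-manager connect to below. The expected response is ok:

curl http://127.0.0.1:8080/healthz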

9.2 deploy Kube Scheduler service

Create the kube-scheduler configuration file (all master nodes):

cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOF
KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --bind-address=0.0.0.0 --master=127.0.0.1:8080 --leader-elect"
EOF

Parameter description:
--bind-address=0.0.0.0 — the address the scheduler binds to.
--master — connect to the local apiserver (non-encrypted port).
--leader-elect=true — cluster mode with leader election enabled; the node elected leader does the work while the others stand by.

9.2.1 create Kube scheduler startup file

Create the kube-scheduler systemd unit file:

cat >/usr/lib/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

9.2.2 start kube-scheduler service

systemctl start kube-scheduler
systemctl status kube-scheduler
systemctl enable kube-scheduler

9.2.3 viewing master component status

[root@master01 ~]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Healthy     ok                                                                                          
etcd-0               Healthy     {"health":"true"}                                                                           
etcd-2               Healthy     {"health":"true"}                                                                           
etcd-1               Healthy   {"health":"true"}

(controller-manager shows Unhealthy here only because it has not been deployed yet; it is installed in step 9.3.)

9.3 deploy Kube Controller Manager

9.3.1 create kube-controller-manager configuration file

cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=0.0.0.0 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
EOF

Parameter description:
--master=127.0.0.1:8080 — specify the apiserver address.
--leader-elect — leader election produces one leading node; the others stand by.
--service-cluster-ip-range — the IP address range for kubernetes services.

9.3.2 create kube-controller-manager startup file

cat >/usr/lib/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

9.3.3 start kube-controller-manager service

systemctl start kube-controller-manager
systemctl status kube-controller-manager
systemctl enable kube-controller-manager

[root@master01 ~]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; disabled; vendor preset: disabled)
   Active: active (running) since Mon 2020-08-10 10:31:05 CST; 50s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 8635 (kube-controller)
   CGroup: /system.slice/kube-controller-manager.service
           └─8635 /usr/local/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=0.0.0.0 --service-cluster-ip-range=10.0.0.0/24 --c...

9.3.4 viewing master component status

[root@master01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   

10. Node components

The components to be deployed on the node nodes:
kubelet
kube-proxy
flannel
docker

10.1 deploying kubelet components

kubelet runs on every node node, receives requests from kube-apiserver, manages the pod containers, and executes interactive commands such as exec, run and logs.
When kubelet starts, it automatically registers node information with kube-apiserver, and the built-in cadvisor collects and monitors the node's resource usage.

10.1.1 copy kubernetes files from the master node to the node nodes

Copy kubelet and Kube proxy binaries from master01 to node01 and node02

cd /root/soft
scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy node01:/usr/local/bin/
scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy node02:/usr/local/bin/

10.1.2 create kubelet bootstrap kubeconfig file

The kubeconfig configuration file in kubernetes is used to access cluster information. In a cluster with TLS enabled, identity authentication is required for every interaction with the cluster. The production environment is generally authenticated with a certificate, and the information required for authentication will be placed in the kubeconfig file.

master01 node

mkdir /root/config && cd /root/config
cat >environment.sh<<EOF
#Create kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=a37e9d743248a4589728d60cd35c159c #token created earlier
KUBE_APISERVER="https://192.168.68.1:6443" # VIP address
#Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
#Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=\${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
#Setting context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
#Set default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#Running this script produces the bootstrap.kubeconfig configuration file
EOF

Execute script

sh environment.sh
[root@master01 config]# sh environment.sh 
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
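
To confirm the file points at the VIP and embeds the CA, inspect it with kubectl (certificate data is redacted in the output):

kubectl config view --kubeconfig=bootstrap.kubeconfig
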
10.1.3 create kube-proxy kubeconfig file

cat >env_proxy.sh<<EOF
#Create Kube proxy kubeconfig file
BOOTSTRAP_TOKEN=a37e9d743248a4589728d60cd35c159c
KUBE_APISERVER="https://192.168.68.1:6443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=\${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
  
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
  
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
EOF

Execute script

[root@master01 config]# sh env_proxy.sh  
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".

10.1.4 copy kubeconfig files and certificates to all node nodes

Copy the bootstrap.kubeconfig and kube-proxy.kubeconfig files to all node nodes.

ssh node01 "mkdir -p /etc/kubernetes/{cfg,ssl}"
ssh node02 "mkdir -p /etc/kubernetes/{cfg,ssl}"

Copy certificate file

scp /etc/kubernetes/ssl/* node01:/etc/kubernetes/ssl/
scp /etc/kubernetes/ssl/* node02:/etc/kubernetes/ssl/

Copy kubeconfig file

cd /root/config
scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node01:/etc/kubernetes/cfg/
scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node02:/etc/kubernetes/cfg/
[root@master01 config]# scp /etc/kubernetes/ssl/* node01:/etc/kubernetes/ssl/
ca-key.pem                                                                                                                                                 100% 1675     3.5MB/s   00:00    
ca.pem                                                                                                                                                     100% 1359     3.0MB/s   00:00    
kube-proxy-key.pem                                                                                                                                         100% 1675     4.2MB/s   00:00    
kube-proxy.pem                                                                                                                                             100% 1403     3.9MB/s   00:00    
server-key.pem                                                                                                                                             100% 1675     4.2MB/s   00:00    
server.pem                                                                                                                                                 100% 1724     4.4MB/s   00:00    
[root@master01 config]# scp /etc/kubernetes/ssl/* node02:/etc/kubernetes/ssl/
ca-key.pem                                                                                                                                                 100% 1675     2.7MB/s   00:00    
ca.pem                                                                                                                                                     100% 1359     2.9MB/s   00:00    
kube-proxy-key.pem                                                                                                                                         100% 1675     4.0MB/s   00:00    
kube-proxy.pem                                                                                                                                             100% 1403     3.0MB/s   00:00    
server-key.pem                                                                                                                                             100% 1675     4.4MB/s   00:00    
server.pem                                                                                                                                                 100% 1724     4.0MB/s   00:00    
[root@master01 config]# cd /root/config/
[root@master01 config]# scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node01:/etc/kubernetes/cfg/
bootstrap.kubeconfig                                                                                                                                       100% 2166     1.6MB/s   00:00    
kube-proxy.kubeconfig                                                                                                                                      100% 6268     4.8MB/s   00:00    
[root@master01 config]# scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node02:/etc/kubernetes/cfg/
bootstrap.kubeconfig                                                                                                                                       100% 2166     1.6MB/s   00:00    
kube-proxy.kubeconfig                                                                                                                                      100% 6268     5.2MB/s   00:00    

10.1.5 create kubelet parameter file

The IP address must be modified for each node (node operation).
node01

cat >/etc/kubernetes/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.68.149
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

node02

cat >/etc/kubernetes/cfg/kubelet.config<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.68.151
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

10.1.6 create kubelet configuration file

The IP address must be modified for each node node.
The /etc/kubernetes/cfg/kubelet.kubeconfig file is generated automatically during bootstrap.

node01
cat >/etc/kubernetes/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.68.149 \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--config=/etc/kubernetes/cfg/kubelet.config \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=docker.io/kubernetes/pause:latest"
EOF

node02
cat >/etc/kubernetes/cfg/kubelet<<EOF
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.68.151 \
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
--config=/etc/kubernetes/cfg/kubelet.config \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=docker.io/kubernetes/pause:latest"
EOF

10.1.7 create kubelet systemd startup file

cat >/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

10.1.8 bind the kubelet-bootstrap user to the system cluster role

Master01 node operation

kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

10.1.9 start the kubelet service (node nodes)

systemctl start kubelet
systemctl status kubelet
systemctl enable kubelet

10.2 view and approve csr requests on the server

View the csr requests (master01 node operation):

[root@master01 config]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ   5m26s   kubelet-bootstrap   Pending
node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU   12m     kubelet-bootstrap   Pending

10.2.1 approve the requests

master01 node operation

kubectl certificate approve node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ
kubectl certificate approve node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU

[root@master01 config]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ   8m46s   kubelet-bootstrap   Approved,Issued
node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU   16m     kubelet-bootstrap   Approved,Issued
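
With many nodes, approving each CSR by name gets tedious. All currently pending requests can be approved in one line (use with care, since it approves everything pending):

kubectl get csr -o name | xargs -n 1 kubectl certificate approve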

10.3 handling duplicate node names

If the --hostname-override=192.168.68.151 field is not changed in /etc/kubernetes/cfg/kubelet when a new node is added, the node registers under a duplicate name. Delete the certificate request first and then apply again.
master node operation

kubectl  delete csr node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ

On the node, delete kubelet.kubeconfig, then restart the kubelet service to re-apply for a certificate:

rm -rf /etc/kubernetes/cfg/kubelet.kubeconfig

10.4 viewing node status

The status of all node nodes must be Ready

[root@master01 config]# kubectl get nodes
NAME             STATUS   ROLES    AGE     VERSION
192.168.68.149   Ready    <none>   6m24s   v1.15.1
192.168.68.151   Ready    <none>   6m38s   v1.15.1

10.5 deploying Kube proxy components

kube-proxy runs on the node nodes, watches for service and endpoint changes in the apiserver, and creates routing rules to load-balance service traffic.

10.5.1 create Kube proxy configuration file

Note: modify the --hostname-override address for each node; the node IPs differ.
node01

cat >/etc/kubernetes/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--metrics-bind-address=0.0.0.0 \
--hostname-override=192.168.68.149 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

node02

cat >/etc/kubernetes/cfg/kube-proxy<<EOF
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--metrics-bind-address=0.0.0.0 \
--hostname-override=192.168.68.151 \
--cluster-cidr=10.0.0.0/24 \
--kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

10.5.2 create kube-proxy startup file

cat >/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

10.5.3 start the service

systemctl start kube-proxy
systemctl status kube-proxy
systemctl enable kube-proxy
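
A quick check that kube-proxy is up, in the same style as the apiserver check earlier: the metrics endpoint from --metrics-bind-address listens on port 10249 by default, plus the health check port 10256.

netstat -lntup | grep kube-proxy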

11. Run Demo project

kubectl run nginx --image=nginx --replicas=2
[root@master01 config]# kubectl run nginx --image=nginx --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.

kubectl expose deployment nginx --port 88 --target-port=80 --type=NodePort

11.1 viewing pod

kubectl get pods
[root@master01 config]# kubectl get pods
NAME                     READY   STATUS              RESTARTS   AGE
nginx-7bb7cd8db5-577pp   0/1     ContainerCreating   0          27s
nginx-7bb7cd8db5-lqpzd   0/1     ContainerCreating   0          27s

[root@master01 config]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-7bb7cd8db5-577pp   1/1     Running   0          108s
nginx-7bb7cd8db5-lqpzd   1/1     Running   0          108s

11.2 view svc

[root@master01 config]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        6h58m
nginx        NodePort    10.0.0.61    <none>        88:42780/TCP   39s

11.3 accessing the web

[root@master01 config]# curl http://192.168.68.149:42780
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

11.4 delete the demo resources

kubectl delete deployment nginx
kubectl delete pods nginx
kubectl delete svc -l run=nginx
kubectl delete deployment.apps/nginx

[root@master01 config]# kubectl delete deployment nginx
deployment.extensions "nginx" deleted
[root@master01 config]# kubectl delete pods nginx
Error from server (NotFound): pods "nginx" not found
[root@master01 config]# kubectl delete svc -l run=nginx
service "nginx" deleted
[root@master01 config]# kubectl delete deployment.apps/nginx
Error from server (NotFound): deployments.apps "nginx" not found

(The NotFound errors are expected: deleting the deployment already removed its pods and the deployment object itself.)

11.5 service startup sequence

11.5.1 start the master node

systemctl start keepalived
systemctl start etcd
systemctl start kube-apiserver
systemctl start kube-scheduler
systemctl start kube-controller-manager
systemctl start flanneld
#View k8s cluster status
kubectl get cs
kubectl get nodes
kubectl get pods -A

11.5.2 start the node

systemctl start flanneld
systemctl start docker
systemctl start kubelet
systemctl start kube-proxy

11.5.3 stop the node

systemctl stop kube-proxy
systemctl stop kubelet
systemctl stop docker
systemctl stop flanneld
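
Since the order matters, the node sequences above can be wrapped in small helper loops; a sketch using the service names created in this tutorial:

#Start node services in order
for svc in flanneld docker kubelet kube-proxy;do systemctl start $svc;done
#Stop node services in reverse order
for svc in kube-proxy kubelet docker flanneld;do systemctl stop $svc;done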
