Setting up an offline 3-master Kubernetes HA cluster with sealos v3

env

  • sealos-v3.3.9-rc.3
  • kube1.19.0.tar.gz
  • OpenCloudOS 8.6 × 3 nodes (2 vCPU / 4 GB RAM / 40 GB disk each)

1. sealos and the offline package
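sealos v3 is a single static binary, and kube1.19.0.tar.gz is the offline package holding everything else (kube binaries, images, configs). Put both on the first master; sealos copies them to the other nodes itself and re-verifies md5 sums on each, as the log below shows. A minimal preparation sketch (the expected md5 values are taken from the run logged below):

# on the first master (cloudos-51)
chmod a+x ./sealos
md5sum ./sealos /root/kube1.19.0.tar.gz
# expected, per the log below:
#   b1fef37dd355c6d6842a20345a48b4fd  ./sealos
#   bdd6c97922918f6070a65521df2a8b47  /root/kube1.19.0.tar.gz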

2. init kube

# help
./sealos init -h

# init
./sealos init --user root --passwd '321321' \
    --master 172.24.20.51 --master 172.24.20.52 --master 172.24.20.53 \
    --pkg-url /root/kube1.19.0.tar.gz \
    --version v1.19.0

# init with custom pod/service CIDRs and a virtual IP
./sealos init --user root --passwd '321321' \
    --master 172.24.20.51 --master 172.24.20.52 --master 172.24.20.53 \
    --podcidr '10.20.0.0/16' --svccidr '10.40.0.0/16' --vip 10.10.10.10 \
    --pkg-url /root/kube1.19.0.tar.gz \
    --version v1.19.0

# init with a worker node as well
./sealos init --user root --passwd '321321' \
    --master 172.24.20.51 --master 172.24.20.52 --master 172.24.20.53 \
    --node 192.168.0.5 \
    --pkg-url /root/kube1.19.0.tar.gz \
    --version v1.19.0
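A few notes on the flags: --podcidr and --svccidr override the default pod and service subnets, and --vip sets the virtual IP that worker nodes use to reach the apiservers through sealos' built-in ipvs proxy (hence the excludeCIDRs entry for 10.10.10.10/32 in the generated kube-proxy config in the log below). To grow the cluster after init, sealos v3 also has a join subcommand; a sketch with illustrative IPs, verify the exact flags with ./sealos join -h:

# add more nodes later (illustrative IPs)
./sealos join --node 192.168.0.6
./sealos join --master 172.24.20.54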

# log

[root@cloudos-51 ~]# ./sealos init --user root --passwd '321321' \
> --master 172.24.20.51  --master 172.24.20.52  --master  172.24.20.53 \
>     --podcidr '10.20.0.0/16' --svccidr '10.40.0.0/16' --vip 10.10.10.10 \
> --pkg-url /root/kube1.19.0.tar.gz \
> --version v1.19.0
06:50:47 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
06:50:47 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

06:50:47 [DEBG] [ssh.go:58] [172.24.20.51:22] cat /etc/hosts |grep cloudos-51 || echo '172.24.20.51 cloudos-51' >> /etc/hosts
06:50:48 [INFO] [ssh.go:51] [172.24.20.51:22] 172.24.20.51 cloudos-51
06:50:48 [INFO] [ssh.go:51] [172.24.20.51:22] 172.20.20.51 cloudos-51
06:50:48 [INFO] [check.go:51] [172.24.20.51:22]  ------------ check ok
06:50:48 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] hostname
06:50:48 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: cloudos-52

06:50:48 [DEBG] [ssh.go:58] [172.24.20.52:22] cat /etc/hosts |grep cloudos-52 || echo '172.24.20.52 cloudos-52' >> /etc/hosts
06:50:48 [INFO] [ssh.go:51] [172.24.20.52:22] 172.24.20.52 cloudos-52
06:50:48 [INFO] [ssh.go:51] [172.24.20.52:22] 172.20.20.52 cloudos-52
06:50:48 [INFO] [check.go:51] [172.24.20.52:22]  ------------ check ok
06:50:48 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] hostname
06:50:49 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: cloudos-53

06:50:49 [DEBG] [ssh.go:58] [172.24.20.53:22] cat /etc/hosts |grep cloudos-53 || echo '172.24.20.53 cloudos-53' >> /etc/hosts
06:50:49 [INFO] [ssh.go:51] [172.24.20.53:22] 172.24.20.53 cloudos-53
06:50:49 [INFO] [ssh.go:51] [172.24.20.53:22] 172.20.20.53 cloudos-53
06:50:49 [INFO] [check.go:51] [172.24.20.53:22]  ------------ check ok
06:50:49 [INFO] [print.go:14] 
[globals]sealos config is:  {"Hosts":["172.24.20.51:22","172.24.20.52:22","172.24.20.53:22"],"Masters":["172.24.20.51:22","172.24.20.52:22","172.24.20.53:22"],"Nodes":null,"Network":"calico","ApiServer":"apiserver.cluster.local"}
06:50:49 [DEBG] [ssh.go:58] [172.24.20.53:22] mkdir -p /usr/bin || true
06:50:49 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /usr/bin || true
06:50:49 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /usr/bin || true
06:50:50 [DEBG] [download.go:30] [172.24.20.52:22]please wait for mkDstDir
06:50:50 [DEBG] [download.go:32] [172.24.20.52:22]please wait for before hook
06:50:50 [DEBG] [ssh.go:58] [172.24.20.52:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
06:50:50 [DEBG] [download.go:30] [172.24.20.53:22]please wait for mkDstDir
06:50:50 [DEBG] [download.go:32] [172.24.20.53:22]please wait for before hook
06:50:50 [DEBG] [ssh.go:58] [172.24.20.53:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
06:50:50 [DEBG] [download.go:30] [172.24.20.51:22]please wait for mkDstDir
06:50:50 [DEBG] [download.go:32] [172.24.20.51:22]please wait for before hook
06:50:50 [DEBG] [ssh.go:58] [172.24.20.51:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
06:50:50 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] ls -l /usr/bin/sealos 2>/dev/null |wc -l
06:50:50 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] ls -l /usr/bin/sealos 2>/dev/null |wc -l
06:50:50 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] ls -l /usr/bin/sealos 2>/dev/null |wc -l
06:50:50 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: 0

06:50:50 [DEBG] [scp.go:27] [ssh]source file md5 value is b1fef37dd355c6d6842a20345a48b4fd
06:50:50 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 0

06:50:50 [DEBG] [scp.go:27] [ssh]source file md5 value is b1fef37dd355c6d6842a20345a48b4fd
06:50:50 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 1

06:50:50 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] md5sum /usr/bin/sealos | cut -d" " -f1
06:50:51 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: b1fef37dd355c6d6842a20345a48b4fd

06:50:51 [INFO] [download.go:37] [172.24.20.51:22]SendPackage:  /usr/bin/sealos file is exist and ValidateMd5 success
06:50:51 [DEBG] [download.go:56] [172.24.20.51:22]please wait for after hook
06:50:51 [DEBG] [ssh.go:58] [172.24.20.51:22] chmod a+x /usr/bin/sealos
06:50:53 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 42.27MB ;speed is 42MB
06:50:53 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] md5sum /usr/bin/sealos | cut -d" " -f1
06:50:53 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 42.27MB ;speed is 42MB
06:50:53 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] md5sum /usr/bin/sealos | cut -d" " -f1
06:50:54 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: b1fef37dd355c6d6842a20345a48b4fd

06:50:54 [DEBG] [scp.go:30] [ssh]host: 172.24.20.53:22 , remote md5: b1fef37dd355c6d6842a20345a48b4fd
06:50:54 [INFO] [scp.go:34] [ssh]md5 validate true
06:50:54 [INFO] [download.go:50] [172.24.20.53:22]copy file md5 validate success
06:50:54 [DEBG] [download.go:56] [172.24.20.53:22]please wait for after hook
06:50:54 [DEBG] [ssh.go:58] [172.24.20.53:22] chmod a+x /usr/bin/sealos
06:50:54 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: b1fef37dd355c6d6842a20345a48b4fd

06:50:54 [DEBG] [scp.go:30] [ssh]host: 172.24.20.52:22 , remote md5: b1fef37dd355c6d6842a20345a48b4fd
06:50:54 [INFO] [scp.go:34] [ssh]md5 validate true
06:50:54 [INFO] [download.go:50] [172.24.20.52:22]copy file md5 validate success
06:50:54 [DEBG] [download.go:56] [172.24.20.52:22]please wait for after hook
06:50:54 [DEBG] [ssh.go:58] [172.24.20.52:22] chmod a+x /usr/bin/sealos
06:50:56 [DEBG] [ssh.go:58] [172.24.20.53:22] mkdir -p /root || true
06:50:56 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /root || true
06:50:56 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /root || true
06:50:57 [DEBG] [download.go:30] [172.24.20.53:22]please wait for mkDstDir
06:50:57 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] ls -l /root/kube1.19.0.tar.gz 2>/dev/null |wc -l
06:50:57 [DEBG] [download.go:30] [172.24.20.52:22]please wait for mkDstDir
06:50:57 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] ls -l /root/kube1.19.0.tar.gz 2>/dev/null |wc -l
06:50:57 [DEBG] [download.go:30] [172.24.20.51:22]please wait for mkDstDir
06:50:57 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] ls -l /root/kube1.19.0.tar.gz 2>/dev/null |wc -l
06:50:57 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: 0

06:50:57 [DEBG] [scp.go:27] [ssh]source file md5 value is bdd6c97922918f6070a65521df2a8b47
06:50:57 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 1

06:50:57 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 1

06:50:59 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] md5sum /root/kube1.19.0.tar.gz | cut -d" " -f1
06:50:59 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] md5sum /root/kube1.19.0.tar.gz | cut -d" " -f1
06:51:01 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: bdd6c97922918f6070a65521df2a8b47

06:51:01 [INFO] [download.go:37] [172.24.20.51:22]SendPackage:  /root/kube1.19.0.tar.gz file is exist and ValidateMd5 success
06:51:01 [DEBG] [download.go:56] [172.24.20.51:22]please wait for after hook
06:51:01 [DEBG] [ssh.go:58] [172.24.20.51:22] cd /root && rm -rf kube && tar zxvf kube1.19.0.tar.gz  && cd /root/kube/shell && rm -f ../bin/sealos && bash init.sh && sed -i '/kubectl/d;/sealos/d' /root/.bashrc  && echo 'command -v kubectl &>/dev/null && source <(kubectl completion bash)' >> /root/.bashrc && echo '[ -x /usr/bin/sealos ] && source <(sealos completion bash)' >> /root/.bashrc && source /root/.bashrc
06:51:02 [INFO] [ssh.go:51] [172.24.20.51:22] kube/
06:51:02 [INFO] [ssh.go:51] [172.24.20.51:22] kube/README.md
06:51:02 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/
06:51:02 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubeadm
06:51:03 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 100.00MB ;speed is 100MB
06:51:04 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: bdd6c97922918f6070a65521df2a8b47

06:51:04 [INFO] [download.go:37] [172.24.20.52:22]SendPackage:  /root/kube1.19.0.tar.gz file is exist and ValidateMd5 success
06:51:04 [DEBG] [download.go:56] [172.24.20.52:22]please wait for after hook
06:51:04 [DEBG] [ssh.go:58] [172.24.20.52:22] cd /root && rm -rf kube && tar zxvf kube1.19.0.tar.gz  && cd /root/kube/shell && rm -f ../bin/sealos && bash init.sh && sed -i '/kubectl/d;/sealos/d' /root/.bashrc  && echo 'command -v kubectl &>/dev/null && source <(kubectl completion bash)' >> /root/.bashrc && echo '[ -x /usr/bin/sealos ] && source <(sealos completion bash)' >> /root/.bashrc && source /root/.bashrc
06:51:04 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubectl
06:51:04 [INFO] [ssh.go:51] [172.24.20.52:22] kube/
06:51:04 [INFO] [ssh.go:51] [172.24.20.52:22] kube/README.md
06:51:04 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/
06:51:04 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubeadm
06:51:05 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubectl
06:51:06 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubelet
06:51:06 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubelet
06:51:08 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubelet-pre-start.sh
06:51:08 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/conntrack
06:51:08 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/sealos
06:51:08 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 200.00MB ;speed is 100MB
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/10-kubeadm.conf
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/calico.yaml
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/docker.service
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/kubeadm.yaml
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/kubelet.service
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/net/
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/net/calico.yaml
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/docker/
06:51:09 [INFO] [ssh.go:51] [172.24.20.52:22] kube/docker/docker.tgz
06:51:10 [INFO] [ssh.go:51] [172.24.20.52:22] kube/images/
06:51:10 [INFO] [ssh.go:51] [172.24.20.52:22] kube/images/images.tar
06:51:11 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubelet-pre-start.sh
06:51:11 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/conntrack
06:51:11 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/sealos
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/10-kubeadm.conf
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/calico.yaml
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/docker.service
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/kubeadm.yaml
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/kubelet.service
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/net/
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/net/calico.yaml
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/docker/
06:51:14 [INFO] [ssh.go:51] [172.24.20.51:22] kube/docker/docker.tgz
06:51:14 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 300.00MB ;speed is 100MB
06:51:16 [INFO] [ssh.go:51] [172.24.20.51:22] kube/images/
06:51:16 [INFO] [ssh.go:51] [172.24.20.51:22] kube/images/images.tar
06:51:19 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 400.00MB ;speed is 100MB
06:51:22 [INFO] [scp.go:101] [ssh][172.24.20.53:22]transfer total size is: 459.87MB ;speed is 59MB
06:51:22 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] md5sum /root/kube1.19.0.tar.gz | cut -d" " -f1
06:51:24 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: bdd6c97922918f6070a65521df2a8b47

06:51:24 [DEBG] [scp.go:30] [ssh]host: 172.24.20.53:22 , remote md5: bdd6c97922918f6070a65521df2a8b47
06:51:24 [INFO] [scp.go:34] [ssh]md5 validate true
06:51:24 [INFO] [download.go:50] [172.24.20.53:22]copy file md5 validate success
06:51:24 [DEBG] [download.go:56] [172.24.20.53:22]please wait for after hook
06:51:24 [DEBG] [ssh.go:58] [172.24.20.53:22] cd /root && rm -rf kube && tar zxvf kube1.19.0.tar.gz  && cd /root/kube/shell && rm -f ../bin/sealos && bash init.sh && sed -i '/kubectl/d;/sealos/d' /root/.bashrc  && echo 'command -v kubectl &>/dev/null && source <(kubectl completion bash)' >> /root/.bashrc && echo '[ -x /usr/bin/sealos ] && source <(sealos completion bash)' >> /root/.bashrc && source /root/.bashrc
06:51:24 [INFO] [ssh.go:51] [172.24.20.53:22] kube/
06:51:24 [INFO] [ssh.go:51] [172.24.20.53:22] kube/README.md
06:51:24 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/
06:51:24 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/kubeadm
06:51:25 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/kubectl
06:51:26 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/kubelet
06:51:27 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/kubelet-pre-start.sh
06:51:27 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/conntrack
06:51:27 [INFO] [ssh.go:51] [172.24.20.53:22] kube/bin/sealos
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/10-kubeadm.conf
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/calico.yaml
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/docker.service
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/kubeadm.yaml
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/kubelet.service
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/net/
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/conf/net/calico.yaml
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/docker/
06:51:28 [INFO] [ssh.go:51] [172.24.20.53:22] kube/docker/docker.tgz
06:51:29 [INFO] [ssh.go:51] [172.24.20.53:22] kube/images/
06:51:29 [INFO] [ssh.go:51] [172.24.20.53:22] kube/images/images.tar
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/init.sh
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/master.sh
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/docker.sh
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + storage=/var/lib/docker
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + harbor_ip=127.0.0.1
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + mkdir -p /var/lib/docker
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + command_exists docker
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + command -v docker
06:51:31 [INFO] [ssh.go:51] [172.24.20.52:22] + systemctl restart docker.service
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22] + docker version
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22] Client: Docker Engine - Community
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Version:           19.03.0
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  API version:       1.40
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Go version:        go1.12.5
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Git commit:        aeac9490dc
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Built:             Wed Jul 17 18:11:50 2019
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  OS/Arch:           linux/amd64
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Experimental:      false
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22] Server: Docker Engine - Community
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  Engine:
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          19.03.0
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   API version:      1.40 (minimum version 1.12)
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Go version:       go1.12.5
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Git commit:       aeac9490dc
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Built:            Wed Jul 17 18:22:15 2019
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   OS/Arch:          linux/amd64
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Experimental:     false
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  containerd:
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          v1.2.6
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        894b81a4b802e4eb2a91d1ce216b8817763c29fb
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  runc:
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          1.0.0-rc8
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        425e105d5a03fabd737a126ad93d62a9eeede87f
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]  docker-init:
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          0.18.0
06:51:33 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        fec3683
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.4.119-20.0009.20
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/00-tencentos.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.printk = 4
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv6.conf.all.disable_ipv6 = 0
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] vm.oom_dump_tasks = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.ctrl-alt-del = 0
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.sysrq_use_leftctrl = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.ip_local_reserved_ports = 48369,36000,56000
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.yama.ptrace_scope = 0
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-coredump.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_pipe_limit = 16
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-default.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.sysrq = 16
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_uses_pid = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.kptr_restrict = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.rp_filter = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.accept_source_route = 0
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.promote_secondaries = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.core.default_qdisc = fq_codel
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] fs.protected_hardlinks = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] fs.protected_symlinks = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.core.optmem_max = 81920
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-pid-max.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.pid_max = 4194304
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.d/99-sysctl.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.d/k8s.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.bridge.bridge-nf-call-ip6tables = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.bridge.bridge-nf-call-iptables = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.conf ...
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.ip_forward = 1
06:51:34 [INFO] [ssh.go:51] [172.24.20.52:22] setenforce: SELinux is disabled
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/init.sh
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/master.sh
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/docker.sh
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + storage=/var/lib/docker
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + harbor_ip=127.0.0.1
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + mkdir -p /var/lib/docker
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + command_exists docker
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + command -v docker
06:51:40 [INFO] [ssh.go:51] [172.24.20.51:22] + systemctl restart docker.service
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] + docker version
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] Client: Docker Engine - Community
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Version:           19.03.0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  API version:       1.40
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Go version:        go1.12.5
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Git commit:        aeac9490dc
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Built:             Wed Jul 17 18:11:50 2019
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  OS/Arch:           linux/amd64
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Experimental:      false
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] 
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] Server: Docker Engine - Community
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  Engine:
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          19.03.0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   API version:      1.40 (minimum version 1.12)
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Go version:       go1.12.5
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Git commit:       aeac9490dc
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Built:            Wed Jul 17 18:22:15 2019
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   OS/Arch:          linux/amd64
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Experimental:     false
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  containerd:
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          v1.2.6
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        894b81a4b802e4eb2a91d1ce216b8817763c29fb
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  runc:
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          1.0.0-rc8
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        425e105d5a03fabd737a126ad93d62a9eeede87f
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]  docker-init:
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          0.18.0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        fec3683
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.4.119-20.0009.20
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/00-tencentos.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.printk = 4
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv6.conf.all.disable_ipv6 = 0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] vm.oom_dump_tasks = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.ctrl-alt-del = 0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.sysrq_use_leftctrl = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.ip_local_reserved_ports = 48369,36000,56000
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.yama.ptrace_scope = 0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-coredump.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_pipe_limit = 16
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-default.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.sysrq = 16
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_uses_pid = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.kptr_restrict = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.rp_filter = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.accept_source_route = 0
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.promote_secondaries = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.core.default_qdisc = fq_codel
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] fs.protected_hardlinks = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] fs.protected_symlinks = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.core.optmem_max = 81920
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-pid-max.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.pid_max = 4194304
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.d/99-sysctl.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.d/k8s.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.bridge.bridge-nf-call-ip6tables = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.bridge.bridge-nf-call-iptables = 1
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.conf ...
06:51:43 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.ip_forward = 1
06:51:45 [INFO] [ssh.go:51] [172.24.20.51:22] setenforce: SELinux is disabled
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/etcd:3.4.9-1
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/pause:3.2
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: calico/kube-controllers:v3.8.2
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: k8s.gcr.io/coredns:1.7.0
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: calico/node:v3.8.2
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: calico/cni:v3.8.2
06:51:51 [INFO] [ssh.go:51] [172.24.20.52:22] Loaded image: calico/pod2daemon-flexvol:v3.8.2
06:51:55 [INFO] [ssh.go:51] [172.24.20.52:22] driver is cgroupfs
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] kube/shell/
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] kube/shell/init.sh
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] kube/shell/master.sh
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] kube/shell/docker.sh
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + storage=/var/lib/docker
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + harbor_ip=127.0.0.1
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + mkdir -p /var/lib/docker
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + command_exists docker
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + command -v docker
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ get_distribution
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ lsb_dist=
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ '[' -r /etc/os-release ']'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] +++ . /etc/os-release
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ NAME=OpenCloudOS
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ VERSION=8.6
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ ID=opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ ID_LIKE='rhel fedora'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ VERSION_ID=8.6
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ PLATFORM_ID=platform:oc8
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ PRETTY_NAME='OpenCloudOS 8.6'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ ANSI_COLOR='0;31'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ CPE_NAME=cpe:/o:opencloudos:opencloudos:8
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ HOME_URL=https://www.opencloudos.org/
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++++ BUG_REPORT_URL=https://bugs.opencloudos.tech/
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] +++ echo opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ lsb_dist=opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ echo opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + lsb_dist=opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ echo opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] ++ tr '[:upper:]' '[:lower:]'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + lsb_dist=opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + echo 'current system is opencloudos'
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] current system is opencloudos
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + case "$lsb_dist" in
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + cp ../conf/docker.service /usr/lib/systemd/system/docker.service
06:51:55 [INFO] [ssh.go:51] [172.24.20.53:22] + tar --strip-components=1 -xvzf ../docker/docker.tgz -C /usr/bin
06:51:56 [INFO] [ssh.go:51] [172.24.20.53:22] docker/ctr
06:51:57 [INFO] [ssh.go:51] [172.24.20.53:22] docker/runc
06:51:57 [INFO] [ssh.go:51] [172.24.20.53:22] docker/dockerd
06:51:59 [INFO] [ssh.go:51] [172.24.20.53:22] docker/docker
06:52:01 [INFO] [ssh.go:51] [172.24.20.53:22] docker/containerd
06:52:02 [INFO] [ssh.go:51] [172.24.20.53:22] docker/docker-init
06:52:02 [INFO] [ssh.go:51] [172.24.20.53:22] docker/containerd-shim
06:52:03 [INFO] [ssh.go:51] [172.24.20.53:22] docker/docker-proxy
06:52:03 [INFO] [ssh.go:51] [172.24.20.53:22] + chmod a+x /usr/bin
06:52:03 [INFO] [ssh.go:51] [172.24.20.53:22] + systemctl enable docker.service
06:52:03 [INFO] [ssh.go:51] [172.24.20.53:22] Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
06:52:04 [INFO] [ssh.go:51] [172.24.20.53:22] + systemctl restart docker.service
06:52:06 [INFO] [ssh.go:51] [172.24.20.53:22] + cat
06:52:06 [INFO] [ssh.go:51] [172.24.20.53:22] + systemctl restart docker.service
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/etcd:3.4.9-1
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/pause:3.2
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: calico/kube-controllers:v3.8.2
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: k8s.gcr.io/coredns:1.7.0
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: calico/node:v3.8.2
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: calico/cni:v3.8.2
06:52:06 [INFO] [ssh.go:51] [172.24.20.51:22] Loaded image: calico/pod2daemon-flexvol:v3.8.2
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] + docker version
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] Client: Docker Engine - Community
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Version:           19.03.0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  API version:       1.40
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Go version:        go1.12.5
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Git commit:        aeac9490dc
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Built:             Wed Jul 17 18:11:50 2019
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  OS/Arch:           linux/amd64
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Experimental:      false
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] Server: Docker Engine - Community
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  Engine:
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Version:          19.03.0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   API version:      1.40 (minimum version 1.12)
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Go version:       go1.12.5
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Git commit:       aeac9490dc
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Built:            Wed Jul 17 18:22:15 2019
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   OS/Arch:          linux/amd64
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Experimental:     false
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  containerd:
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Version:          v1.2.6
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   GitCommit:        894b81a4b802e4eb2a91d1ce216b8817763c29fb
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  runc:
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Version:          1.0.0-rc8
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   GitCommit:        425e105d5a03fabd737a126ad93d62a9eeede87f
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]  docker-init:
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   Version:          0.18.0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22]   GitCommit:        fec3683
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.4.119-20.0009.20
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/00-tencentos.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.printk = 4
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv6.conf.all.disable_ipv6 = 0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] vm.oom_dump_tasks = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.ctrl-alt-del = 0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.sysrq_use_leftctrl = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv4.ip_local_reserved_ports = 48369,36000,56000
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.yama.ptrace_scope = 0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/50-coredump.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.core_pipe_limit = 16
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/50-default.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.sysrq = 16
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.core_uses_pid = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.kptr_restrict = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv4.conf.all.rp_filter = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv4.conf.all.accept_source_route = 0
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv4.conf.all.promote_secondaries = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.core.default_qdisc = fq_codel
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] fs.protected_hardlinks = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] fs.protected_symlinks = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.core.optmem_max = 81920
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /usr/lib/sysctl.d/50-pid-max.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] kernel.pid_max = 4194304
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /etc/sysctl.d/99-sysctl.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /etc/sysctl.d/k8s.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.bridge.bridge-nf-call-ip6tables = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.bridge.bridge-nf-call-iptables = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] * Applying /etc/sysctl.conf ...
06:52:09 [INFO] [ssh.go:51] [172.24.20.53:22] net.ipv4.ip_forward = 1
06:52:09 [INFO] [ssh.go:51] [172.24.20.51:22] driver is cgroupfs
06:52:10 [INFO] [ssh.go:51] [172.24.20.53:22] setenforce: SELinux is disabled
79d541cda6cb: Loading layer  3.041MB/3.041MB
e9933a1f21f5: Loading layer  1.734MB/1.734MB
d85a13cfa53e: Loading layer  107.3MB/107.3MB
Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
c3a6120d2fd6: Loading layer  115.2MB/115.2MB
Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
0d1435bd79e4: Loading layer  3.062MB/3.062MB
2aef7a73d4b0: Loading layer   2.13MB/2.13MB
ec3830e15d9c: Loading layer  225.3MB/225.3MB
4d5d7883c216: Loading layer   2.19MB/2.19MB
5d3a32005e6b: Loading layer  21.95MB/21.95MB
Loaded image: k8s.gcr.io/etcd:3.4.9-1
ba0dae6243cc: Loading layer  684.5kB/684.5kB
Loaded image: k8s.gcr.io/pause:3.2
8b62fd4eb2dd: Loading layer  43.99MB/43.99MB
40fe7b163104: Loading layer  2.828MB/2.828MB
Loaded image: calico/kube-controllers:v3.8.2
91e3a07063b3: Loading layer  53.89MB/53.89MB
b4e54f331697: Loading layer  21.78MB/21.78MB
b9b82a97c787: Loading layer  5.168MB/5.168MB
1b55846906e8: Loading layer  4.608kB/4.608kB
061bfb5cb861: Loading layer  8.192kB/8.192kB
78dd6c0504a7: Loading layer  8.704kB/8.704kB
f83925edb29c: Loading layer  38.81MB/38.81MB
Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
a2a6ea4dde58: Loading layer  42.13MB/42.13MB
Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
225df95e717c: Loading layer  336.4kB/336.4kB
96d17b0b58a7: Loading layer  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
d8a33133e477: Loading layer  72.47MB/72.47MB
337ec577cf9c: Loading layer     33MB/33MB
45cc6dfacce1: Loading layer  3.584kB/3.584kB
7b3ecdc818b0: Loading layer  3.584kB/3.584kB
2b0805a50f82: Loading layer  21.85MB/21.85MB
c9bf76343513: Loading layer  11.26kB/11.26kB
f4176618c27b: Loading layer  11.26kB/11.26kB
4dcaff1da822: Loading layer   6.55MB/6.55MB
92e6b8f58573: Loading layer  2.945MB/2.945MB
5f970d4ac62d: Loading layer  35.84kB/35.84kB
b1a2a2446599: Loading layer  55.22MB/55.22MB
014866f8df9e: Loading layer   1.14MB/1.14MB
Loaded image: calico/node:v3.8.2
466b4a33898e: Loading layer  88.05MB/88.05MB
dd824a99572a: Loading layer  10.24kB/10.24kB
d8fdd74cc7ed: Loading layer   2.56kB/2.56kB
Loaded image: calico/cni:v3.8.2
3fc64803ca2d: Loading layer  4.463MB/4.463MB
f03a403b18a7: Loading layer   5.12kB/5.12kB
0de6f9b8b1f7: Loading layer  5.166MB/5.166MB
Loaded image: calico/pod2daemon-flexvol:v3.8.2
06:52:52 [INFO] [ssh.go:51] [172.24.20.53:22] driver is cgroupfs
06:52:52 [INFO] [ssh.go:51] [172.24.20.53:22] Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /etc/systemd/system/kubelet.service.
06:52:53 [DEBG] [print.go:21] ==>SendPackage
06:52:53 [DEBG] [ssh.go:58] [172.24.20.51:22] echo "apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.24.20.51
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
controlPlaneEndpoint: "apiserver.cluster.local:6443"
imageRepository: k8s.gcr.io
networking:
  # dnsDomain: cluster.local
  podSubnet: 10.20.0.0/16
  serviceSubnet: 10.40.0.0/16
apiServer:
  certSANs:
  - 127.0.0.1
  - apiserver.cluster.local
  - 172.24.20.51
  - 172.24.20.52
  - 172.24.20.53
  - 10.10.10.10
  extraArgs:
    feature-gates: TTLAfterFinished=true
  extraVolumes:
  - name: localtime
    hostPath: /etc/localtime
    mountPath: /etc/localtime
    readOnly: true
    pathType: File
controllerManager:
  extraArgs:
    feature-gates: TTLAfterFinished=true
    experimental-cluster-signing-duration: 876000h
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
scheduler:
  extraArgs:
    feature-gates: TTLAfterFinished=true
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  excludeCIDRs:
  - "10.10.10.10/32"" > /root/kubeadm-config.yaml
06:52:53 [DEBG] [print.go:21] ==>SendPackage==>KubeadmConfigInstall
06:52:53 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
06:52:54 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

06:52:54 [INFO] [kube_certs.go:223] apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local cloudos-51:cloudos-51 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.10.10.10:10.10.10.10 10.40.0.1:10.40.0.1 127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51 172.24.20.52:172.24.20.52 172.24.20.53:172.24.20.53]}
06:52:54 [INFO] [kube_certs.go:243] Etcd altnames : {map[cloudos-51:cloudos-51 localhost:localhost] map[127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51 ::1:::1]}, commonName : cloudos-51
06:52:58 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
06:52:58 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
06:53:00 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
06:53:00 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/kubelet.conf] to Dst [/etc/kubernetes/kubelet.conf] total size is: 5.48KB ;speed is 5KB
06:53:01 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
06:53:02 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/admin.conf] to Dst [/etc/kubernetes/admin.conf] total size is: 5.46KB ;speed is 5KB
06:53:03 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
06:53:04 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/controller-manager.conf] to Dst [/etc/kubernetes/controller-manager.conf] total size is: 5.49KB ;speed is 5KB
06:53:05 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
06:53:05 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/scheduler.conf] to Dst [/etc/kubernetes/scheduler.conf] total size is: 5.44KB ;speed is 5KB
06:53:06 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.crt] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.crt] total size is: 1.11KB ;speed is 1KB
06:53:07 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.key] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.key] total size is: 1.64KB ;speed is 1KB
06:53:08 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.crt] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.crt] total size is: 1.12KB ;speed is 1KB
06:53:09 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.key] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.key] total size is: 1.64KB ;speed is 1KB
06:53:09 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver.crt] to Dst [/etc/kubernetes/pki/apiserver.crt] total size is: 1.33KB ;speed is 1KB
06:53:09 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver.key] to Dst [/etc/kubernetes/pki/apiserver.key] total size is: 1.64KB ;speed is 1KB
06:53:10 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/ca.crt] to Dst [/etc/kubernetes/pki/ca.crt] total size is: 1.04KB ;speed is 1KB
06:53:10 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/ca.key] to Dst [/etc/kubernetes/pki/ca.key] total size is: 1.64KB ;speed is 1KB
06:53:10 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/ca.crt] to Dst [/etc/kubernetes/pki/etcd/ca.crt] total size is: 1.04KB ;speed is 1KB
06:53:11 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/ca.key] to Dst [/etc/kubernetes/pki/etcd/ca.key] total size is: 1.64KB ;speed is 1KB
06:53:11 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.crt] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.crt] total size is: 1.12KB ;speed is 1KB
06:53:12 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.key] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.key] total size is: 1.64KB ;speed is 1KB
06:53:12 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/peer.crt] to Dst [/etc/kubernetes/pki/etcd/peer.crt] total size is: 1.16KB ;speed is 1KB
06:53:13 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/peer.key] to Dst [/etc/kubernetes/pki/etcd/peer.key] total size is: 1.64KB ;speed is 1KB
06:53:13 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/server.crt] to Dst [/etc/kubernetes/pki/etcd/server.crt] total size is: 1.16KB ;speed is 1KB
06:53:13 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/server.key] to Dst [/etc/kubernetes/pki/etcd/server.key] total size is: 1.64KB ;speed is 1KB
06:53:14 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-ca.crt] to Dst [/etc/kubernetes/pki/front-proxy-ca.crt] total size is: 1.06KB ;speed is 1KB
06:53:14 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-ca.key] to Dst [/etc/kubernetes/pki/front-proxy-ca.key] total size is: 1.64KB ;speed is 1KB
06:53:14 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-client.crt] to Dst [/etc/kubernetes/pki/front-proxy-client.crt] total size is: 1.08KB ;speed is 1KB
06:53:15 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-client.key] to Dst [/etc/kubernetes/pki/front-proxy-client.key] total size is: 1.64KB ;speed is 1KB
06:53:15 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/sa.key] to Dst [/etc/kubernetes/pki/sa.key] total size is: 1.64KB ;speed is 1KB
06:53:15 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/sa.pub] to Dst [/etc/kubernetes/pki/sa.pub] total size is: 0.44KB ;speed is 0KB
06:53:16 [DEBG] [ssh.go:58] [172.24.20.51:22] grep -qF '172.24.20.51 apiserver.cluster.local' /etc/hosts || echo 172.24.20.51 apiserver.cluster.local >> /etc/hosts
06:53:18 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] kubeadm init --config=/root/kubeadm-config.yaml --upload-certs -v 0
06:53:49 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: W0609 06:53:19.184294    4607 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 06:53:19.187726    4607 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 06:53:19.475223    4607 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.0
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 24.509231 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 7tfbyw.qgkimu2s8z4wq3fh
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join apiserver.cluster.local:6443 --token 7tfbyw.qgkimu2s8z4wq3fh \
    --discovery-token-ca-cert-hash sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567 \
    --control-plane --certificate-key 53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.cluster.local:6443 --token 7tfbyw.qgkimu2s8z4wq3fh \
    --discovery-token-ca-cert-hash sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567 

06:53:49 [INFO] [sealos.go:105] [globals]join command is:  apiserver.cluster.local:6443 --token 7tfbyw.qgkimu2s8z4wq3fh \
    --discovery-token-ca-cert-hash sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567 \
    --control-plane --certificate-key 53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee


06:53:49 [DEBG] [sealos.go:111] [globals]decodeJoinCmd:  apiserver.cluster.local:6443 --token 7tfbyw.qgkimu2s8z4wq3fh \
    --discovery-token-ca-cert-hash sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567 \
    --control-plane --certificate-key 53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee


06:53:49 [DEBG] [sealos.go:119] [####]0 ::
06:53:49 [DEBG] [sealos.go:119] [####]1 :apiserver.cluster.local:6443:
06:53:49 [DEBG] [sealos.go:119] [####]2 :--token:
06:53:49 [DEBG] [sealos.go:119] [####]3 :7tfbyw.qgkimu2s8z4wq3fh:
06:53:49 [DEBG] [sealos.go:119] [####]4 ::
06:53:49 [DEBG] [sealos.go:119] [####]5 ::
06:53:49 [DEBG] [sealos.go:119] [####]6 ::
06:53:49 [DEBG] [sealos.go:119] [####]7 ::
06:53:49 [DEBG] [sealos.go:119] [####]8 :--discovery-token-ca-cert-hash:
06:53:49 [DEBG] [sealos.go:119] [####]9 :sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567:
06:53:49 [DEBG] [sealos.go:119] [####]10 ::
06:53:49 [DEBG] [sealos.go:119] [####]11 ::
06:53:49 [DEBG] [sealos.go:119] [####]12 ::
06:53:49 [DEBG] [sealos.go:119] [####]13 ::
06:53:49 [DEBG] [sealos.go:119] [####]14 :--control-plane:
06:53:49 [DEBG] [sealos.go:119] [####]15 :--certificate-key:
06:53:49 [DEBG] [sealos.go:119] [####]16 :53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee:
06:53:49 [DEBG] [sealos.go:140] [####]JoinToken :7tfbyw.qgkimu2s8z4wq3fh
06:53:49 [DEBG] [sealos.go:141] [####]TokenCaCertHash :sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567
06:53:49 [DEBG] [sealos.go:142] [####]CertificateKey :53536c5a0d142e6dbbc178f5c132f918d9c83088c6d1c9f54800467cdf054eee
06:53:49 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config && chmod 600 /root/.kube/config
06:53:50 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 
06:53:50 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] echo '
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node.  The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
   name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamblocks.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMBlock
    plural: ipamblocks
    singular: ipamblock

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: blockaffinities.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BlockAffinity
    plural: blockaffinities
    singular: blockaffinity

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamhandles.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMHandle
    plural: ipamhandles
    singular: ipamhandle

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamconfigs.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMConfig
    plural: ipamconfigs
    singular: ipamconfig

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkSet
    plural: networksets
    singular: networkset
---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are queried to check for existence.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  # IPAM resources are manipulated when nodes are deleted.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - create
      - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: calico/cni:v3.8.2
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.8.2
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.8.2
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
      containers:
        # Runs calico-node container on each Kubernetes node.  This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.8.2
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth.*|en.*|em.*"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            - name: CALICO_IPV4POOL_CIDR
              value: "10.20.0.0/16"
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml

# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
      annotations:
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:v3.8.2
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
' | kubectl apply -f -
06:53:54 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: configmap/calico-config created
Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

06:53:54 [DEBG] [print.go:21] ==>SendPackage==>KubeadmConfigInstall==>InstallMaster0
06:53:55 [DEBG] [ssh.go:58] [172.24.20.53:22] mkdir -p /etc/kubernetes || true
06:53:55 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /etc/kubernetes || true
06:53:57 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/admin.conf] to Dst [/etc/kubernetes/admin.conf] total size is: 5.46KB ;speed is 5KB
06:53:57 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/admin.conf] to Dst [/etc/kubernetes/admin.conf] total size is: 5.46KB ;speed is 5KB
06:53:58 [DEBG] [ssh.go:58] [172.24.20.53:22] mkdir -p /etc/kubernetes || true
06:53:58 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /etc/kubernetes || true
06:53:59 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/controller-manager.conf] to Dst [/etc/kubernetes/controller-manager.conf] total size is: 5.49KB ;speed is 5KB
06:53:59 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/controller-manager.conf] to Dst [/etc/kubernetes/controller-manager.conf] total size is: 5.49KB ;speed is 5KB
06:54:00 [DEBG] [ssh.go:58] [172.24.20.53:22] mkdir -p /etc/kubernetes || true
06:54:00 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /etc/kubernetes || true
06:54:01 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/scheduler.conf] to Dst [/etc/kubernetes/scheduler.conf] total size is: 5.44KB ;speed is 5KB
06:54:01 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/scheduler.conf] to Dst [/etc/kubernetes/scheduler.conf] total size is: 5.44KB ;speed is 5KB
06:54:02 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.crt] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.crt] total size is: 1.11KB ;speed is 1KB
06:54:02 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.crt] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.crt] total size is: 1.11KB ;speed is 1KB
06:54:03 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.key] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.key] total size is: 1.64KB ;speed is 1KB
06:54:03 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.key] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.key] total size is: 1.64KB ;speed is 1KB
06:54:03 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.crt] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.crt] total size is: 1.12KB ;speed is 1KB
06:54:03 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.crt] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.crt] total size is: 1.12KB ;speed is 1KB
06:54:04 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.key] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.key] total size is: 1.64KB ;speed is 1KB
06:54:04 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.key] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.key] total size is: 1.64KB ;speed is 1KB
06:54:04 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver.crt] to Dst [/etc/kubernetes/pki/apiserver.crt] total size is: 1.33KB ;speed is 1KB
06:54:04 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver.crt] to Dst [/etc/kubernetes/pki/apiserver.crt] total size is: 1.33KB ;speed is 1KB
06:54:05 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/apiserver.key] to Dst [/etc/kubernetes/pki/apiserver.key] total size is: 1.64KB ;speed is 1KB
06:54:05 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/apiserver.key] to Dst [/etc/kubernetes/pki/apiserver.key] total size is: 1.64KB ;speed is 1KB
06:54:05 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/ca.crt] to Dst [/etc/kubernetes/pki/ca.crt] total size is: 1.04KB ;speed is 1KB
06:54:05 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/ca.crt] to Dst [/etc/kubernetes/pki/ca.crt] total size is: 1.04KB ;speed is 1KB
06:54:06 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/ca.key] to Dst [/etc/kubernetes/pki/ca.key] total size is: 1.64KB ;speed is 1KB
06:54:06 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/ca.key] to Dst [/etc/kubernetes/pki/ca.key] total size is: 1.64KB ;speed is 1KB
06:54:06 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/ca.crt] to Dst [/etc/kubernetes/pki/etcd/ca.crt] total size is: 1.04KB ;speed is 1KB
06:54:06 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/ca.crt] to Dst [/etc/kubernetes/pki/etcd/ca.crt] total size is: 1.04KB ;speed is 1KB
06:54:07 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/ca.key] to Dst [/etc/kubernetes/pki/etcd/ca.key] total size is: 1.64KB ;speed is 1KB
06:54:07 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/ca.key] to Dst [/etc/kubernetes/pki/etcd/ca.key] total size is: 1.64KB ;speed is 1KB
06:54:07 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.crt] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.crt] total size is: 1.12KB ;speed is 1KB
06:54:07 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.crt] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.crt] total size is: 1.12KB ;speed is 1KB
06:54:08 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.key] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.key] total size is: 1.64KB ;speed is 1KB
06:54:08 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.key] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.key] total size is: 1.64KB ;speed is 1KB
06:54:08 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/peer.crt] to Dst [/etc/kubernetes/pki/etcd/peer.crt] total size is: 1.16KB ;speed is 1KB
06:54:08 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/peer.crt] to Dst [/etc/kubernetes/pki/etcd/peer.crt] total size is: 1.16KB ;speed is 1KB
06:54:09 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/peer.key] to Dst [/etc/kubernetes/pki/etcd/peer.key] total size is: 1.64KB ;speed is 1KB
06:54:09 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/peer.key] to Dst [/etc/kubernetes/pki/etcd/peer.key] total size is: 1.64KB ;speed is 1KB
06:54:09 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/server.crt] to Dst [/etc/kubernetes/pki/etcd/server.crt] total size is: 1.16KB ;speed is 1KB
06:54:10 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/server.crt] to Dst [/etc/kubernetes/pki/etcd/server.crt] total size is: 1.16KB ;speed is 1KB
06:54:10 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/etcd/server.key] to Dst [/etc/kubernetes/pki/etcd/server.key] total size is: 1.64KB ;speed is 1KB
06:54:10 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/etcd/server.key] to Dst [/etc/kubernetes/pki/etcd/server.key] total size is: 1.64KB ;speed is 1KB
06:54:11 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/front-proxy-ca.crt] to Dst [/etc/kubernetes/pki/front-proxy-ca.crt] total size is: 1.06KB ;speed is 1KB
06:54:11 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/front-proxy-ca.crt] to Dst [/etc/kubernetes/pki/front-proxy-ca.crt] total size is: 1.06KB ;speed is 1KB
06:54:11 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/front-proxy-ca.key] to Dst [/etc/kubernetes/pki/front-proxy-ca.key] total size is: 1.64KB ;speed is 1KB
06:54:12 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/front-proxy-ca.key] to Dst [/etc/kubernetes/pki/front-proxy-ca.key] total size is: 1.64KB ;speed is 1KB
06:54:12 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/front-proxy-client.crt] to Dst [/etc/kubernetes/pki/front-proxy-client.crt] total size is: 1.08KB ;speed is 1KB
06:54:12 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/front-proxy-client.crt] to Dst [/etc/kubernetes/pki/front-proxy-client.crt] total size is: 1.08KB ;speed is 1KB
06:54:12 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/front-proxy-client.key] to Dst [/etc/kubernetes/pki/front-proxy-client.key] total size is: 1.64KB ;speed is 1KB
06:54:13 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/front-proxy-client.key] to Dst [/etc/kubernetes/pki/front-proxy-client.key] total size is: 1.64KB ;speed is 1KB
06:54:13 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/sa.key] to Dst [/etc/kubernetes/pki/sa.key] total size is: 1.64KB ;speed is 1KB
06:54:13 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/sa.key] to Dst [/etc/kubernetes/pki/sa.key] total size is: 1.64KB ;speed is 1KB
06:54:14 [DEBG] [scp.go:337] [ssh][172.24.20.53:22]transfer local [/root/.sealos/pki/sa.pub] to Dst [/etc/kubernetes/pki/sa.pub] total size is: 0.44KB ;speed is 0KB
06:54:14 [DEBG] [scp.go:337] [ssh][172.24.20.52:22]transfer local [/root/.sealos/pki/sa.pub] to Dst [/etc/kubernetes/pki/sa.pub] total size is: 0.44KB ;speed is 0KB
06:54:14 [DEBG] [ssh.go:58] [172.24.20.52:22] echo "apiVersion: kubeadm.k8s.io/v1beta2
caCertPath: /etc/kubernetes/pki/ca.crt
discovery:
  bootstrapToken:
    apiServerEndpoint: 172.24.20.51:6443
    token: 7tfbyw.qgkimu2s8z4wq3fh
    caCertHashes:
    - sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567
  timeout: 5m0s
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: 172.24.20.52
    bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock" > /root/kubeadm-join-config.yaml
06:54:14 [DEBG] [ssh.go:58] [172.24.20.53:22] echo "apiVersion: kubeadm.k8s.io/v1beta2
caCertPath: /etc/kubernetes/pki/ca.crt
discovery:
  bootstrapToken:
    apiServerEndpoint: 172.24.20.51:6443
    token: 7tfbyw.qgkimu2s8z4wq3fh
    caCertHashes:
    - sha256:495a8e10ba234a29e827960e1b985e326bd27471631396b6c4b26084796b8567
  timeout: 5m0s
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: 172.24.20.53
    bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock" > /root/kubeadm-join-config.yaml
06:54:15 [INFO] [ssh.go:13] [ssh][172.24.20.53:22] hostname
06:54:15 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] hostname
06:54:16 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: cloudos-52

06:54:16 [DEBG] [ssh.go:58] [172.24.20.52:22] sealos cert  --node-ip 172.24.20.52 --node-name cloudos-52 --service-cidr 10.40.0.0/16 --dns-domain cluster.local --alt-names 127.0.0.1 --alt-names apiserver.cluster.local --alt-names 172.24.20.51 --alt-names 172.24.20.52 --alt-names 172.24.20.53 --alt-names 10.10.10.10
06:54:16 [DEBG] [ssh.go:25] [ssh][172.24.20.53:22]command result is: cloudos-53

06:54:16 [DEBG] [ssh.go:58] [172.24.20.53:22] sealos cert  --node-ip 172.24.20.53 --node-name cloudos-53 --service-cidr 10.40.0.0/16 --dns-domain cluster.local --alt-names 127.0.0.1 --alt-names apiserver.cluster.local --alt-names 172.24.20.51 --alt-names 172.24.20.52 --alt-names 172.24.20.53 --alt-names 10.10.10.10
06:54:16 [INFO] [ssh.go:51] [172.24.20.53:22] 06:54:19 [INFO] [kube_certs.go:223] apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local cloudos-53:cloudos-53 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.10.10.10:10.10.10.10 10.40.0.1:10.40.0.1 127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51 172.24.20.52:172.24.20.52 172.24.20.53:172.24.20.53]}
06:54:16 [INFO] [ssh.go:51] [172.24.20.53:22] 06:54:19 [INFO] [kube_certs.go:243] Etcd altnames : {map[cloudos-53:cloudos-53 localhost:localhost] map[127.0.0.1:127.0.0.1 172.24.20.53:172.24.20.53 ::1:::1]}, commonName : cloudos-53
06:54:16 [INFO] [ssh.go:51] [172.24.20.53:22] 06:54:19 [INFO] [kube_certs.go:251] sa.key sa.pub already exist
06:54:16 [INFO] [ssh.go:51] [172.24.20.52:22] 06:54:19 [INFO] [kube_certs.go:223] apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local cloudos-52:cloudos-52 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.10.10.10:10.10.10.10 10.40.0.1:10.40.0.1 127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51 172.24.20.52:172.24.20.52 172.24.20.53:172.24.20.53]}
06:54:16 [INFO] [ssh.go:51] [172.24.20.52:22] 06:54:19 [INFO] [kube_certs.go:243] Etcd altnames : {map[cloudos-52:cloudos-52 localhost:localhost] map[127.0.0.1:127.0.0.1 172.24.20.52:172.24.20.52 ::1:::1]}, commonName : cloudos-52
06:54:16 [INFO] [ssh.go:51] [172.24.20.52:22] 06:54:19 [INFO] [kube_certs.go:251] sa.key sa.pub already exist
06:54:19 [DEBG] [ssh.go:58] [172.24.20.53:22] echo 172.24.20.51 apiserver.cluster.local >> /etc/hosts
06:54:19 [DEBG] [ssh.go:58] [172.24.20.53:22] kubeadm join --config=/root/kubeadm-join-config.yaml  -v 0
06:54:19 [DEBG] [ssh.go:58] [172.24.20.52:22] echo 172.24.20.51 apiserver.cluster.local >> /etc/hosts
06:54:20 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] Running pre-flight checks
06:54:20 [DEBG] [ssh.go:58] [172.24.20.52:22] kubeadm join --config=/root/kubeadm-join-config.yaml  -v 0
06:54:20 [INFO] [ssh.go:51] [172.24.20.53:22]   [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
06:54:20 [INFO] [ssh.go:51] [172.24.20.53:22]   [WARNING FileExisting-socat]: socat not found in system path
06:54:20 [INFO] [ssh.go:51] [172.24.20.53:22]   [WARNING FileExisting-tc]: tc not found in system path
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] Reading configuration from the cluster...
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
06:54:21 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Running pre-flight checks
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] Running pre-flight checks before initializing the new control plane instance
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] Pulling images required for setting up a Kubernetes cluster
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] This might take a minute or two, depending on the speed of your internet connection
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
06:54:21 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using certificateDir folder "/etc/kubernetes/pki"
06:54:22 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
06:54:22 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING FileExisting-socat]: socat not found in system path
06:54:22 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING FileExisting-tc]: tc not found in system path
06:54:22 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "front-proxy-client" certificate and key
06:54:22 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "etcd/server" certificate and key
06:54:22 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Reading configuration from the cluster...
06:54:22 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Running pre-flight checks before initializing the new control plane instance
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Pulling images required for setting up a Kubernetes cluster
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] This might take a minute or two, depending on the speed of your internet connection
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
06:54:23 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "etcd/peer" certificate and key
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using certificateDir folder "/etc/kubernetes/pki"
06:54:23 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "etcd/healthcheck-client" certificate and key
06:54:23 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "etcd/healthcheck-client" certificate and key
06:54:23 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "apiserver-etcd-client" certificate and key
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "apiserver-kubelet-client" certificate and key
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "apiserver" certificate and key
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [certs] Using the existing "sa" key
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [kubeconfig] Generating kubeconfig files
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
06:54:24 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "apiserver-etcd-client" certificate and key
06:54:24 [INFO] [ssh.go:51] [172.24.20.53:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
06:54:24 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "etcd/peer" certificate and key
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "etcd/server" certificate and key
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "apiserver" certificate and key
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [control-plane] Creating static Pod manifest for "kube-apiserver"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [control-plane] Creating static Pod manifest for "kube-controller-manager"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [control-plane] Creating static Pod manifest for "kube-scheduler"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [check-etcd] Checking that the etcd cluster is healthy
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
06:54:25 [INFO] [ssh.go:51] [172.24.20.53:22] [kubelet-start] Starting the kubelet
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "apiserver-kubelet-client" certificate and key
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "front-proxy-client" certificate and key
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [certs] Using the existing "sa" key
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [kubeconfig] Generating kubeconfig files
06:54:25 [INFO] [ssh.go:51] [172.24.20.52:22] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
06:54:26 [INFO] [ssh.go:51] [172.24.20.53:22] [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [control-plane] Creating static Pod manifest for "kube-apiserver"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [control-plane] Creating static Pod manifest for "kube-controller-manager"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [control-plane] Creating static Pod manifest for "kube-scheduler"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [check-etcd] Checking that the etcd cluster is healthy
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
06:54:26 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Starting the kubelet
06:54:28 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
06:54:34 [INFO] [ssh.go:51] [172.24.20.53:22] [etcd] Announced new etcd member joining to the existing etcd cluster
06:54:34 [INFO] [ssh.go:51] [172.24.20.53:22] [etcd] Creating static Pod manifest for "etcd"
06:54:34 [INFO] [ssh.go:51] [172.24.20.53:22] [etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
06:54:45 [INFO] [ssh.go:51] [172.24.20.53:22] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
06:54:52 [INFO] [ssh.go:51] [172.24.20.52:22] [etcd] Announced new etcd member joining to the existing etcd cluster
06:54:52 [INFO] [ssh.go:51] [172.24.20.52:22] [etcd] Creating static Pod manifest for "etcd"
06:54:52 [INFO] [ssh.go:51] [172.24.20.52:22] [etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
06:54:53 [INFO] [ssh.go:51] [172.24.20.53:22] [mark-control-plane] Marking the node cloudos-53 as control-plane by adding the label "node-role.kubernetes.io/master=''"
06:54:53 [INFO] [ssh.go:51] [172.24.20.53:22] [mark-control-plane] Marking the node cloudos-53 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] This node has joined the cluster and a new control plane instance was created:
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] * Certificate signing request was sent to apiserver and approval was received.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] * The Kubelet was informed of the new secure connection details.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] * Control plane (master) label and taint were applied to the new node.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] * The Kubernetes control plane instances scaled up.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] * A new etcd member was added to the local/stacked etcd cluster.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] To start administering your cluster from this node, you need to run the following as a regular user:
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22]   mkdir -p $HOME/.kube
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] Run 'kubectl get nodes' to see this node join the cluster.
06:54:54 [INFO] [ssh.go:51] [172.24.20.53:22] 
06:54:54 [DEBG] [ssh.go:58] [172.24.20.53:22] sed "s/172.24.20.51 apiserver.cluster.local/172.24.20.53 apiserver.cluster.local/g" -i /etc/hosts
06:54:55 [INFO] [ssh.go:51] [172.24.20.52:22] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
06:54:55 [INFO] [ssh.go:51] [172.24.20.52:22] [mark-control-plane] Marking the node cloudos-52 as control-plane by adding the label "node-role.kubernetes.io/master=''"
06:54:55 [INFO] [ssh.go:51] [172.24.20.52:22] [mark-control-plane] Marking the node cloudos-52 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] This node has joined the cluster and a new control plane instance was created:
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] * Certificate signing request was sent to apiserver and approval was received.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] * The Kubelet was informed of the new secure connection details.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] * Control plane (master) label and taint were applied to the new node.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] * The Kubernetes control plane instances scaled up.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] * A new etcd member was added to the local/stacked etcd cluster.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] To start administering your cluster from this node, you need to run the following as a regular user:
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22]   mkdir -p $HOME/.kube
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] Run 'kubectl get nodes' to see this node join the cluster.
06:54:56 [INFO] [ssh.go:51] [172.24.20.52:22] 
06:54:56 [DEBG] [ssh.go:58] [172.24.20.52:22] sed "s/172.24.20.51 apiserver.cluster.local/172.24.20.52 apiserver.cluster.local/g" -i /etc/hosts
06:54:56 [DEBG] [ssh.go:58] [172.24.20.53:22] rm -rf .kube/config && mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config && chmod 600 /root/.kube/config
06:54:57 [DEBG] [ssh.go:58] [172.24.20.52:22] rm -rf .kube/config && mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config && chmod 600 /root/.kube/config
06:54:58 [DEBG] [ssh.go:58] [172.24.20.53:22] rm -rf /root/kube || :
06:54:58 [DEBG] [ssh.go:58] [172.24.20.52:22] rm -rf /root/kube || :
06:55:02 [DEBG] [print.go:21] ==>SendPackage==>KubeadmConfigInstall==>InstallMaster0==>JoinMasters
06:55:02 [INFO] [print.go:26] sealos install success.
06:55:02 [INFO] [init.go:95] 

3.verify

[root@cloudos-51 ~]# kubectl get node -o wide
NAME         STATUS   ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE          KERNEL-VERSION       CONTAINER-RUNTIME
cloudos-51   Ready    master   7m32s   v1.19.0   172.24.20.51   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0
cloudos-52   Ready    master   6m42s   v1.19.0   172.24.20.52   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0
cloudos-53   Ready    master   6m43s   v1.19.0   172.24.20.53   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0
[root@cloudos-51 ~]# 
[root@cloudos-51 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-0               Healthy     {"health":"true"}   

// Fix the Unhealthy controller-manager/scheduler statuses: on each master, comment out
// the "- --port=0" line in the two static pod manifests under /etc/kubernetes/manifests/;
// the kubelet notices the change and restarts the pods automatically.
//   kube-controller-manager.yaml
//   kube-scheduler.yaml
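
A minimal sketch of that edit with sed (GNU sed assumed; repeat on every master, then give the static pods a few seconds to come back):

# comment out "- --port=0" in both manifests; the kubelet reloads static pods on change
sed -i 's/^\(\s*\)- --port=0/\1#- --port=0/' /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -i 's/^\(\s*\)- --port=0/\1#- --port=0/' /etc/kubernetes/manifests/kube-scheduler.yaml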

[root@cloudos-51 manifests]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}
// Remove the master taint so this node can schedule ordinary pods
kubectl taint node cloudos-51  node-role.kubernetes.io/master-
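
The other two masters keep their taint unless it is removed there as well; the trailing "-" is what deletes it (standard kubectl, shown here for the remaining masters):

kubectl taint node cloudos-52 node-role.kubernetes.io/master-
kubectl taint node cloudos-53 node-role.kubernetes.io/master-
# confirm nothing is left
kubectl describe node cloudos-51 | grep -i taints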

// Start a busybox pod and keep it in the foreground; don't restart it if it exits:
// kubectl run -i -t busybox --image=busybox --restart=Never

kubectl run -i -t busybox --image=busybox -- sh

[root@cloudos-51 ~]# kubectl exec -it busybox -- sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: tunl0@NONE: <NOARP> mtu 1480 qdisc noop qlen 1000
    link/ipip 0.0.0.0 brd 0.0.0.0
4: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1440 qdisc noqueue 
    link/ether 02:4c:0b:4a:4a:c7 brd ff:ff:ff:ff:ff:ff
    inet 10.20.174.194/32 scope global eth0
       valid_lft forever preferred_lft forever
/ # ping 10.40.0.1
PING 10.40.0.1 (10.40.0.1): 56 data bytes
64 bytes from 10.40.0.1: seq=0 ttl=64 time=2.869 ms
64 bytes from 10.40.0.1: seq=1 ttl=64 time=0.191 ms
64 bytes from 10.40.0.1: seq=2 ttl=64 time=0.109 ms
^C
--- 10.40.0.1 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.109/1.056/2.869 ms
/ # ping 172.24.20.51
PING 172.24.20.51 (172.24.20.51): 56 data bytes
64 bytes from 172.24.20.51: seq=0 ttl=63 time=2.362 ms
^C
--- 172.24.20.51 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 2.362/2.362/2.362 ms
/ # ping 172.24.20.53
PING 172.24.20.53 (172.24.20.53): 56 data bytes
64 bytes from 172.24.20.53: seq=0 ttl=64 time=0.155 ms
64 bytes from 172.24.20.53: seq=1 ttl=64 time=0.198 ms
^C
--- 172.24.20.53 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.155/0.176/0.198 ms
/ # ping 172.20.20.53
PING 172.20.20.53 (172.20.20.53): 56 data bytes
64 bytes from 172.20.20.53: seq=0 ttl=64 time=0.140 ms
64 bytes from 172.20.20.53: seq=1 ttl=64 time=0.113 ms
^C
--- 172.20.20.53 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.113/0.126/0.140 ms
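
Beyond ping, a couple of host-side checks close the loop: 10.40.0.1 is the first IP of the --svccidr range, i.e. the ClusterIP kubeadm assigns to the kubernetes service, and the Calico/CoreDNS pods should all be Running (plain kubectl; busybox's nslookup output varies by image version):

kubectl get svc kubernetes -n default
kubectl get pods -n kube-system -o wide
# optional in-pod DNS check
kubectl exec -it busybox -- nslookup kubernetes.default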

4.cluster node operations

# add masters
sealos join --master 192.168.0.6 --master 192.168.0.7
sealos join --master 192.168.0.6-192.168.0.9  # or a contiguous IP range

# add nodes
sealos join --node 192.168.0.6 --node 192.168.0.7
sealos join --node 192.168.0.6-192.168.0.9  # or a contiguous IP range

# remove the specified master nodes
sealos clean --master 192.168.0.6 --master 192.168.0.7
sealos clean --master 192.168.0.6-192.168.0.9  # or a contiguous IP range

# remove the specified worker nodes
sealos clean --node 192.168.0.6 --node 192.168.0.7
sealos clean --node 192.168.0.6-192.168.0.9  # or a contiguous IP range

# tear down the whole cluster
sealos clean --all
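
After any join or clean, the membership change can be confirmed from the first master with plain kubectl:

kubectl get nodes -o wide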