Sealos v3 also provides powerful tools for managing the entire lifecycle of a Kubernetes cluster, including but not limited to installing a bare Kubernetes cluster, assembling various higher-level distributed applications on top of Kubernetes, and building production-grade highly available clusters. Even in offline environments, Sealos v3 delivers an excellent Kubernetes experience.
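That lifecycle maps onto a handful of subcommands (init, join, install, clean and so on; see the sealos -h output in section 2.1). A rough workflow sketch, using the init flags documented further below; the join flags here are an assumption, since this walkthrough only exercises init:

./sealos init --master 192.168.0.2 --node 192.168.0.5 \
    --passwd your-server-password \
    --version v1.19.0 --pkg-url /root/kube1.19.0.tar.gz   # bootstrap the cluster
./sealos join --node 192.168.0.6                          # add a worker later (flags assumed)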

env

  • sealos-v3.3.9-rc.3
  • kube1.19.0.tar.gz
  • OpenCloudOS 8.6 ×1 (2 vCPU / 4 GB RAM / 40 GB disk)

1. Download sealos
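No download command was captured in the original notes. As a minimal sketch (the release URL and asset name are assumptions; check the fanux/sealos GitHub releases page for the actual v3.3.9-rc.3 artifact):

# hypothetical asset path -- verify against the releases page
wget -O sealos https://github.com/fanux/sealos/releases/download/v3.3.9-rc.3/sealos
chmod +x sealos
./sealos version   # should print 3.3.9-rc.3, matching section 2.1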

2. Init Kubernetes

2.1 sealos v3

[root@cloudos-51 ~]# ./sealos version
Version: 3.3.9-rc.3
Last Commit: 4db4953
Build Date: 2021-04-10T11:25:04Z
[root@cloudos-51 ~]# ./sealos -h
A longer description that spans multiple lines and likely contains
examples and usage of using your application. For example:

Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.

Usage:
  sealos [command]

Available Commands:
  cert        generate certs
  clean       Simplest way to clean your kubernets HA cluster
  cloud       sealos on cloud
  completion  Output shell completion code for the specified shell (bash or zsh)
  config      print config template to console
  delete      delete kubernetes apps installled by sealos..
  etcd        Simplest way to snapshot/restore your kubernets etcd
  exec        support exec cmd or copy file by Label/nodes 
  help        Help about any command
  init        Simplest way to init your kubernets HA cluster   // initialize / install the cluster
  install     install kubernetes apps, like dashboard prometheus ..
  ipvs        sealos create or care local ipvs lb
  join        Simplest way to join your kubernets HA cluster
  route       set default route gateway
  upgrade     upgrade your kubernetes version by sealos
  version     Print the version of sealos

Flags:
      --config string   config file (default is $HOME/.sealos/config.yaml)
  -h, --help            help for sealos
      --info            logger ture for Info, false for Debug

Use "sealos [command] --help" for more information about a command.

2.2 init deploy

[root@cloudos-51 ~]# ./sealos init -h
sealos init --master 192.168.0.2 --master 192.168.0.3 --master 192.168.0.4 \
        --node 192.168.0.5 --user root --passwd your-server-password \
        --version v1.18.0 --pkg-url=/root/kube1.18.0.tar.gz

Usage:
  sealos init [flags]
  sealos init [command]

Examples:

        # init with password with three master one node
        sealos init --passwd your-server-password  \
        --master 192.168.0.2 --master 192.168.0.3 --master 192.168.0.4 \
        --node 192.168.0.5 --user root \
        --version v1.18.0 --pkg-url=/root/kube1.18.0.tar.gz 

        # init with pk-file , when your server have different password
        sealos init --pk /root/.ssh/id_rsa \
        --master 192.168.0.2 --node 192.168.0.5 --user root \
        --version v1.18.0 --pkg-url=/root/kube1.18.0.tar.gz 

        # when use multi network. set a can-reach with --interface 
        sealos init --interface 192.168.0.254 \
        --master 192.168.0.2 --master 192.168.0.3 --master 192.168.0.4 \
        --node 192.168.0.5 --user root --passwd your-server-password \
        --version v1.18.0 --pkg-url=/root/kube1.18.0.tar.gz 

        # when your interface is not "eth*|en*|em*" like.
        sealos init --interface your-interface-name \
        --master 192.168.0.2 --master 192.168.0.3 --master 192.168.0.4 \
        --node 192.168.0.5 --user root --passwd your-server-password \
        --version v1.18.0 --pkg-url=/root/kube1.18.0.tar.gz 


Available Commands:
  gen         show default sealos init config

Flags:
      --apiserver string        apiserver domain name (default "apiserver.cluster.local")
      --cert-sans strings       kubernetes apiServerCertSANs ex. 47.0.0.22 sealyun.com 
  -h, --help                    help for init
      --interface string        name of network interface, when use calico IP_AUTODETECTION_METHOD, set your ipv4 with can-reach=192.168.0.1 (default "eth.*|en.*|em.*") 
      --ipip                    ipip mode enable, calico.. (default true) //calico mode ipip
      --kubeadm-config string   kubeadm-config.yaml template file
      --lvscare-image string    lvscare image name (default "fanux/lvscare")
      --lvscare-tag string      lvscare image tag name (default "latest")
      --master strings          kubernetes multi-masters ex. 192.168.0.2-192.168.0.4
      --mtu string              mtu of the ipip mode , calico.. (default "1440")
      --network string          cni plugin, calico.. (default "calico")
      --node strings            kubernetes multi-nodes ex. 192.168.0.5-192.168.0.5
      --passwd string           password for ssh
      --pk string               private key for ssh (default "/root/.ssh/id_rsa")
      --pk-passwd string        private key password for ssh
      --pkg-url string          http://store.lameleg.com/kube1.14.1.tar.gz download offline package url, or file location ex. /root/kube1.14.1.tar.gz  // offline package URL or local file path
      --podcidr string          Specify range of IP addresses for the pod network (default "100.64.0.0/10")
      --repo string             choose a container registry to pull control plane images from (default "k8s.gcr.io")
      --svccidr string          Use alternative range of IP address for service VIPs (default "10.96.0.0/12")
      --user string             servers user name for ssh (default "root")
      --version string          version is kubernetes version   // Kubernetes version to install
      --vip string              virtual ip (default "10.103.97.2")
      --vlog int                kubeadm log level
      --without-cni             If true we not install cni plugin

Global Flags:
      --config string   config file (default is $HOME/.sealos/config.yaml)
      --info            logger ture for Info, false for Debug

Use "sealos init [command] --help" for more information about a command.

#init

./sealos init \
    --master 172.24.20.51 \
    --user root --passwd 321321 \
    --version v1.19.0 --pkg-url=/root/kube1.19.0.tar.gz

#Walkthrough of the init log

1. Run the init command
[root@cloudos-51 ~]# ./sealos init \
>     --master 172.24.20.51 \
>     --user root --passwd 321321 \
>     --version v1.19.0 --pkg-url=/root/kube1.19.0.tar.gz
00:29:58 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
00:29:59 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

2. Add hostname resolution to /etc/hosts
00:29:59 [DEBG] [ssh.go:58] [172.24.20.51:22] cat /etc/hosts |grep cloudos-51 || echo '172.24.20.51 cloudos-51' >> /etc/hosts
00:29:59 [INFO] [ssh.go:51] [172.24.20.51:22] 172.24.20.51 cloudos-51
00:29:59 [INFO] [ssh.go:51] [172.24.20.51:22] 172.20.20.51 cloudos-51
00:29:59 [INFO] [check.go:51] [172.24.20.51:22]  ------------ check ok
00:29:59 [INFO] [print.go:14] 
[globals]sealos config is:  {"Hosts":["172.24.20.51:22"],"Masters":["172.24.20.51:22"],"Nodes":null,"Network":"calico","ApiServer":"apiserver.cluster.local"}

3. Copy sealos to /usr/bin/
00:29:59 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /usr/bin || true
00:29:59 [DEBG] [download.go:30] [172.24.20.51:22]please wait for mkDstDir
00:29:59 [DEBG] [download.go:32] [172.24.20.51:22]please wait for before hook
00:29:59 [DEBG] [ssh.go:58] [172.24.20.51:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
00:29:59 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] ls -l /usr/bin/sealos 2>/dev/null |wc -l
00:29:59 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 0

00:29:59 [DEBG] [scp.go:27] [ssh]source file md5 value is b1fef37dd355c6d6842a20345a48b4fd
00:30:00 [INFO] [scp.go:101] [ssh][172.24.20.51:22]transfer total size is: 42.27MB ;speed is 42MB
00:30:00 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] md5sum /usr/bin/sealos | cut -d" " -f1
00:30:01 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: b1fef37dd355c6d6842a20345a48b4fd

00:30:01 [DEBG] [scp.go:30] [ssh]host: 172.24.20.51:22 , remote md5: b1fef37dd355c6d6842a20345a48b4fd
00:30:01 [INFO] [scp.go:34] [ssh]md5 validate true
00:30:01 [INFO] [download.go:50] [172.24.20.51:22]copy file md5 validate success
00:30:01 [DEBG] [download.go:56] [172.24.20.51:22]please wait for after hook
00:30:01 [DEBG] [ssh.go:58] [172.24.20.51:22] chmod a+x /usr/bin/sealos
00:30:02 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /root || true
00:30:02 [DEBG] [download.go:30] [172.24.20.51:22]please wait for mkDstDir
00:30:02 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] ls -l /root/kube1.19.0.tar.gz 2>/dev/null |wc -l
00:30:02 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 1

4. Process the offline kube package (unpack it, run init.sh, and configure sealos/kubectl shell completion)
00:30:03 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] md5sum /root/kube1.19.0.tar.gz | cut -d" " -f1
00:30:05 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: bdd6c97922918f6070a65521df2a8b47

00:30:05 [INFO] [download.go:37] [172.24.20.51:22]SendPackage:  /root/kube1.19.0.tar.gz file is exist and ValidateMd5 success
00:30:05 [DEBG] [download.go:56] [172.24.20.51:22]please wait for after hook
00:30:05 [DEBG] [ssh.go:58] [172.24.20.51:22] cd /root && rm -rf kube && tar zxvf kube1.19.0.tar.gz  && cd /root/kube/shell && rm -f ../bin/sealos && bash init.sh && sed -i '/kubectl/d;/sealos/d' /root/.bashrc  && echo 'command -v kubectl &>/dev/null && source <(kubectl completion bash)' >> /root/.bashrc && echo '[ -x /usr/bin/sealos ] && source <(sealos completion bash)' >> /root/.bashrc && source /root/.bashrc
00:30:05 [INFO] [ssh.go:51] [172.24.20.51:22] kube/
00:30:05 [INFO] [ssh.go:51] [172.24.20.51:22] kube/README.md
00:30:05 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/
00:30:05 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubeadm
00:30:05 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubectl
00:30:06 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubelet
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/kubelet-pre-start.sh
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/conntrack
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/bin/sealos
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/10-kubeadm.conf
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/calico.yaml
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/docker.service
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/kubeadm.yaml
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/kubelet.service
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/net/
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/conf/net/calico.yaml
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/docker/
00:30:07 [INFO] [ssh.go:51] [172.24.20.51:22] kube/docker/docker.tgz
00:30:08 [INFO] [ssh.go:51] [172.24.20.51:22] kube/images/
00:30:08 [INFO] [ssh.go:51] [172.24.20.51:22] kube/images/images.tar
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/init.sh
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/master.sh
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] kube/shell/docker.sh
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + storage=/var/lib/docker
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + harbor_ip=127.0.0.1
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + mkdir -p /var/lib/docker
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + command_exists docker
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + command -v docker
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ get_distribution
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ lsb_dist=
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ '[' -r /etc/os-release ']'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] +++ . /etc/os-release
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ NAME=OpenCloudOS
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ VERSION=8.6
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ ID=opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ ID_LIKE='rhel fedora'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ VERSION_ID=8.6
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ PLATFORM_ID=platform:oc8
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ PRETTY_NAME='OpenCloudOS 8.6'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ ANSI_COLOR='0;31'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ CPE_NAME=cpe:/o:opencloudos:opencloudos:8
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ HOME_URL=https://www.opencloudos.org/
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++++ BUG_REPORT_URL=https://bugs.opencloudos.tech/
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] +++ echo opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ lsb_dist=opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ echo opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + lsb_dist=opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ echo opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] ++ tr '[:upper:]' '[:lower:]'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + lsb_dist=opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + echo 'current system is opencloudos'
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] current system is opencloudos
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + case "$lsb_dist" in
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + cp ../conf/docker.service /usr/lib/systemd/system/docker.service
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] + tar --strip-components=1 -xvzf ../docker/docker.tgz -C /usr/bin
00:30:21 [INFO] [ssh.go:51] [172.24.20.51:22] docker/ctr
00:30:22 [INFO] [ssh.go:51] [172.24.20.51:22] docker/runc
00:30:22 [INFO] [ssh.go:51] [172.24.20.51:22] docker/dockerd
00:30:23 [INFO] [ssh.go:51] [172.24.20.51:22] docker/docker
00:30:23 [INFO] [ssh.go:51] [172.24.20.51:22] docker/containerd
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] docker/docker-init
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] docker/containerd-shim
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] docker/docker-proxy
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] + chmod a+x /usr/bin
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] + systemctl enable docker.service
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
00:30:24 [INFO] [ssh.go:51] [172.24.20.51:22] + systemctl restart docker.service
00:30:25 [INFO] [ssh.go:51] [172.24.20.51:22] + cat
00:30:25 [INFO] [ssh.go:51] [172.24.20.51:22] + systemctl restart docker.service
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] + docker version
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] Client: Docker Engine - Community
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Version:           19.03.0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  API version:       1.40
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Go version:        go1.12.5
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Git commit:        aeac9490dc
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Built:             Wed Jul 17 18:11:50 2019
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  OS/Arch:           linux/amd64
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Experimental:      false
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] 
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] Server: Docker Engine - Community
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  Engine:
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          19.03.0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   API version:      1.40 (minimum version 1.12)
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Go version:       go1.12.5
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Git commit:       aeac9490dc
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Built:            Wed Jul 17 18:22:15 2019
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   OS/Arch:          linux/amd64
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Experimental:     false
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  containerd:
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          v1.2.6
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        894b81a4b802e4eb2a91d1ce216b8817763c29fb
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  runc:
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          1.0.0-rc8
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        425e105d5a03fabd737a126ad93d62a9eeede87f
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]  docker-init:
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   Version:          0.18.0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22]   GitCommit:        fec3683
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.4.119-20.0009.20
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/00-tencentos.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.printk = 4
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv6.conf.all.disable_ipv6 = 0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] vm.oom_dump_tasks = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.ctrl-alt-del = 0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.sysrq_use_leftctrl = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.ip_local_reserved_ports = 48369,36000,56000
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.yama.ptrace_scope = 0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-coredump.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_pipe_limit = 16
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-default.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.sysrq = 16
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.core_uses_pid = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.kptr_restrict = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.rp_filter = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.accept_source_route = 0
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.conf.all.promote_secondaries = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.core.default_qdisc = fq_codel
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] fs.protected_hardlinks = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] fs.protected_symlinks = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.core.optmem_max = 81920
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /usr/lib/sysctl.d/50-pid-max.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] kernel.pid_max = 4194304
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.d/99-sysctl.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.d/k8s.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.bridge.bridge-nf-call-ip6tables = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.bridge.bridge-nf-call-iptables = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] * Applying /etc/sysctl.conf ...
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] net.ipv4.ip_forward = 1
00:30:27 [INFO] [ssh.go:51] [172.24.20.51:22] setenforce: SELinux is disabled
79d541cda6cb: Loading layer  3.041MB/3.041MB
e9933a1f21f5: Loading layer  1.734MB/1.734MB
d85a13cfa53e: Loading layer  107.3MB/107.3MB
Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
c3a6120d2fd6: Loading layer  115.2MB/115.2MB
Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
0d1435bd79e4: Loading layer  3.062MB/3.062MB
2aef7a73d4b0: Loading layer   2.13MB/2.13MB
ec3830e15d9c: Loading layer  225.3MB/225.3MB
4d5d7883c216: Loading layer   2.19MB/2.19MB
5d3a32005e6b: Loading layer  21.95MB/21.95MB
Loaded image: k8s.gcr.io/etcd:3.4.9-1
ba0dae6243cc: Loading layer  684.5kB/684.5kB
Loaded image: k8s.gcr.io/pause:3.2
8b62fd4eb2dd: Loading layer  43.99MB/43.99MB
40fe7b163104: Loading layer  2.828MB/2.828MB
Loaded image: calico/kube-controllers:v3.8.2
91e3a07063b3: Loading layer  53.89MB/53.89MB
b4e54f331697: Loading layer  21.78MB/21.78MB
b9b82a97c787: Loading layer  5.168MB/5.168MB
1b55846906e8: Loading layer  4.608kB/4.608kB
061bfb5cb861: Loading layer  8.192kB/8.192kB
78dd6c0504a7: Loading layer  8.704kB/8.704kB
f83925edb29c: Loading layer  38.81MB/38.81MB
Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
a2a6ea4dde58: Loading layer  42.13MB/42.13MB
Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
225df95e717c: Loading layer  336.4kB/336.4kB
96d17b0b58a7: Loading layer  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
d8a33133e477: Loading layer  72.47MB/72.47MB
337ec577cf9c: Loading layer     33MB/33MB
45cc6dfacce1: Loading layer  3.584kB/3.584kB
7b3ecdc818b0: Loading layer  3.584kB/3.584kB
2b0805a50f82: Loading layer  21.85MB/21.85MB
c9bf76343513: Loading layer  11.26kB/11.26kB
f4176618c27b: Loading layer  11.26kB/11.26kB
4dcaff1da822: Loading layer   6.55MB/6.55MB
92e6b8f58573: Loading layer  2.945MB/2.945MB
5f970d4ac62d: Loading layer  35.84kB/35.84kB
b1a2a2446599: Loading layer  55.22MB/55.22MB
014866f8df9e: Loading layer   1.14MB/1.14MB
Loaded image: calico/node:v3.8.2
466b4a33898e: Loading layer  88.05MB/88.05MB
dd824a99572a: Loading layer  10.24kB/10.24kB
d8fdd74cc7ed: Loading layer   2.56kB/2.56kB
Loaded image: calico/cni:v3.8.2
3fc64803ca2d: Loading layer  4.463MB/4.463MB
f03a403b18a7: Loading layer   5.12kB/5.12kB
0de6f9b8b1f7: Loading layer  5.166MB/5.166MB
Loaded image: calico/pod2daemon-flexvol:v3.8.2
00:30:48 [INFO] [ssh.go:51] [172.24.20.51:22] driver is cgroupfs
00:30:48 [INFO] [ssh.go:51] [172.24.20.51:22] Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /etc/systemd/system/kubelet.service.
00:30:49 [DEBG] [print.go:21] ==>SendPackage
00:30:49 [DEBG] [ssh.go:58] [172.24.20.51:22] echo "apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.24.20.51
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
controlPlaneEndpoint: "apiserver.cluster.local:6443"
imageRepository: k8s.gcr.io
networking:
  # dnsDomain: cluster.local
  podSubnet: 100.64.0.0/10
  serviceSubnet: 10.96.0.0/12
apiServer:
  certSANs:
  - 127.0.0.1
  - apiserver.cluster.local
  - 172.24.20.51
  - 10.103.97.2
  extraArgs:
    feature-gates: TTLAfterFinished=true
  extraVolumes:
  - name: localtime
    hostPath: /etc/localtime
    mountPath: /etc/localtime
    readOnly: true
    pathType: File
controllerManager:
  extraArgs:
    feature-gates: TTLAfterFinished=true
    experimental-cluster-signing-duration: 876000h
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
scheduler:
  extraArgs:
    feature-gates: TTLAfterFinished=true
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  excludeCIDRs:
  - "10.103.97.2/32"" > /root/kubeadm-config.yaml
00:30:49 [DEBG] [print.go:21] ==>SendPackage==>KubeadmConfigInstall
00:30:49 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
00:30:49 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

00:30:49 [INFO] [kube_certs.go:223] apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local cloudos-51:cloudos-51 kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost] map[10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51]}
00:30:49 [INFO] [kube_certs.go:243] Etcd altnames : {map[cloudos-51:cloudos-51 localhost:localhost] map[127.0.0.1:127.0.0.1 172.24.20.51:172.24.20.51 ::1:::1]}, commonName : cloudos-51
00:30:52 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] hostname
00:30:53 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: cloudos-51

[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
00:30:55 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
00:30:55 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/kubelet.conf] to Dst [/etc/kubernetes/kubelet.conf] total size is: 5.48KB ;speed is 5KB
00:30:56 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
00:30:56 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/admin.conf] to Dst [/etc/kubernetes/admin.conf] total size is: 5.45KB ;speed is 5KB
00:30:57 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
00:30:57 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/controller-manager.conf] to Dst [/etc/kubernetes/controller-manager.conf] total size is: 5.49KB ;speed is 5KB
00:30:58 [DEBG] [ssh.go:58] [172.24.20.51:22] mkdir -p /etc/kubernetes || true
00:30:59 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/scheduler.conf] to Dst [/etc/kubernetes/scheduler.conf] total size is: 5.44KB ;speed is 5KB
00:30:59 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.crt] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.crt] total size is: 1.11KB ;speed is 1KB
00:31:00 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-etcd-client.key] to Dst [/etc/kubernetes/pki/apiserver-etcd-client.key] total size is: 1.64KB ;speed is 1KB
00:31:00 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.crt] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.crt] total size is: 1.12KB ;speed is 1KB
00:31:00 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver-kubelet-client.key] to Dst [/etc/kubernetes/pki/apiserver-kubelet-client.key] total size is: 1.64KB ;speed is 1KB
00:31:01 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver.crt] to Dst [/etc/kubernetes/pki/apiserver.crt] total size is: 1.31KB ;speed is 1KB
00:31:01 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/apiserver.key] to Dst [/etc/kubernetes/pki/apiserver.key] total size is: 1.64KB ;speed is 1KB
00:31:01 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/ca.crt] to Dst [/etc/kubernetes/pki/ca.crt] total size is: 1.04KB ;speed is 1KB
00:31:02 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/ca.key] to Dst [/etc/kubernetes/pki/ca.key] total size is: 1.64KB ;speed is 1KB
00:31:02 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/ca.crt] to Dst [/etc/kubernetes/pki/etcd/ca.crt] total size is: 1.04KB ;speed is 1KB
00:31:02 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/ca.key] to Dst [/etc/kubernetes/pki/etcd/ca.key] total size is: 1.64KB ;speed is 1KB
00:31:02 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.crt] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.crt] total size is: 1.12KB ;speed is 1KB
00:31:03 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/healthcheck-client.key] to Dst [/etc/kubernetes/pki/etcd/healthcheck-client.key] total size is: 1.64KB ;speed is 1KB
00:31:03 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/peer.crt] to Dst [/etc/kubernetes/pki/etcd/peer.crt] total size is: 1.16KB ;speed is 1KB
00:31:03 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/peer.key] to Dst [/etc/kubernetes/pki/etcd/peer.key] total size is: 1.64KB ;speed is 1KB
00:31:04 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/server.crt] to Dst [/etc/kubernetes/pki/etcd/server.crt] total size is: 1.16KB ;speed is 1KB
00:31:04 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/etcd/server.key] to Dst [/etc/kubernetes/pki/etcd/server.key] total size is: 1.64KB ;speed is 1KB
00:31:04 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-ca.crt] to Dst [/etc/kubernetes/pki/front-proxy-ca.crt] total size is: 1.06KB ;speed is 1KB
00:31:04 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-ca.key] to Dst [/etc/kubernetes/pki/front-proxy-ca.key] total size is: 1.64KB ;speed is 1KB
00:31:05 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-client.crt] to Dst [/etc/kubernetes/pki/front-proxy-client.crt] total size is: 1.08KB ;speed is 1KB
00:31:05 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/front-proxy-client.key] to Dst [/etc/kubernetes/pki/front-proxy-client.key] total size is: 1.64KB ;speed is 1KB
00:31:05 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/sa.key] to Dst [/etc/kubernetes/pki/sa.key] total size is: 1.64KB ;speed is 1KB
00:31:06 [DEBG] [scp.go:337] [ssh][172.24.20.51:22]transfer local [/root/.sealos/pki/sa.pub] to Dst [/etc/kubernetes/pki/sa.pub] total size is: 0.44KB ;speed is 0KB
00:31:06 [DEBG] [ssh.go:58] [172.24.20.51:22] grep -qF '172.24.20.51 apiserver.cluster.local' /etc/hosts || echo 172.24.20.51 apiserver.cluster.local >> /etc/hosts
00:31:06 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] kubeadm init --config=/root/kubeadm-config.yaml --upload-certs -v 0
00:31:27 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: W0609 00:31:07.163815    5257 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 00:31:07.165527    5257 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 00:31:07.241143    5257 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.0
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.007866 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: gvvbw7.im23nkmkotg5465i
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 \
    --control-plane --certificate-key f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 

00:31:27 [DEBG] [sealos.go:102] [globals]decodeOutput: W0609 00:31:07.163815    5257 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 00:31:07.165527    5257 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0609 00:31:07.241143    5257 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.0
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileExisting-socat]: socat not found in system path
        [WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.007866 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node cloudos-51 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: gvvbw7.im23nkmkotg5465i
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 \
    --control-plane --certificate-key f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 

00:31:27 [INFO] [sealos.go:105] [globals]join command is:  apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 \
    --control-plane --certificate-key f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110


00:31:27 [DEBG] [sealos.go:111] [globals]decodeJoinCmd:  apiserver.cluster.local:6443 --token gvvbw7.im23nkmkotg5465i \
    --discovery-token-ca-cert-hash sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9 \
    --control-plane --certificate-key f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110


00:31:27 [DEBG] [sealos.go:119] [####]0 ::
00:31:27 [DEBG] [sealos.go:119] [####]1 :apiserver.cluster.local:6443:
00:31:27 [DEBG] [sealos.go:119] [####]2 :--token:
00:31:27 [DEBG] [sealos.go:119] [####]3 :gvvbw7.im23nkmkotg5465i:
00:31:27 [DEBG] [sealos.go:119] [####]4 ::
00:31:27 [DEBG] [sealos.go:119] [####]5 ::
00:31:27 [DEBG] [sealos.go:119] [####]6 ::
00:31:27 [DEBG] [sealos.go:119] [####]7 ::
00:31:27 [DEBG] [sealos.go:119] [####]8 :--discovery-token-ca-cert-hash:
00:31:27 [DEBG] [sealos.go:119] [####]9 :sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9:
00:31:27 [DEBG] [sealos.go:119] [####]10 ::
00:31:27 [DEBG] [sealos.go:119] [####]11 ::
00:31:27 [DEBG] [sealos.go:119] [####]12 ::
00:31:27 [DEBG] [sealos.go:119] [####]13 ::
00:31:27 [DEBG] [sealos.go:119] [####]14 :--control-plane:
00:31:27 [DEBG] [sealos.go:119] [####]15 :--certificate-key:
00:31:27 [DEBG] [sealos.go:119] [####]16 :f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110:
00:31:27 [DEBG] [sealos.go:140] [####]JoinToken :gvvbw7.im23nkmkotg5465i
00:31:27 [DEBG] [sealos.go:141] [####]TokenCaCertHash :sha256:1b27d6d8f7bb3f901bcf27696936c60d6f0c0ca7dc1e40c45ef49944d30ba9a9
00:31:27 [DEBG] [sealos.go:142] [####]CertificateKey :f1b1bfe737945a93faa69902f4a9fcd2742423d25b7b812c233d1c7df2879110

5. Configure kubeconfig
00:31:27 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config && chmod 600 /root/.kube/config
00:31:28 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: 
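
With admin.conf copied to /root/.kube/config, kubectl can now talk to the new control plane. A quick sanity check (these commands are not part of the captured log):

kubectl get nodes -o wide
kubectl get pods -n kube-system

The master typically reports NotReady until the Calico CNI from the next step is applied.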

6. Install the Calico CNI
00:31:28 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] echo '
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node.  The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
   name: felixconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: FelixConfiguration
    plural: felixconfigurations
    singular: felixconfiguration
---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamblocks.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMBlock
    plural: ipamblocks
    singular: ipamblock

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: blockaffinities.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BlockAffinity
    plural: blockaffinities
    singular: blockaffinity

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamhandles.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMHandle
    plural: ipamhandles
    singular: ipamhandle

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ipamconfigs.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPAMConfig
    plural: ipamconfigs
    singular: ipamconfig

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgppeers.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPPeer
    plural: bgppeers
    singular: bgppeer

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: BGPConfiguration
    plural: bgpconfigurations
    singular: bgpconfiguration

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: IPPool
    plural: ippools
    singular: ippool

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: HostEndpoint
    plural: hostendpoints
    singular: hostendpoint

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterinformations.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: ClusterInformation
    plural: clusterinformations
    singular: clusterinformation

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworkpolicies.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkPolicy
    plural: globalnetworkpolicies
    singular: globalnetworkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  scope: Cluster
  group: crd.projectcalico.org
  version: v1
  names:
    kind: GlobalNetworkSet
    plural: globalnetworksets
    singular: globalnetworkset

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networkpolicies.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkPolicy
    plural: networkpolicies
    singular: networkpolicy

---

apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
spec:
  scope: Namespaced
  group: crd.projectcalico.org
  version: v1
  names:
    kind: NetworkSet
    plural: networksets
    singular: networkset
---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are queried to check for existence.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  # IPAM resources are manipulated when nodes are deleted.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - create
      - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only requried for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: calico/cni:v3.8.2
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.8.2
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.8.2
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
      containers:
        # Runs calico-node container on each Kubernetes node.  This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.8.2
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth.*|en.*|em.*"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have no effect.
            - name: CALICO_IPV4POOL_CIDR
              value: "100.64.0.0/10"
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml

# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
      annotations:
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:v3.8.2
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
' | kubectl apply -f -
00:31:30 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: configmap/calico-config created
Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
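
To confirm the Calico components actually come up before moving on, the rollout can be watched directly (an optional check, not part of the sealos output above):

kubectl -n kube-system rollout status ds/calico-node
kubectl -n kube-system rollout status deploy/calico-kube-controllers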

7.install success log
00:31:30 [DEBG] [print.go:21] ==>SendPackage==>KubeadmConfigInstall==>InstallMaster0  //overall flow
00:31:30 [INFO] [print.go:26] sealos install success.
00:31:30 [INFO] [init.go:95] 

#kube1.19.0.tar.gz

[root@cloudos-51 ~]# tree kube
kube
├── bin  //dependency binaries
│   ├── conntrack
│   ├── kubeadm
│   ├── kubectl
│   ├── kubelet
│   └── kubelet-pre-start.sh
├── conf  //kube-related configs
│   ├── 10-kubeadm.conf
│   ├── calico.yaml
│   ├── docker.service
│   ├── kubeadm.yaml
│   ├── kubelet.service
│   └── net
│       └── calico.yaml
├── docker  
│   └── docker.tgz //docker install package
├── images
│   └── images.tar  //dependency images
├── README.md
└── shell    //init scripts
    ├── docker.sh
    ├── init.sh
    └── master.sh
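
The package layout can also be inspected without unpacking it, e.g.:

tar -tzf kube1.19.0.tar.gz | head -n 20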

3.testing

3.1 check component status

//check
[root@cloudos-51 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
etcd-0               Healthy     {"health":"true"}                                                                             
[root@cloudos-51 ~]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-69b47f4dfb-jlt5g   1/1     Running   0          47s
kube-system   calico-node-xvdh8                          1/1     Running   0          47s
kube-system   coredns-f9fd979d6-75hr9                    1/1     Running   0          47s
kube-system   coredns-f9fd979d6-gt2rl                    1/1     Running   0          47s
kube-system   etcd-cloudos-51                            1/1     Running   0          55s
kube-system   kube-apiserver-cloudos-51                  1/1     Running   0          55s
kube-system   kube-controller-manager-cloudos-51         0/1     Running   0          55s
kube-system   kube-proxy-mjwn8                           1/1     Running   0          47s
kube-system   kube-scheduler-cloudos-51                  0/1     Running   0          55s

//Fix the unhealthy scheduler and controller-manager: in /etc/kubernetes/manifests/, comment out the "- --port=0" line (so it reads "#- --port=0") in:
kube-controller-manager.yaml
kube-scheduler.yaml
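
A non-interactive way to make the same change (a sketch, assuming the default kubeadm static-pod directory):

//kubelet watches this directory and recreates both static pods after the edit
sed -i 's/- --port=0/#&/' /etc/kubernetes/manifests/kube-scheduler.yaml
sed -i 's/- --port=0/#&/' /etc/kubernetes/manifests/kube-controller-manager.yaml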

//check again
[root@cloudos-51 manifests]# kubectl get pod,svc,ep -A
NAMESPACE     NAME                                           READY   STATUS    RESTARTS   AGE
kube-system   pod/calico-kube-controllers-69b47f4dfb-jlt5g   1/1     Running   0          3m30s
kube-system   pod/calico-node-xvdh8                          1/1     Running   0          3m30s
kube-system   pod/coredns-f9fd979d6-75hr9                    1/1     Running   0          3m30s
kube-system   pod/coredns-f9fd979d6-gt2rl                    1/1     Running   0          3m30s
kube-system   pod/etcd-cloudos-51                            1/1     Running   0          3m38s
kube-system   pod/kube-apiserver-cloudos-51                  1/1     Running   0          3m38s
kube-system   pod/kube-controller-manager-cloudos-51         0/1     Running   0          100s
kube-system   pod/kube-proxy-mjwn8                           1/1     Running   0          3m30s
kube-system   pod/kube-scheduler-cloudos-51                  1/1     Running   0          110s

NAMESPACE     NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP                  3m47s
kube-system   service/kube-dns     ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   3m45s

NAMESPACE     NAME                                ENDPOINTS                                                             AGE
default       endpoints/kubernetes                172.24.20.51:6443                                                     3m47s
kube-system   endpoints/kube-controller-manager   <none>                                                                3m47s
kube-system   endpoints/kube-dns                  100.101.220.65:53,100.101.220.66:53,100.101.220.65:9153 + 3 more...   3m30s
kube-system   endpoints/kube-scheduler            <none>                                                                3m45s


//pod/kube-controller-manager-cloudos-51 is still not ready; its logs show the following errors
I0609 00:34:22.295637       1 request.go:645] Throttling request took 1.049020598s, request: GET:https://apiserver.cluster.local:6443/apis/apiextensions.k8s.io/v1beta1?timeout=32s
E0609 00:34:22.295975       1 driver-call.go:266] Failed to unmarshal output for command: init, output: "2024/06/09 00:34:22 Unix syslog delivery error\n", error: invalid character '/' after top-level value
W0609 00:34:22.295983       1 driver-call.go:149] FlexVolume: driver call failed: executable: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds/uds, args: [init], error: exit status 1, output: "2024/06/09 00:34:22 Unix syslog delivery error\n"
E0609 00:34:22.295998       1 plugins.go:734] Error dynamically probing plugins: Error creating Flexvolume plugin from directory nodeagent~uds, skipping. Error: invalid character '/' after top-level value

//after restarting kubelet, the controller-manager is healthy again
[root@cloudos-51 ~]# systemctl restart kubelet
[root@cloudos-51 ~]# 
[root@cloudos-51 ~]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-69b47f4dfb-jlt5g   1/1     Running   0          7m35s
kube-system   calico-node-xvdh8                          1/1     Running   0          7m35s
kube-system   coredns-f9fd979d6-75hr9                    1/1     Running   0          7m35s
kube-system   coredns-f9fd979d6-gt2rl                    1/1     Running   0          7m35s
kube-system   etcd-cloudos-51                            1/1     Running   0          7m43s
kube-system   kube-apiserver-cloudos-51                  1/1     Running   0          7m43s
kube-system   kube-controller-manager-cloudos-51         1/1     Running   0          5m45s
kube-system   kube-proxy-mjwn8                           1/1     Running   0          7m35s
kube-system   kube-scheduler-cloudos-51                  1/1     Running   0          5m55s

[root@cloudos-51 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

[root@cloudos-51 ~]# kubectl get node  -o wide
NAME         STATUS   ROLES    AGE   VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE          KERNEL-VERSION       CONTAINER-RUNTIME
cloudos-51   Ready    master   12m   v1.19.0   172.24.20.51   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0

//kube-proxy ipvs
[root@cloudos-51 ~]# kubectl -n kube-system logs  kube-proxy-mjwn8 |grep ipvs
I0608 16:31:43.971080       1 server_others.go:259] Using ipvs Proxier.
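
The IPVS virtual servers can also be listed on the host (assuming the ipvsadm tool is installed; it is not shipped in the offline package):

ipvsadm -Ln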

3.2 allow pods to be scheduled on the master

//remove the master taint
kubectl taint node cloudos-51  node-role.kubernetes.io/master-
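
//optional: confirm the taint is gone
kubectl describe node cloudos-51 | grep -i taints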

//Start a busybox pod and keep it in the foreground, don't restart it if it exits:
//kubectl run -i -t busybox --image=busybox --restart=Never

kubectl run -i -t busybox --image=busybox  -- sh


[root@cloudos-51 ~]# kubectl run -i -t busybox --image=busybox  -- sh
If you don't see a command prompt, try pressing enter.
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: tunl0@NONE: <NOARP> mtu 1480 qdisc noop qlen 1000
    link/ipip 0.0.0.0 brd 0.0.0.0
4: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1440 qdisc noqueue 
    link/ether d2:a2:80:ed:0e:94 brd ff:ff:ff:ff:ff:ff
    inet 100.101.220.69/32 scope global eth0
       valid_lft forever preferred_lft forever
/ # ping 172.24.20.51
PING 172.24.20.51 (172.24.20.51): 56 data bytes
64 bytes from 172.24.20.51: seq=0 ttl=64 time=0.111 ms
64 bytes from 172.24.20.51: seq=1 ttl=64 time=0.090 ms
^C
--- 172.24.20.51 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.090/0.100/0.111 ms
/ # ping 172.20.20.51
PING 172.20.20.51 (172.20.20.51): 56 data bytes
64 bytes from 172.20.20.51: seq=0 ttl=64 time=0.069 ms
64 bytes from 172.20.20.51: seq=1 ttl=64 time=0.095 ms
^C
--- 172.20.20.51 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.069/0.082/0.095 ms
/ # 
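
Cluster DNS can be checked from a throwaway pod in the same way (a quick sketch; busybox's nslookup output is minimal but enough to confirm that the kube-dns service resolves):

kubectl run -i -t dnstest --image=busybox --restart=Never -- nslookup kubernetes.default
kubectl delete pod dnstest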

4.sealos directories after install

[root@cloudos-51 ~]# tree .sealos/
.sealos/
├── admin.conf
├── config.yaml   //sealos install configuration
├── controller-manager.conf
├── kubelet.conf
├── pki
│   ├── apiserver.crt
│   ├── apiserver-etcd-client.crt
│   ├── apiserver-etcd-client.key
│   ├── apiserver.key
│   ├── apiserver-kubelet-client.crt
│   ├── apiserver-kubelet-client.key
│   ├── ca.crt
│   ├── ca.key
│   ├── etcd
│   │   ├── ca.crt
│   │   ├── ca.key
│   │   ├── healthcheck-client.crt
│   │   ├── healthcheck-client.key
│   │   ├── peer.crt
│   │   ├── peer.key
│   │   ├── server.crt
│   │   └── server.key
│   ├── front-proxy-ca.crt
│   ├── front-proxy-ca.key
│   ├── front-proxy-client.crt
│   ├── front-proxy-client.key
│   ├── sa.key
│   └── sa.pub
├── scheduler.conf
└── sealos.log

#config

[root@cloudos-51 ~]# cat .sealos/config.yaml 
masters:   //node info
- 172.24.20.51:22
nodes: []
dnsdomain: cluster.local
apiservercertsans:
- 127.0.0.1
- apiserver.cluster.local
- 172.24.20.51
- 10.103.97.2
user: root
passwd: "321321"
privatekey: /root/.ssh/id_rsa
pkpassword: ""
apiserverdomain: apiserver.cluster.local
network: calico  //default CNI
vip: 10.103.97.2
pkgurl: /root/kube1.19.0.tar.gz
version: v1.19.0
repo: k8s.gcr.io
podcidr: 100.64.0.0/10
svccidr: 10.96.0.0/12
certpath: /root/.sealos/pki
certetcdpath: /root/.sealos/pki/etcd
lvscarename: fanux/lvscare
lvscaretag: latest
alioss:
  ossendpoint: ""
  accesskeyid: ""
  accesskeysecrets: ""
  bucketname: ""
  objectpath: ""

5.uninstall

[root@cloudos-51 ~]# sealos clean  -h
sealos clean

Usage:
  sealos clean [flags]

Examples:

        # clean  master
        sealos clean --master 192.168.0.2 \
        --master 192.168.0.3
  
        # clean  node  use --force/-f will be not prompt 
        sealos clean --node 192.168.0.4 \
        --node 192.168.0.5 --force

        # clean master and node
        sealos clean --master 192.168.0.2-192.168.0.3 \
        --node 192.168.0.4-192.168.0.5

        # clean your kubernets HA cluster and use --force/-f will be not prompt (danger)
        sealos clean --all -f


Flags:
      --all              if this is true, delete all 
  -f, --force            if this is true, will no prompt
  -h, --help             help for clean
      --master strings   clean master ips.kubernetes multi-nodes ex. 192.168.0.5-192.168.0.5
      --node strings     clean node ips.kubernetes multi-nodes ex. 192.168.0.5-192.168.0.5
      --vlog int         kubeadm log level

Global Flags:
      --config string   config file (default is $HOME/.sealos/config.yaml)
      --info            logger ture for Info, false for Debug

#clean

m1:
./sealos clean \
    --master 172.24.20.51

m2: (automatically reads ~/.sealos/config.yaml)
sealos clean --all 
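
A single node can also be cleaned on its own with the flags shown in the help above (the IP here is only an example):
./sealos clean --node 172.24.20.52 -f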

#log

1.run the clean command
[root@cloudos-51 ~]# sealos clean --all 
clean command will clean all masters and nodes, continue clean (y/n)?y   //confirm

2.kubeadm reset
02:47:12 [DEBG] [ssh.go:58] [172.24.20.51:22] kubeadm reset -f  -v 0
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Reading configuration from the cluster...
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [preflight] Running pre-flight checks
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Removing info for node "cloudos-51" from the ConfigMap "kubeadm-config" in the "kube-system" Namespace
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Stopping the kubelet service
02:47:13 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Unmounting mounted directories in "/var/lib/kubelet"
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] [reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] 
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] 
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] The reset process does not reset or clean up iptables rules or IPVS tables.
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] If you wish to reset iptables, you must do so manually by using the "iptables" command.
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] 
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] to reset your system's IPVS tables.
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] 
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] The reset process does not clean your kubeconfig files and you must remove them manually.
02:47:18 [INFO] [ssh.go:51] [172.24.20.51:22] Please, check the contents of the $HOME/.kube/config file.
02:47:18 [DEBG] [ssh.go:58] [172.24.20.51:22] sed -i '/kubectl/d;/sealos/d' /root/.bashrc
02:47:19 [DEBG] [ssh.go:58] [172.24.20.51:22] modprobe -r ipip  && lsmod
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] Module                  Size  Used by
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] tcp_diag               16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] inet_diag              24576  1 tcp_diag
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_multiport           16384  55
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] iptable_filter         16384  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] iptable_mangle         16384  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] iptable_nat            16384  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] iptable_raw            16384  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_tables              24576  4 iptable_filter,iptable_raw,iptable_nat,iptable_mangle
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_hash_ip         36864  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_hash_net        45056  3
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] veth                   24576  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_set                 16384  10
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_hash_ipportnet    45056  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_hash_ipportip    36864  2
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_bitmap_port     20480  4
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set_hash_ipport     36864  8
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_set                 49152  7 ip_set_hash_ipportnet,ip_set_bitmap_port,ip_set_hash_ip,xt_set,ip_set_hash_net,ip_set_hash_ipport,ip_set_hash_ipportip
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] dummy                  16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_comment             16384  282
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_mark                16384  69
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_vs_sh               16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_vs_wrr              16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_vs_rr               16384  4
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ip_vs                 151552  10 ip_vs_rr,ip_vs_sh,ip_vs_wrr
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_MASQUERADE          16384  5
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_conntrack_netlink    40960  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nft_counter            16384  36
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nft_chain_nat          16384  8
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_addrtype            16384  7
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nft_compat             20480  30
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_tables             139264  67 nft_compat,nft_counter,nft_chain_nat
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nfnetlink              16384  5 nft_compat,nf_conntrack_netlink,nf_tables,ip_set
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xt_conntrack           16384  22
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_nat                 36864  3 nft_chain_nat,iptable_nat,xt_MASQUERADE
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_conntrack          126976  5 xt_conntrack,nf_nat,nf_conntrack_netlink,xt_MASQUERADE,ip_vs
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_defrag_ipv6         20480  2 nf_conntrack,ip_vs
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] nf_defrag_ipv4         16384  1 nf_conntrack
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] overlay               110592  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] vboxvideo              32768  1
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] drm_vram_helper        20480  1 vboxvideo
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] drm_kms_helper        176128  1 vboxvideo
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] syscopyarea            16384  1 drm_kms_helper
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] sysfillrect            16384  1 drm_kms_helper
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] sysimgblt              16384  1 drm_kms_helper
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] fb_sys_fops            16384  1 drm_kms_helper
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ttm                    98304  1 drm_vram_helper
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] crc32_pclmul           16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] drm                   503808  5 drm_kms_helper,drm_vram_helper,vboxvideo,ttm
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] ghash_clmulni_intel    16384  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] aesni_intel           372736  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] crypto_simd            16384  1 aesni_intel
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] cryptd                 24576  2 crypto_simd,ghash_clmulni_intel
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] glue_helper            16384  1 aesni_intel
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] i2c_core               69632  2 drm_kms_helper,drm
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] vboxguest              40960  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] video                  45056  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] backlight              16384  1 video
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] sch_fq_codel           20480  3
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] xfs                  1409024  2
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] crc32c_intel           24576  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] e1000                 143360  0
02:47:19 [INFO] [ssh.go:51] [172.24.20.51:22] autofs4                40960  2

3.delete files and directories
02:47:19 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf ~/.kube/ && rm -rf /etc/kubernetes/
02:47:19 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf /etc/systemd/system/kubelet.service.d && rm -rf /etc/systemd/system/kubelet.service
02:47:19 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf /usr/bin/kube* && rm -rf /usr/bin/crictl
02:47:19 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf /etc/cni && rm -rf /opt/cni
02:47:20 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf /var/lib/etcd && rm -rf /var/etcd
02:47:20 [DEBG] [ssh.go:58] [172.24.20.51:22] sed -i "/apiserver.cluster.local/d" /etc/hosts 
02:47:20 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf ~/kube
02:47:20 [DEBG] [ssh.go:58] [172.24.20.51:22] rm -rf /etc/kubernetes/pki
02:47:20 [DEBG] [ssh.go:58] [172.24.20.51:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
02:47:21 [INFO] [clean.go:76] if clean all and clean sealos config
02:47:21 [INFO] [cmd.go:12] [os]exec cmd is :  /bin/sh [-c rm -rf /root/.sealos]
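
As the kubeadm reset output above notes, iptables rules and IPVS tables are not cleaned automatically; if the host will be reused they can be cleared by hand (only do this if nothing else on the machine depends on them):

ipvsadm --clear
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X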

6.init calico disable ipip

[root@cloudos-51 ~]# nmcli dev
DEVICE      TYPE      STATE                   CONNECTION 
eth0        ethernet  connected               eth0       
eth1        ethernet  connected               eth1       
docker0     bridge    connected (externally)  docker0    
kube-ipvs0  dummy     unmanaged               --         
lo          loopback  unmanaged               --  

//enable calico bgp
./sealos init \
    --master 172.24.20.51 --interface "eth.*" --ipip false \
    --user root --passwd 321321 \
    --version v1.19.0 --pkg-url=/root/kube1.19.0.tar.gz

./sealos init \
    --master 172.24.20.51 --ipip false \
    --user root --passwd 321321 \
    --version v1.19.0 --pkg-url=/root/kube1.19.0.tar.gz

      --interface string        name of network interface, when use calico IP_AUTODETECTION_METHOD, set your ipv4 with can-reach=192.168.0.1 (default "eth.*|en.*|em.*")
      --ipip                    ipip mode enable, calico.. (default true)
[root@cloudos-51 ~]# kubectl -n kube-system get ds/calico-node -o yaml |grep  -C 10 CALICO_IPV4POOL_IPIP
...
        - name: CLUSTER_TYPE
          value: k8s,bgp
        - name: IP
          value: autodetect
        - name: IP_AUTODETECTION_METHOD
          value: interface=eth.*|en.*|em.*
        - name: CALICO_IPV4POOL_IPIP
          value: Always
        - name: FELIX_IPINIPMTU
          valueFrom:
            configMapKeyRef:
              key: veth_mtu
              name: calico-config
        - name: CALICO_IPV4POOL_CIDR
          value: 100.64.0.0/10
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
...

After checking, CALICO_IPV4POOL_IPIP is still Always; change it to Never:
kubectl -n kube-system edit  ds/calico-node
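
A non-interactive alternative to the edit above (a sketch; note that if the default IPPool has already been created, the existing crd.projectcalico.org ippools object keeps its old ipipMode and has to be changed separately):
kubectl -n kube-system set env ds/calico-node CALICO_IPV4POOL_IPIP=Never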

[root@cloudos-51 ~]# kubectl get pods -A |grep calico
kube-system   calico-kube-controllers-69b47f4dfb-9kzpm   1/1     Running   0          16m
kube-system   calico-node-xpd7p                          0/1     Running   0          7s

7.add node

[root@cloudos-51 ~]# sealos join -h
sealos join --node 192.168.0.5

Usage:
  sealos join [flags]

Flags:
  -h, --help             help for join
      --master strings   kubernetes multi-master ex. 192.168.0.5-192.168.0.5
      --node strings     kubernetes multi-nodes ex. 192.168.0.5-192.168.0.5
      --vlog int         kubeadm log level

Global Flags:
      --config string   config file (default is $HOME/.sealos/config.yaml)
      --info            logger ture for Info, false for Debug

#add node

./sealos join --node 172.24.20.52
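
Several nodes (or additional masters) can be joined in one call using the IP-range syntax from the join help above; the addresses below are only examples:
./sealos join --node 172.24.20.52-172.24.20.55
./sealos join --master 172.24.20.53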

#log

1.the join command
[root@cloudos-51 ~]# ./sealos join --node 172.24.20.52
03:44:27 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] hostname
03:44:28 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: cloudos-52

2.host check
03:44:28 [DEBG] [ssh.go:58] [172.24.20.52:22] cat /etc/hosts |grep cloudos-52 || echo '172.24.20.52 cloudos-52' >> /etc/hosts
03:44:28 [INFO] [ssh.go:51] [172.24.20.52:22] 172.24.20.52 cloudos-52
03:44:28 [INFO] [ssh.go:51] [172.24.20.52:22] 172.20.20.52 cloudos-52
03:44:28 [INFO] [check.go:51] [172.24.20.52:22]  ------------ check ok
03:44:28 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /usr/bin || true
03:44:28 [DEBG] [download.go:30] [172.24.20.52:22]please wait for mkDstDir
03:44:28 [DEBG] [download.go:32] [172.24.20.52:22]please wait for before hook
03:44:28 [DEBG] [ssh.go:58] [172.24.20.52:22] ps -ef |grep -v 'grep'|grep sealos >/dev/null || rm -rf /usr/bin/sealos
03:44:28 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] ls -l /usr/bin/sealos 2>/dev/null |wc -l
03:44:29 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 0

3.transfer the sealos binary and kube package
03:44:29 [DEBG] [scp.go:27] [ssh]source file md5 value is b1fef37dd355c6d6842a20345a48b4fd
03:44:33 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 42.27MB ;speed is 42MB
03:44:33 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] md5sum /usr/bin/sealos | cut -d" " -f1
03:44:33 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: b1fef37dd355c6d6842a20345a48b4fd

03:44:33 [DEBG] [scp.go:30] [ssh]host: 172.24.20.52:22 , remote md5: b1fef37dd355c6d6842a20345a48b4fd
03:44:33 [INFO] [scp.go:34] [ssh]md5 validate true
03:44:33 [INFO] [download.go:50] [172.24.20.52:22]copy file md5 validate success
03:44:33 [DEBG] [download.go:56] [172.24.20.52:22]please wait for after hook
03:44:33 [DEBG] [ssh.go:58] [172.24.20.52:22] chmod a+x /usr/bin/sealos
03:44:36 [DEBG] [ssh.go:58] [172.24.20.52:22] mkdir -p /root || true
03:44:36 [DEBG] [download.go:30] [172.24.20.52:22]please wait for mkDstDir
03:44:36 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] ls -l /root/kube1.19.0.tar.gz 2>/dev/null |wc -l
03:44:36 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 0

03:44:36 [DEBG] [scp.go:27] [ssh]source file md5 value is bdd6c97922918f6070a65521df2a8b47
03:44:45 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 100.00MB ;speed is 100MB
03:44:52 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 200.00MB ;speed is 100MB
03:45:00 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 300.00MB ;speed is 100MB
03:45:07 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 400.00MB ;speed is 100MB
03:45:11 [INFO] [scp.go:101] [ssh][172.24.20.52:22]transfer total size is: 459.87MB ;speed is 59MB
03:45:11 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] md5sum /root/kube1.19.0.tar.gz | cut -d" " -f1
03:45:12 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: bdd6c97922918f6070a65521df2a8b47

03:45:12 [DEBG] [scp.go:30] [ssh]host: 172.24.20.52:22 , remote md5: bdd6c97922918f6070a65521df2a8b47
03:45:12 [INFO] [scp.go:34] [ssh]md5 validate true

4.extract the package and run the init scripts
03:45:12 [INFO] [download.go:50] [172.24.20.52:22]copy file md5 validate success
03:45:12 [DEBG] [download.go:56] [172.24.20.52:22]please wait for after hook
03:45:12 [DEBG] [ssh.go:58] [172.24.20.52:22] cd /root && rm -rf kube && tar zxvf kube1.19.0.tar.gz  && cd /root/kube/shell && rm -f ../bin/sealos && bash init.sh && sed -i '/kubectl/d;/sealos/d' /root/.bashrc  && echo 'command -v kubectl &>/dev/null && source <(kubectl completion bash)' >> /root/.bashrc && echo '[ -x /usr/bin/sealos ] && source <(sealos completion bash)' >> /root/.bashrc && source /root/.bashrc
03:45:12 [INFO] [ssh.go:51] [172.24.20.52:22] kube/
03:45:12 [INFO] [ssh.go:51] [172.24.20.52:22] kube/README.md
03:45:12 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/
03:45:12 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubeadm
03:45:13 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubectl
03:45:13 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubelet
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/kubelet-pre-start.sh
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/conntrack
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/bin/sealos
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/10-kubeadm.conf
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/calico.yaml
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/docker.service
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/kubeadm.yaml
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/kubelet.service
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/net/
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/conf/net/calico.yaml
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/docker/
03:45:15 [INFO] [ssh.go:51] [172.24.20.52:22] kube/docker/docker.tgz
03:45:16 [INFO] [ssh.go:51] [172.24.20.52:22] kube/images/
03:45:16 [INFO] [ssh.go:51] [172.24.20.52:22] kube/images/images.tar
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/init.sh
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/master.sh
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] kube/shell/docker.sh
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + storage=/var/lib/docker
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + harbor_ip=127.0.0.1
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + mkdir -p /var/lib/docker
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + command_exists docker
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + command -v docker
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ get_distribution
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ lsb_dist=
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ '[' -r /etc/os-release ']'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] +++ . /etc/os-release
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ NAME=OpenCloudOS
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ VERSION=8.6
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ ID=opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ ID_LIKE='rhel fedora'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ VERSION_ID=8.6
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ PLATFORM_ID=platform:oc8
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ PRETTY_NAME='OpenCloudOS 8.6'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ ANSI_COLOR='0;31'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ CPE_NAME=cpe:/o:opencloudos:opencloudos:8
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ HOME_URL=https://www.opencloudos.org/
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++++ BUG_REPORT_URL=https://bugs.opencloudos.tech/
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] +++ echo opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ lsb_dist=opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ echo opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + lsb_dist=opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ echo opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] ++ tr '[:upper:]' '[:lower:]'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + lsb_dist=opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + echo 'current system is opencloudos'
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] current system is opencloudos
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + case "$lsb_dist" in
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + cp ../conf/docker.service /usr/lib/systemd/system/docker.service
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] + tar --strip-components=1 -xvzf ../docker/docker.tgz -C /usr/bin
03:45:31 [INFO] [ssh.go:51] [172.24.20.52:22] docker/ctr
03:45:32 [INFO] [ssh.go:51] [172.24.20.52:22] docker/runc
03:45:32 [INFO] [ssh.go:51] [172.24.20.52:22] docker/dockerd
03:45:33 [INFO] [ssh.go:51] [172.24.20.52:22] docker/docker
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] docker/containerd
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] docker/docker-init
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] docker/containerd-shim
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] docker/docker-proxy
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] + chmod a+x /usr/bin
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] + systemctl enable docker.service
03:45:34 [INFO] [ssh.go:51] [172.24.20.52:22] Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
03:45:35 [INFO] [ssh.go:51] [172.24.20.52:22] + systemctl restart docker.service
03:45:36 [INFO] [ssh.go:51] [172.24.20.52:22] + cat
03:45:36 [INFO] [ssh.go:51] [172.24.20.52:22] + systemctl restart docker.service
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] + docker version
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] Client: Docker Engine - Community
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Version:           19.03.0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  API version:       1.40
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Go version:        go1.12.5
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Git commit:        aeac9490dc
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Built:             Wed Jul 17 18:11:50 2019
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  OS/Arch:           linux/amd64
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Experimental:      false
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] 
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] Server: Docker Engine - Community
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  Engine:
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          19.03.0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   API version:      1.40 (minimum version 1.12)
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Go version:       go1.12.5
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Git commit:       aeac9490dc
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Built:            Wed Jul 17 18:22:15 2019
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   OS/Arch:          linux/amd64
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Experimental:     false
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  containerd:
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          v1.2.6
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        894b81a4b802e4eb2a91d1ce216b8817763c29fb
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  runc:
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          1.0.0-rc8
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        425e105d5a03fabd737a126ad93d62a9eeede87f
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]  docker-init:
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   Version:          0.18.0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22]   GitCommit:        fec3683
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] modprobe: FATAL: Module nf_conntrack_ipv4 not found in directory /lib/modules/5.4.119-20.0009.20
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/00-tencentos.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.printk = 4
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv6.conf.all.disable_ipv6 = 0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] vm.oom_dump_tasks = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.ctrl-alt-del = 0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.sysrq_use_leftctrl = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.ip_local_reserved_ports = 48369,36000,56000
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.yama.ptrace_scope = 0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-coredump.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_pipe_limit = 16
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-default.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.sysrq = 16
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.core_uses_pid = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.kptr_restrict = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.rp_filter = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.accept_source_route = 0
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.conf.all.promote_secondaries = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.core.default_qdisc = fq_codel
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] fs.protected_hardlinks = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] fs.protected_symlinks = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.core.optmem_max = 81920
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /usr/lib/sysctl.d/50-pid-max.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] kernel.pid_max = 4194304
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.d/99-sysctl.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.d/k8s.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.bridge.bridge-nf-call-ip6tables = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.bridge.bridge-nf-call-iptables = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] * Applying /etc/sysctl.conf ...
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] net.ipv4.ip_forward = 1
03:45:38 [INFO] [ssh.go:51] [172.24.20.52:22] setenforce: SELinux is disabled
79d541cda6cb: Loading layer  3.041MB/3.041MB
e9933a1f21f5: Loading layer  1.734MB/1.734MB
d85a13cfa53e: Loading layer  107.3MB/107.3MB
Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
c3a6120d2fd6: Loading layer  115.2MB/115.2MB
Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
0d1435bd79e4: Loading layer  3.062MB/3.062MB
2aef7a73d4b0: Loading layer   2.13MB/2.13MB
ec3830e15d9c: Loading layer  225.3MB/225.3MB
4d5d7883c216: Loading layer   2.19MB/2.19MB
5d3a32005e6b: Loading layer  21.95MB/21.95MB
Loaded image: k8s.gcr.io/etcd:3.4.9-1
ba0dae6243cc: Loading layer  684.5kB/684.5kB
Loaded image: k8s.gcr.io/pause:3.2
8b62fd4eb2dd: Loading layer  43.99MB/43.99MB
40fe7b163104: Loading layer  2.828MB/2.828MB
Loaded image: calico/kube-controllers:v3.8.2
91e3a07063b3: Loading layer  53.89MB/53.89MB
b4e54f331697: Loading layer  21.78MB/21.78MB
b9b82a97c787: Loading layer  5.168MB/5.168MB
1b55846906e8: Loading layer  4.608kB/4.608kB
061bfb5cb861: Loading layer  8.192kB/8.192kB
78dd6c0504a7: Loading layer  8.704kB/8.704kB
f83925edb29c: Loading layer  38.81MB/38.81MB
Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
a2a6ea4dde58: Loading layer  42.13MB/42.13MB
Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
225df95e717c: Loading layer  336.4kB/336.4kB
96d17b0b58a7: Loading layer  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
d8a33133e477: Loading layer  72.47MB/72.47MB
337ec577cf9c: Loading layer     33MB/33MB
45cc6dfacce1: Loading layer  3.584kB/3.584kB
7b3ecdc818b0: Loading layer  3.584kB/3.584kB
2b0805a50f82: Loading layer  21.85MB/21.85MB
c9bf76343513: Loading layer  11.26kB/11.26kB
f4176618c27b: Loading layer  11.26kB/11.26kB
4dcaff1da822: Loading layer   6.55MB/6.55MB
92e6b8f58573: Loading layer  2.945MB/2.945MB
5f970d4ac62d: Loading layer  35.84kB/35.84kB
b1a2a2446599: Loading layer  55.22MB/55.22MB
014866f8df9e: Loading layer   1.14MB/1.14MB
Loaded image: calico/node:v3.8.2
466b4a33898e: Loading layer  88.05MB/88.05MB
dd824a99572a: Loading layer  10.24kB/10.24kB
d8fdd74cc7ed: Loading layer   2.56kB/2.56kB
Loaded image: calico/cni:v3.8.2
3fc64803ca2d: Loading layer  4.463MB/4.463MB
f03a403b18a7: Loading layer   5.12kB/5.12kB
0de6f9b8b1f7: Loading layer  5.166MB/5.166MB
Loaded image: calico/pod2daemon-flexvol:v3.8.2
03:46:03 [INFO] [ssh.go:51] [172.24.20.52:22] driver is cgroupfs
03:46:03 [INFO] [ssh.go:51] [172.24.20.52:22] Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /etc/systemd/system/kubelet.service.
03:46:04 [INFO] [ssh.go:13] [ssh][172.24.20.51:22] kubeadm token create --print-join-command -v 0
03:46:05 [DEBG] [ssh.go:25] [ssh][172.24.20.51:22]command result is: W0609 03:46:05.491131  284796 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join apiserver.cluster.local:6443 --token vtb6w3.fe1y80s0nkefudbt     --discovery-token-ca-cert-hash sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704 

03:46:05 [DEBG] [sealos.go:102] [globals]decodeOutput: W0609 03:46:05.491131  284796 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join apiserver.cluster.local:6443 --token vtb6w3.fe1y80s0nkefudbt     --discovery-token-ca-cert-hash sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704 

03:46:05 [INFO] [sealos.go:105] [globals]join command is:  apiserver.cluster.local:6443 --token vtb6w3.fe1y80s0nkefudbt     --discovery-token-ca-cert-hash sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704 

03:46:05 [DEBG] [sealos.go:111] [globals]decodeJoinCmd:  apiserver.cluster.local:6443 --token vtb6w3.fe1y80s0nkefudbt     --discovery-token-ca-cert-hash sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704 

03:46:05 [DEBG] [sealos.go:119] [####]0 ::
03:46:05 [DEBG] [sealos.go:119] [####]1 :apiserver.cluster.local:6443:
03:46:05 [DEBG] [sealos.go:119] [####]2 :--token:
03:46:05 [DEBG] [sealos.go:119] [####]3 :vtb6w3.fe1y80s0nkefudbt:
03:46:05 [DEBG] [sealos.go:119] [####]4 ::
03:46:05 [DEBG] [sealos.go:119] [####]5 ::
03:46:05 [DEBG] [sealos.go:119] [####]6 ::
03:46:05 [DEBG] [sealos.go:119] [####]7 ::
03:46:05 [DEBG] [sealos.go:119] [####]8 :--discovery-token-ca-cert-hash:
03:46:05 [DEBG] [sealos.go:119] [####]9 :sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704:
03:46:05 [DEBG] [sealos.go:119] [####]10 ::
03:46:05 [DEBG] [sealos.go:140] [####]JoinToken :vtb6w3.fe1y80s0nkefudbt
03:46:05 [DEBG] [sealos.go:141] [####]TokenCaCertHash :sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704
03:46:05 [DEBG] [sealos.go:142] [####]CertificateKey :
03:46:05 [DEBG] [ssh.go:58] [172.24.20.52:22] echo "apiVersion: kubeadm.k8s.io/v1beta2
caCertPath: /etc/kubernetes/pki/ca.crt
discovery:
  bootstrapToken:
    apiServerEndpoint: 10.103.97.2:6443
    token: vtb6w3.fe1y80s0nkefudbt
    caCertHashes:
    - sha256:9defafa7a4c69a5f5cc325997a76cfa0679b1b1f91dc5b8ff0b0026439a6b704
  timeout: 5m0s
kind: JoinConfiguration
nodeRegistration:
  criSocket: /var/run/dockershim.sock" > /root/kubeadm-join-config.yaml
03:46:05 [DEBG] [ssh.go:58] [172.24.20.52:22] echo 10.103.97.2 apiserver.cluster.local >> /etc/hosts
03:46:06 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] sealos route --host 172.24.20.52
03:46:06 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: failed

5.add route and ipvs rules
03:46:06 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] sealos route add --host 10.103.97.2 --gateway 172.24.20.52
03:46:07 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 
03:46:07 [DEBG] [ssh.go:58] [172.24.20.52:22] sealos ipvs --vs 10.103.97.2:6443  --rs 172.24.20.51:6443 --health-path /healthz --health-schem https --run-once
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [care.go:16] VsAndRsCare DeleteVirtualServer
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [WARN] [service.go:75] DeleteVirtualServer error:  no such process
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [WARN] [care.go:19] VsAndRsCare DeleteVirtualServer: no such process
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [service.go:129] IsVirtualServerAvailable warn: virtual server is empty.
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 172.24.20.51, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 172.24.20.51, Port: 6443
03:46:07 [INFO] [ssh.go:51] [172.24.20.52:22] 03:46:39 [DEBG] [utils.go:21] SplitServer debug: IP: 10.103.97.2, Port: 6443
03:46:07 [DEBG] [ssh.go:58] [172.24.20.52:22] kubeadm join --config=/root/kubeadm-join-config.yaml  -v 0
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Running pre-flight checks
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING FileExisting-socat]: socat not found in system path
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22]   [WARNING FileExisting-tc]: tc not found in system path
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] Reading configuration from the cluster...
03:46:08 [INFO] [ssh.go:51] [172.24.20.52:22] [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
03:46:09 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
03:46:09 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
03:46:09 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Starting the kubelet
03:46:09 [INFO] [ssh.go:51] [172.24.20.52:22] [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] 
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] This node has joined the cluster:
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] * Certificate signing request was sent to apiserver and a response was received.
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] * The Kubelet was informed of the new secure connection details.
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] 
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
03:46:17 [INFO] [ssh.go:51] [172.24.20.52:22] 
03:46:17 [INFO] [ssh.go:13] [ssh][172.24.20.52:22] mkdir -p /etc/kubernetes/manifests
03:46:17 [DEBG] [ssh.go:25] [ssh][172.24.20.52:22]command result is: 
03:46:18 [INFO] [scp.go:159] [ssh][172.24.20.52:22]transfer total size is: 0MB
03:46:18 [DEBG] [ssh.go:58] [172.24.20.52:22] rm -rf /root/kube

#check

[root@cloudos-51 ~]# kubectl get node -o wide
NAME         STATUS   ROLES    AGE    VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE          KERNEL-VERSION       CONTAINER-RUNTIME
cloudos-51   Ready    master   32m    v1.19.0   172.24.20.51   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0
cloudos-52   Ready    <none>   3m7s   v1.19.0   172.24.20.52   <none>        OpenCloudOS 8.6   5.4.119-20.0009.20   docker://19.3.0
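
The new node shows ROLES <none>; an optional worker label makes the output easier to read, and the node's own calico pod can be checked at the same time:
kubectl label node cloudos-52 node-role.kubernetes.io/worker=
kubectl -n kube-system get pods -o wide | grep cloudos-52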