Ramblings

  • kubernetes removed the docker-shim component in version 1.24, so docker by itself can no longer plug into kubernetes 1.24 and later
    • PS: docker-shim used to be maintained by the kubernetes team; for historical reasons they dropped it
  • I previously wrote a post about a binary deployment of k8s-v1.23.3 on containerd; overall, the experience made me realize docker had spoiled me
    • containerd takes real getting used to: the native ctr command is clumsy, and building images depends on the buildkit service
    • for a rookie like me, containerd is like a stick-shift car that only veteran drivers can handle; I prefer an automatic
  • this post deploys everything with the latest versions of each component and is for learning purposes only
    • for production or company environments, stick with the current stable versions; jumping straight to the newest release is too aggressive
  • also, if you need to deploy kubernetes and don't like containerd, you can use cri-dockerd directly, which saves a lot of grief later if you need to move up to kubernetes 1.24 or above
    • granted, upgrading either the container runtime or kubernetes itself is painful; still, one less headache is one less headache [insert dog-head meme]


# kubernetes server binaries
https://dl.k8s.io/v1.26.3/kubernetes-server-linux-amd64.tar.gz
# etcd binaries
https://github.com/etcd-io/etcd/releases/download/v3.5.7/etcd-v3.5.7-linux-amd64.tar.gz
# cfssl binary
https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64
# cfssljson binary
https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssljson_1.6.3_linux_amd64
# docker binaries
https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-23.0.1.tgz
# cri-dockerd binaries
https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz

Getting to work

Environment preparation

Create the working directories

work_dir='/data/k8s-work-dir'
mkdir -p ${work_dir}/{bin,images,pkg,tmp/{ssl,service}}
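If the binaries listed above have not been downloaded yet, they can be pulled straight into ${work_dir}/bin (a minimal sketch using the URLs from the download list; swap in your own mirror if needed, the later steps all assume the archives sit in ${work_dir}/bin):
# download everything the rest of this post unpacks from ${work_dir}/bin
cd ${work_dir}/bin
wget https://dl.k8s.io/v1.26.3/kubernetes-server-linux-amd64.tar.gz
wget https://github.com/etcd-io/etcd/releases/download/v3.5.7/etcd-v3.5.7-linux-amd64.tar.gz
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssljson_1.6.3_linux_amd64
wget https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-23.0.1.tgz
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1.amd64.tgz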

Set up passwordless SSH to the nodes

ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa -q
ip_head='192.168.11';for i in 147 148 149;do ssh-copy-id ${ip_head}.${i};done

Disable the firewall

ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "systemctl disable firewalld"; \
ssh ${ip_head}.${i} "systemctl stop firewalld"; \
done

Disable SELinux

ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "setenforce 0"; \
ssh ${ip_head}.${i} "sed -i 's/SELINUX=[a-z].*/SELINUX=disabled/g' /etc/selinux/config"; \
done

Disable the swap partition

ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "swapoff -a"; \
ssh ${ip_head}.${i} "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"; \
done

Load kernel modules

ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "modprobe ip_vs"; \
ssh ${ip_head}.${i} "modprobe ip_vs_rr"; \
ssh ${ip_head}.${i} "modprobe ip_vs_wrr"; \
ssh ${ip_head}.${i} "modprobe ip_vs_sh"; \
ssh ${ip_head}.${i} "modprobe nf_conntrack"; \
ssh ${ip_head}.${i} "modprobe nf_conntrack_ipv4"; \
ssh ${ip_head}.${i} "modprobe br_netfilter"; \
ssh ${ip_head}.${i} "modprobe overlay"; \
done
cat << EOF > ${work_dir}/tmp/service/k8s-modules.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
nf_conntrack_ipv4
br_netfilter
overlay
EOF
ip_head='192.168.11';for i in 147 148 149;do \
scp ${work_dir}/tmp/service/k8s-modules.conf ${ip_head}.${i}:/etc/modules-load.d/; \
ssh ${ip_head}.${i} "systemctl enable systemd-modules-load"; \
ssh ${ip_head}.${i} "systemctl restart systemd-modules-load"; \
ssh ${ip_head}.${i} "systemctl is-active systemd-modules-load"; \
done
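Optionally double-check that the modules actually got loaded on every node:
# sanity check: list the modules we just enabled
ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "lsmod | grep -E 'ip_vs|nf_conntrack|br_netfilter|overlay'"; \
done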

Configure kernel parameters

cat << EOF > ${work_dir}/tmp/service/kubernetes.conf
# enable packet forwarding (needed for vxlan)
net.ipv4.ip_forward=1
# let iptables see and process bridged traffic
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-arptables=1
# disable tcp_tw_recycle, otherwise it conflicts with NAT and breaks connectivity
net.ipv4.tcp_tw_recycle=0
# do not reuse TIME-WAIT sockets for new TCP connections
net.ipv4.tcp_tw_reuse=0
# upper limit of the socket listen() backlog
net.core.somaxconn=32768
# maximum number of tracked connections, defaults to nf_conntrack_buckets * 4
net.netfilter.nf_conntrack_max=1000000
# avoid using swap; only fall back to it when the system is about to OOM
vm.swappiness=0
# maximum number of memory map areas a process may have
vm.max_map_count=655360
# maximum number of file handles the kernel can allocate
fs.file-max=6553600
# TCP keepalive settings
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
EOF
ip_head='192.168.11';for i in 147 148 149;do \
scp ${work_dir}/tmp/service/kubernetes.conf ${ip_head}.${i}:/etc/sysctl.d/; \
ssh ${ip_head}.${i} "sysctl -p /etc/sysctl.d/kubernetes.conf"; \
done

Flush iptables rules

ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat"; \
ssh ${ip_head}.${i} "iptables -P FORWARD ACCEPT"; \
done

Configure the CA certificate

cat << EOF > ${work_dir}/tmp/ssl/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "876600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876600h"
      }
    }
  }
}
EOF
cat << EOF > ${work_dir}/tmp/ssl/ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876600h"
 }
}
EOF

Prepare the cfssl tools

mv ${work_dir}/bin/cfssl_1.6.3_linux_amd64 ${work_dir}/bin/cfssl
mv ${work_dir}/bin/cfssljson_1.6.3_linux_amd64 ${work_dir}/bin/cfssljson
chmod +x ${work_dir}/bin/cfssl*

Create the CA certificate

${work_dir}/bin/cfssl gencert -initca ${work_dir}/tmp/ssl/ca-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/ca
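This produces ca.pem, ca-key.pem and ca.csr under ${work_dir}/tmp/ssl. A quick sanity check of the new CA, assuming openssl is available:
# confirm the subject and validity period of the CA
openssl x509 -in ${work_dir}/tmp/ssl/ca.pem -noout -subject -dates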

Deploy the master components

Deploy the etcd component

Prepare the etcd binaries
cd ${work_dir}/bin && \
tar xf etcd-v3.5.7-linux-amd64.tar.gz
Configure the etcd certificate
cat << EOF > ${work_dir}/tmp/ssl/etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.11.147",
    "192.168.11.148",
    "192.168.11.149"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Create the etcd certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/etcd-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/etcd
Configure etcd as a systemd service
cat << EOF > ${work_dir}/tmp/service/kube-etcd.service.template
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=##etcdDataPath##
ExecStart=##etcdBin##/etcd \\
  --wal-dir= \\
  --data-dir=##etcdDataPath## \\
  --name=etcd-##etcdHost## \\
  --cert-file=##sslPath##/etcd.pem \\
  --key-file=##sslPath##/etcd-key.pem \\
  --peer-cert-file=##sslPath##/etcd.pem \\
  --peer-key-file=##sslPath##/etcd-key.pem \\
  --trusted-ca-file=##sslPath##/ca.pem \\
  --peer-trusted-ca-file=##sslPath##/ca.pem \\
  --initial-advertise-peer-urls=https://##etcdHost##:2380 \\
  --listen-peer-urls=https://##etcdHost##:2380 \\
  --listen-client-urls=https://##etcdHost##:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls=https://##etcdHost##:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=##etcdNodes## \\
  --initial-cluster-state=new \\
  --snapshot-count=50000 \\
  --auto-compaction-retention=1 \\
  --auto-compaction-mode=periodic \\
  --max-request-bytes=10485760 \\
  --quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
ip_head='192.168.11';for i in 147 148 149;do \
etcdHost="${ip_head}.${i}"; \
etcdNodes="etcd-192.168.11.147=https://192.168.11.147:2380,etcd-192.168.11.148=https://192.168.11.148:2380,etcd-192.168.11.149=https://192.168.11.149:2380"; \
etcdBin='/data/etcd-data/bin'; \
etcdDataPath='/data/etcd-data/data'; \
sslPath='/etc/kubernetes/ssl'; \
sed -e "s|##etcdHost##|${etcdHost}|g" -e "s|##etcdBin##|${etcdBin}|g" -e "s|##etcdDataPath##|${etcdDataPath}|g" -e "s|##sslPath##|${sslPath}|g" -e "s|##etcdNodes##|${etcdNodes}|g" ${work_dir}/tmp/service/kube-etcd.service.template > ${work_dir}/tmp/service/kube-etcd.service.${etcdHost}; \
done
Distribute the files and start the etcd cluster
ip_head='192.168.11';for i in 147 148 149;do \
etcdHost="${ip_head}.${i}"; \
ssh ${etcdHost} "mkdir -p ${etcdBin} ${sslPath}"; \
ssh ${etcdHost} "mkdir -p ${etcdDataPath} -m 700"; \
scp ${work_dir}/tmp/ssl/{ca*.pem,etcd*.pem} ${etcdHost}:${sslPath}/; \
scp ${work_dir}/tmp/service/kube-etcd.service.${etcdHost} ${etcdHost}:/usr/lib/systemd/system/kube-etcd.service; \
scp ${work_dir}/bin/etcd-v3.5.7-linux-amd64/{etcd,etcdctl} ${etcdHost}:${etcdBin}/; \
ssh ${etcdHost} "systemctl enable kube-etcd && systemctl start kube-etcd --no-block"; \
done
Verify the etcd cluster
ip_head='192.168.11';for i in 147 148 149;do \
etcdHost="${ip_head}.${i}"; \
ssh ${etcdHost} "ETCDCTL_API=3 ${etcdBin}/etcdctl \
                 --endpoints=https://${etcdHost}:2379 \
                 --cacert=${sslPath}/ca.pem \
                 --cert=${sslPath}/etcd.pem \
                 --key=${sslPath}/etcd-key.pem \
                 endpoint health"; \
done
https://192.168.11.147:2379 is healthy: successfully committed proposal: took = 6.156014ms
https://192.168.11.148:2379 is healthy: successfully committed proposal: took = 7.048715ms
https://192.168.11.149:2379 is healthy: successfully committed proposal: took = 19.618844ms
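As an optional extra check, etcdctl can also report leader and version information for all three endpoints at once (same certificates and variables as above):
# endpoint status across the whole cluster, printed as a table
ssh 192.168.11.147 "ETCDCTL_API=3 ${etcdBin}/etcdctl \
                 --endpoints=https://192.168.11.147:2379,https://192.168.11.148:2379,https://192.168.11.149:2379 \
                 --cacert=${sslPath}/ca.pem \
                 --cert=${sslPath}/etcd.pem \
                 --key=${sslPath}/etcd-key.pem \
                 endpoint status -w table"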

Deploy the apiserver component

Prepare the apiserver binaries
cd ${work_dir}/bin/ && \
tar xf kubernetes-server-linux-amd64.tar.gz
${work_dir}/bin/kubernetes/server/bin/kubeadm config images list
registry.k8s.io/kube-apiserver:v1.26.3
registry.k8s.io/kube-controller-manager:v1.26.3
registry.k8s.io/kube-scheduler:v1.26.3
registry.k8s.io/kube-proxy:v1.26.3
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.6-0
registry.k8s.io/coredns/coredns:v1.9.3
Configure the apiserver certificate
cat << EOF > ${work_dir}/tmp/ssl/apiserver-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.11.147",
    "192.168.11.148",
    "192.168.11.149",
    "10.88.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Create the apiserver certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/apiserver-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/apiserver
Configure the aggregator certificate (for metrics-server)
cat << EOF > ${work_dir}/tmp/ssl/aggregator-csr.json
{
  "CN": "aggregator",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Create the aggregator certificate (for metrics-server)
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/aggregator-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/aggregator
Configure apiserver as a systemd service
cat << EOF > ${work_dir}/tmp/service/kube-apiserver.service.template
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=##k8sBin##/kube-apiserver \\
  --secure-port=6443 \\
  --allow-privileged=true \\
  --anonymous-auth=false \\
  --api-audiences=api,istio-ca \\
  --authorization-mode=Node,RBAC \\
  --bind-address=##k8sHost## \\
  --client-ca-file=##sslPath##/ca.pem \\
  --endpoint-reconciler-type=lease \\
  --etcd-cafile=##sslPath##/ca.pem \\
  --etcd-certfile=##sslPath##/apiserver.pem \\
  --etcd-keyfile=##sslPath##/apiserver-key.pem \\
  --etcd-servers=##etcdEndpoints## \\
  --kubelet-certificate-authority=##sslPath##/ca.pem \\
  --kubelet-client-certificate=##sslPath##/apiserver.pem \\
  --kubelet-client-key=##sslPath##/apiserver-key.pem \\
  --service-account-issuer=https://kubernetes.default.svc \\
  --service-account-signing-key-file=##sslPath##/ca-key.pem \\
  --service-account-key-file=##sslPath##/ca.pem \\
  --service-cluster-ip-range=10.88.0.0/16 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=##sslPath##/apiserver.pem \\
  --tls-private-key-file=##sslPath##/apiserver-key.pem \\
  --requestheader-client-ca-file=##sslPath##/ca.pem \\
  --requestheader-allowed-names= \\
  --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --proxy-client-cert-file=##sslPath##/aggregator.pem \\
  --proxy-client-key-file=##sslPath##/aggregator-key.pem \\
  --enable-aggregator-routing=true \\
  --v=2
Restart=always
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
ip_head='192.168.11';for i in 147 148 149;do \
k8sHost="${ip_head}.${i}"; \
etcdEndpoints="https://192.168.11.147:2379,https://192.168.11.148:2379,https://192.168.11.149:2379"; \
k8sBin='/data/kubernetes/bin'; \
sslPath='/etc/kubernetes/ssl'; \
sed -e "s|##k8sHost##|${k8sHost}|g" -e "s|##k8sBin##|${k8sBin}|g" -e "s|##sslPath##|${sslPath}|g" -e "s|##etcdEndpoints##|${etcdEndpoints}|g" ${work_dir}/tmp/service/kube-apiserver.service.template > ${work_dir}/tmp/service/kube-apiserver.service.${k8sHost}; \
done
Distribute the files and start the apiserver cluster
ip_head='192.168.11';for i in 147 148 149;do \
k8sHost="${ip_head}.${i}"; \
ssh ${k8sHost} "mkdir -p ${k8sBin} ${sslPath}"; \
scp ${work_dir}/tmp/ssl/{ca*.pem,apiserver*.pem,aggregator*.pem} ${k8sHost}:${sslPath}/; \
scp ${work_dir}/tmp/service/kube-apiserver.service.${k8sHost} ${k8sHost}:/usr/lib/systemd/system/kube-apiserver.service; \
scp ${work_dir}/bin/kubernetes/server/bin/{kube-apiserver,kubectl} ${k8sHost}:${k8sBin}/; \
ssh ${k8sHost} "systemctl enable kube-apiserver && systemctl start kube-apiserver --no-block"; \
done
Verify the apiserver cluster
ip_head='192.168.11';for i in 147 148 149;do \
curl -k --cacert ${sslPath}/ca.pem \
--cert ${sslPath}/apiserver.pem \
--key ${sslPath}/apiserver-key.pem \
https://${ip_head}.${i}:6443/healthz; \
done

Configure the kubectl command

Configure the admin certificate
cat << EOF > ${work_dir}/tmp/ssl/admin-csr.json
{
  "CN": "admin",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
Create the admin certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/admin-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/admin
Create the kubeconfig file
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-cluster kubernetes \
--certificate-authority=${work_dir}/tmp/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.11.147:6443 \
--kubeconfig=${work_dir}/tmp/ssl/kubectl.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-credentials admin \
--client-certificate=${work_dir}/tmp/ssl/admin.pem \
--client-key=${work_dir}/tmp/ssl/admin-key.pem \
--embed-certs=true \
--kubeconfig=${work_dir}/tmp/ssl/kubectl.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=${work_dir}/tmp/ssl/kubectl.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
use-context kubernetes \
--kubeconfig=${work_dir}/tmp/ssl/kubectl.kubeconfig
mkdir -p $HOME/.kube
cp ${work_dir}/tmp/ssl/kubectl.kubeconfig $HOME/.kube/config
Create a clusterrolebinding so that exec into containers is permitted
${work_dir}/bin/kubernetes/server/bin/kubectl create clusterrolebinding kubernetes \
--clusterrole=cluster-admin \
--user=kubernetes
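At this point kubectl should be able to talk to the apiserver; a quick smoke test:
# both commands go through the kubeconfig written above
${work_dir}/bin/kubernetes/server/bin/kubectl cluster-info
${work_dir}/bin/kubernetes/server/bin/kubectl get --raw /healthz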

Deploy the controller-manager component

Configure the controller-manager certificate
cat << EOF > ${work_dir}/tmp/ssl/kube-controller-manager-csr.json
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.11.147",
      "192.168.11.148",
      "192.168.11.149"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:kube-controller-manager",
        "OU": "System"
      }
    ]
}
EOF
Create the controller-manager certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/kube-controller-manager-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/kube-controller-manager
Create the kubeconfig file
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-cluster kubernetes \
--certificate-authority=${work_dir}/tmp/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.11.147:6443 \
--kubeconfig=${work_dir}/tmp/ssl/kube-controller-manager.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-credentials system:kube-controller-manager \
--client-certificate=${work_dir}/tmp/ssl/kube-controller-manager.pem \
--client-key=${work_dir}/tmp/ssl/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${work_dir}/tmp/ssl/kube-controller-manager.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=${work_dir}/tmp/ssl/kube-controller-manager.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
use-context system:kube-controller-manager \
--kubeconfig=${work_dir}/tmp/ssl/kube-controller-manager.kubeconfig
Configure controller-manager as a systemd service
cat << EOF > ${work_dir}/tmp/service/kube-controller-manager.service.template
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=##k8sBin##/kube-controller-manager \\
  --bind-address=0.0.0.0 \\
  --allocate-node-cidrs=true \\
  --cluster-cidr=172.20.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=##sslPath##/ca.pem \\
  --cluster-signing-key-file=##sslPath##/ca-key.pem \\
  --kubeconfig=##configPath##/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --node-cidr-mask-size=24 \\
  --root-ca-file=##sslPath##/ca.pem \\
  --service-account-private-key-file=##sslPath##/ca-key.pem \\
  --service-cluster-ip-range=10.88.0.0/16 \\
  --use-service-account-credentials=true \\
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
k8sBin='/data/kubernetes/bin'; \
sslPath='/etc/kubernetes/ssl'; \
configPath='/etc/kubernetes'; \
sed -e "s|##k8sBin##|${k8sBin}|g" \
-e "s|##sslPath##|${sslPath}|g" \
-e "s|##configPath##|${configPath}|g" ${work_dir}/tmp/service/kube-controller-manager.service.template \
> ${work_dir}/tmp/service/kube-controller-manager.service
Distribute the files and start the controller-manager cluster
ip_head='192.168.11';for i in 147 148 149;do \
k8sHost="${ip_head}.${i}"; \
ssh ${k8sHost} "mkdir -p ${k8sBin} ${sslPath}"; \
scp ${work_dir}/tmp/ssl/ca*.pem ${k8sHost}:${sslPath}/; \
scp ${work_dir}/tmp/ssl/kube-controller-manager.kubeconfig ${k8sHost}:${configPath}/; \
scp ${work_dir}/tmp/service/kube-controller-manager.service ${k8sHost}:/usr/lib/systemd/system/; \
scp ${work_dir}/bin/kubernetes/server/bin/kube-controller-manager ${k8sHost}:${k8sBin}/; \
ssh ${k8sHost} "systemctl enable kube-controller-manager && systemctl start kube-controller-manager --no-block"; \
done
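Optionally confirm each controller-manager is healthy; it serves /healthz on its secure port 10257, so -k is used to skip certificate verification:
# expect "ok" from every node
ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "curl -sk https://127.0.0.1:10257/healthz"; echo ""; \
done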

Deploy the scheduler component

Configure the scheduler certificate
cat << EOF > ${work_dir}/tmp/ssl/kube-scheduler-csr.json
{
    "CN": "system:kube-scheduler",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.91.19"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:kube-scheduler",
        "OU": "System"
      }
    ]
}
EOF
Create the scheduler certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/kube-scheduler-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/kube-scheduler
Create the kubeconfig file
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-cluster kubernetes \
--certificate-authority=${work_dir}/tmp/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.11.147:6443 \
--kubeconfig=${work_dir}/tmp/ssl/kube-scheduler.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-credentials system:kube-scheduler \
--client-certificate=${work_dir}/tmp/ssl/kube-scheduler.pem \
--client-key=${work_dir}/tmp/ssl/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${work_dir}/tmp/ssl/kube-scheduler.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=${work_dir}/tmp/ssl/kube-scheduler.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
use-context system:kube-scheduler \
--kubeconfig=${work_dir}/tmp/ssl/kube-scheduler.kubeconfig
Configure scheduler as a systemd service
cat << EOF > ${work_dir}/tmp/service/kube-scheduler.service.template
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=##k8sBin##/kube-scheduler \\
  --authentication-kubeconfig=##configPath##/kube-scheduler.kubeconfig \\
  --authorization-kubeconfig=##configPath##/kube-scheduler.kubeconfig \\
  --bind-address=0.0.0.0 \\
  --kubeconfig=##configPath##/kube-scheduler.kubeconfig \\
  --leader-elect=true \\
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
k8sBin='/data/kubernetes/bin'; \
sslPath='/etc/kubernetes/ssl'; \
configPath='/etc/kubernetes'; \
sed -e "s|##k8sBin##|${k8sBin}|g" \
-e "s|##sslPath##|${sslPath}|g" \
-e "s|##configPath##|${configPath}|g" ${work_dir}/tmp/service/kube-scheduler.service.template \
> ${work_dir}/tmp/service/kube-scheduler.service
Distribute the files and start the scheduler cluster
ip_head='192.168.11';for i in 147 148 149;do \
k8sHost="${ip_head}.${i}"; \
ssh ${k8sHost} "mkdir -p ${k8sBin} ${sslPath}"; \
scp ${work_dir}/tmp/ssl/kube-scheduler.kubeconfig ${k8sHost}:${configPath}/; \
scp ${work_dir}/tmp/service/kube-scheduler.service ${k8sHost}:/usr/lib/systemd/system/; \
scp ${work_dir}/bin/kubernetes/server/bin/kube-scheduler ${k8sHost}:${k8sBin}/; \
ssh ${k8sHost} "systemctl enable kube-scheduler && systemctl start kube-scheduler --no-block"; \
done
${work_dir}/bin/kubernetes/server/bin/kubectl get componentstatus
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}
etcd-2               Healthy   {"health":"true","reason":""}
etcd-1               Healthy   {"health":"true","reason":""}

Deploy the worker components

Deploy the docker component

Prepare the docker binaries
cd ${work_dir}/bin && \
tar xf docker-23.0.1.tgz
Configure docker as a systemd service
cat <<EOF > ${work_dir}/tmp/service/docker.service.template
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd \\
          -H unix:///var/run/docker.sock \\
          --data-root=##dataRoot##
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF
Prepare the daemon.json file
cat <<EOF > ${work_dir}/tmp/service/daemon.json.template
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
Distribute the files and start the docker component
ip_head='192.168.11';for i in 147 148 149;do \
dataRoot='/data/docker'; \
sed "s|##dataRoot##|${dataRoot}|g" ${work_dir}/tmp/service/docker.service.template \
> ${work_dir}/tmp/service/docker.service; \
ssh ${ip_head}.${i} "mkdir -p /etc/docker"; \
scp ${work_dir}/tmp/service/docker.service ${ip_head}.${i}:/usr/lib/systemd/system/; \
scp ${work_dir}/tmp/service/daemon.json.template ${ip_head}.${i}:/etc/docker/daemon.json; \
scp ${work_dir}/bin/docker/* ${ip_head}.${i}:/usr/bin/; \
ssh ${ip_head}.${i} "systemctl enable docker && systemctl start docker --no-block"; \
done
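Optionally confirm docker is running and using the systemd cgroup driver that the kubelet configuration below expects:
# print the server version and cgroup driver on every node
ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "docker info --format '{{.ServerVersion}} {{.CgroupDriver}}'"; \
done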

Deploy the cri-dockerd component

Prepare the cri-dockerd binaries
cd ${work_dir}/bin && \
tar xf cri-dockerd-0.3.1.amd64.tgz
Configure cri-dockerd as a systemd service
cat <<EOF > ${work_dir}/tmp/service/cri-dockerd.service.template
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
# Too lazy to set up cri-docker.socket, so the dependency is commented out here; otherwise startup fails with: Unit not found
# Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd \\
          --cri-dockerd-root-directory=##criData## \\
          --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
          
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
Distribute the files and start the cri-dockerd component
ip_head='192.168.11';for i in 147 148 149;do \
criData='/data/cri-dockerd'; \
sed "s|##criData##|${criData}|g" ${work_dir}/tmp/service/cri-dockerd.service.template \
> ${work_dir}/tmp/service/cri-dockerd.service; \
scp ${work_dir}/tmp/service/cri-dockerd.service ${ip_head}.${i}:/usr/lib/systemd/system/; \
scp ${work_dir}/bin/cri-dockerd/* ${ip_head}.${i}:/usr/bin/; \
ssh ${ip_head}.${i} "yum install -y conntrack-tools"; \
ssh ${ip_head}.${i} "systemctl enable cri-dockerd && systemctl start cri-dockerd --no-block"; \
done
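Optionally verify that cri-dockerd is active and listening on its default CRI socket (the same socket the kubelet is pointed at below):
# service state plus the socket file on every node
ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "systemctl is-active cri-dockerd && ls -l /var/run/cri-dockerd.sock"; \
done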

Deploy the kubelet component

Configure the kubelet certificates
cat << EOF > ${work_dir}/tmp/ssl/kubelet-csr.json.template
{
    "CN": "system:node:##nodeHost##",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "##nodeHost##"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:nodes",
        "OU": "System"
      }
    ]
}
EOF
Create the kubelet certificates
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
sed "s|##nodeHost##|${nodeHost}|g" ${work_dir}/tmp/ssl/kubelet-csr.json.template \
> ${work_dir}/tmp/ssl/kubelet-csr.${nodeHost}.json
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/kubelet-csr.${nodeHost}.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/kubelet.${nodeHost};
done
Create the kubeconfig files
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-cluster kubernetes \
--certificate-authority=${work_dir}/tmp/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.11.147:6443 \
--kubeconfig=${work_dir}/tmp/ssl/kubelet.kubeconfig.${nodeHost};
done
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-credentials system:node:${nodeHost} \
--client-certificate=${work_dir}/tmp/ssl/kubelet.${nodeHost}.pem \
--client-key=${work_dir}/tmp/ssl/kubelet.${nodeHost}-key.pem \
--embed-certs=true \
--kubeconfig=${work_dir}/tmp/ssl/kubelet.kubeconfig.${nodeHost};
done
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-context system:node:${nodeHost} \
--cluster=kubernetes \
--user=system:node:${nodeHost} \
--kubeconfig=${work_dir}/tmp/ssl/kubelet.kubeconfig.${nodeHost};
done
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
${work_dir}/bin/kubernetes/server/bin/kubectl config \
use-context system:node:${nodeHost} \
--kubeconfig=${work_dir}/tmp/ssl/kubelet.kubeconfig.${nodeHost};
done
Configure the kubelet configuration file
cat << EOF > ${work_dir}/tmp/service/kubelet-config.yaml.template
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: ##sslPath##/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.88.0.2
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 3
containerLogMaxSize: 10Mi
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 300Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 40s
hairpinMode: hairpin-veth
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 40s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
kubeAPIBurst: 100
kubeAPIQPS: 50
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
# disable readOnlyPort
readOnlyPort: 0
resolvConf: /etc/resolv.conf
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
tlsCertFile: ##sslPath##/kubelet.pem
tlsPrivateKeyFile: ##sslPath##/kubelet-key.pem
EOF
Configure kubelet as a systemd service
cat << EOF > ${work_dir}/tmp/service/kubelet.service.template
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=##dataRoot##/kubelet
ExecStart=##k8sBin##/kubelet \\
  --config=##dataRoot##/kubelet/kubelet-config.yaml \\
  --container-runtime=remote \\
  --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock \\
  --hostname-override=##nodeHost## \\
  --kubeconfig=##configPath##/kubelet.kubeconfig \\
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 \\
  --root-dir=##dataRoot##/kubelet \\
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
Distribute the files and start the kubelet component
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
k8sBin='/data/kubernetes/bin'; \
sslPath='/etc/kubernetes/ssl'; \
configPath='/etc/kubernetes'; \
dataRoot='/data/kubernetes/data'
sed "s|##sslPath##|${sslPath}|g" ${work_dir}/tmp/service/kubelet-config.yaml.template \
> ${work_dir}/tmp/service/kubelet-config.yaml;
sed -e "s|##dataRoot##|${dataRoot}|g" \
    -e "s|##k8sBin##|${k8sBin}|g" \
    -e "s|##configPath##|${configPath}|g" \
    -e "s|##nodeHost##|${nodeHost}|g" ${work_dir}/tmp/service/kubelet.service.template \
    > ${work_dir}/tmp/service/kubelet.service.${nodeHost}
ssh ${nodeHost} "mkdir -p ${k8sBin} ${sslPath} ${configPath} ${dataRoot}/kubelet"; \
scp ${work_dir}/tmp/ssl/ca*.pem ${nodeHost}:${sslPath}/; \
scp ${work_dir}/tmp/ssl/kubelet.${nodeHost}.pem ${nodeHost}:${sslPath}/kubelet.pem; \
scp ${work_dir}/tmp/ssl/kubelet.${nodeHost}-key.pem ${nodeHost}:${sslPath}/kubelet-key.pem; \
scp ${work_dir}/tmp/ssl/kubelet.kubeconfig.${nodeHost} ${nodeHost}:${configPath}/kubelet.kubeconfig; \
scp ${work_dir}/tmp/service/kubelet.service.${nodeHost} ${nodeHost}:/usr/lib/systemd/system/kubelet.service; \
scp ${work_dir}/tmp/service/kubelet-config.yaml ${nodeHost}:${dataRoot}/kubelet/kubelet-config.yaml; \
scp ${work_dir}/bin/kubernetes/server/bin/kubelet ${nodeHost}:${k8sBin}/; \
ssh ${nodeHost} "systemctl enable kubelet && systemctl start kubelet --no-block"; \
done
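Once the kubelets are up, the nodes register themselves with the apiserver; they will stay NotReady until the calico network plugin is deployed further down:
${work_dir}/bin/kubernetes/server/bin/kubectl get nodes -o wide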

Deploy the kube-proxy component

Configure the kube-proxy certificate
cat << EOF > ${work_dir}/tmp/ssl/kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [],
    "names": [
      {
        "C": "CN",
        "ST": "ShangHai",
        "L": "ShangHai",
        "O": "system:kube-proxy",
        "OU": "System"
      }
    ]
}
EOF
Create the kube-proxy certificate
${work_dir}/bin/cfssl gencert \
-ca=${work_dir}/tmp/ssl/ca.pem \
-ca-key=${work_dir}/tmp/ssl/ca-key.pem \
-config=${work_dir}/tmp/ssl/ca-config.json \
-profile=kubernetes ${work_dir}/tmp/ssl/kube-proxy-csr.json | \
${work_dir}/bin/cfssljson -bare ${work_dir}/tmp/ssl/kube-proxy
Create the kubeconfig file
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-cluster kubernetes \
--certificate-authority=${work_dir}/tmp/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.11.147:6443 \
--kubeconfig=${work_dir}/tmp/ssl/kube-proxy.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-credentials system:kube-proxy \
--client-certificate=${work_dir}/tmp/ssl/kube-proxy.pem \
--client-key=${work_dir}/tmp/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=${work_dir}/tmp/ssl/kube-proxy.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
set-context system:kube-proxy \
--cluster=kubernetes \
--user=system:kube-proxy \
--kubeconfig=${work_dir}/tmp/ssl/kube-proxy.kubeconfig
${work_dir}/bin/kubernetes/server/bin/kubectl config \
use-context system:kube-proxy \
--kubeconfig=${work_dir}/tmp/ssl/kube-proxy.kubeconfig
Configure the kube-proxy configuration file
cat << EOF > ${work_dir}/tmp/service/kube-proxy-config.yaml.template
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  kubeconfig: "##configPath##/kube-proxy.kubeconfig"
clusterCIDR: "172.20.0.0/16"
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "##nodeHost##"
metricsBindAddress: 0.0.0.0:10249
mode: "ipvs"
EOF
Configure kube-proxy as a systemd service
cat << EOF > ${work_dir}/tmp/service/kube-proxy.service.template
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
# kube-proxy uses --cluster-cidr to distinguish traffic inside the cluster from traffic outside
## when --cluster-cidr or --masquerade-all is specified,
## kube-proxy applies SNAT to requests that access Service IPs
WorkingDirectory=##dataRoot##/kube-proxy
ExecStart=##k8sBin##/kube-proxy \\
  --config=##dataRoot##/kube-proxy/kube-proxy-config.yaml
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
Distribute the files and start the kube-proxy component
ip_head='192.168.11';for i in 147 148 149;do \
nodeHost="${ip_head}.${i}"; \
k8sBin='/data/kubernetes/bin'; \
configPath='/etc/kubernetes'; \
dataRoot='/data/kubernetes/data'
sed -e "s|##configPath##|${configPath}|g" \
    -e "s|##nodeHost##|${nodeHost}|g" ${work_dir}/tmp/service/kube-proxy-config.yaml.template \
    > ${work_dir}/tmp/service/kube-proxy-config.yaml.${nodeHost};
sed -e "s|##dataRoot##|${dataRoot}|g" \
    -e "s|##k8sBin##|${k8sBin}|g" ${work_dir}/tmp/service/kube-proxy.service.template \
    > ${work_dir}/tmp/service/kube-proxy.service
ssh ${nodeHost} "mkdir -p ${k8sBin} ${configPath} ${dataRoot}/kube-proxy"; \
scp ${work_dir}/tmp/ssl/kube-proxy.kubeconfig ${nodeHost}:${configPath}/; \
scp ${work_dir}/tmp/service/kube-proxy.service ${nodeHost}:/usr/lib/systemd/system/; \
scp ${work_dir}/tmp/service/kube-proxy-config.yaml.${nodeHost} ${nodeHost}:${dataRoot}/kube-proxy/kube-proxy-config.yaml; \
scp ${work_dir}/bin/kubernetes/server/bin/kube-proxy ${nodeHost}:${k8sBin}/; \
ssh ${nodeHost} "systemctl enable kube-proxy && systemctl start kube-proxy --no-block"; \
done
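Optionally verify kube-proxy on each node via the healthz endpoint configured above (0.0.0.0:10256):
# a 200 response with a small JSON body means the proxier is syncing
ip_head='192.168.11';for i in 147 148 149;do \
ssh ${ip_head}.${i} "curl -s http://127.0.0.1:10256/healthz"; echo ""; \
done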

Deploy the calico network plugin

wget -O ${work_dir}/calico.yaml --no-check-certificate https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
Before applying, edit calico.yaml: uncomment the CALICO_IPV4POOL_CIDR environment variable and set it to the Pod CIDR used for --cluster-cidr / clusterCIDR above:
- name: CALICO_IPV4POOL_CIDR
  value: "172.20.0.0/16"
${work_dir}/bin/kubernetes/server/bin/kubectl apply -f ${work_dir}/calico.yaml
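Wait for the calico pods in kube-system to become Ready; the nodes should then report Ready as well:
${work_dir}/bin/kubernetes/server/bin/kubectl get pods -n kube-system
${work_dir}/bin/kubernetes/server/bin/kubectl get nodes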

Deploy the coredns component

cat << EOF > ${work_dir}/coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.9.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.88.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF
${work_dir}/bin/kubernetes/server/bin/kubectl apply -f ${work_dir}/coredns.yaml
cat<<EOF | ${work_dir}/bin/kubernetes/server/bin/kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28.3
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
${work_dir}/bin/kubernetes/server/bin/kubectl exec busybox -- nslookup kubernetes
Server:    10.88.0.2
Address 1: 10.88.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.88.0.1 kubernetes.default.svc.cluster.local