由于CentOS 7 不再获得官方支持,故更新为 RockyLinux9.4,同时部署K8S1.31,以获得SWAP更好的支持

K8S All In One(1.31-Rocky Linux 9.4)

一、环境依赖安装

1.开启PSI

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# 查看默认内核
[root@lanecat ~]# grubby --default-kernel
/boot/vmlinuz-5.14.0-427.42.1.el9_4.x86_64
# 启用PSI
[root@lanecat ~]# grubby --update-kernel=/boot/vmlinuz-5.14.0-427.42.1.el9_4.x86_64 --args="psi=1"
# 查看内核信息
[root@lanecat ~]# grubby --info=/boot/vmlinuz-5.14.0-427.42.1.el9_4.x86_64
# 重启
[root@lanecat ~]# reboot
# 查看资源压力状态文件是否存在
[root@lanecat ~]# ls /proc/pressure
cpu io irq memory
# 查看压力状态是否正常
[root@lanecat ~]# cat /sys/fs/cgroup/memory.pressure
some avg10=0.00 avg60=0.00 avg300=0.00 total=1000
full avg10=0.00 avg60=0.00 avg300=0.00 total=897

2.启用内核模块(br_netfilter、IPVS)

RockyLinux 使用/etc/modules-load.d/目录来开机自动加载模块

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# 开启网络内核转发
cat > /etc/modules-load.d/netfilter.conf << EOF
br_netfilter
EOF
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# 开启IPVS支持
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# 重启生效
[root@lanecat ~]# reboot
# 验证端口转发是否打开
[root@lanecat ~]# sysctl -a |grep ip_forward
net.ipv4.ip_forward = 1
[root@lanecat ~]# sysctl -a |grep bridge
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
# 验证IPVS是否生效
[root@lanecat ~]# lsmod|grep -e ip_vs -e nf_conntrack
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 237568 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
##安装ipset ipvsadm
[root@lanecat ~]# yum -y install ipset ipvsadm

3.创建SWAP分区

1
2
3
4
5
6
7
8
9
10
11
# 创建swap分区
[root@lanecat ~]# dd if=/dev/zero of=/swapfile bs=1M count=3072
[root@lanecat ~]# chmod 600 /swapfile
[root@lanecat ~]# mkswap /swapfile
[root@lanecat ~]# swapon /swapfile
[root@lanecat ~]# echo "/swapfile none swap sw 0 0">>/etc/fstab
[root@lanecat ~]# free -h
# 调整swappiness权重为100
[root@lanecat ~]# vi /etc/sysctl.conf
vm.swappiness = 100
[root@lanecat ~]# sysctl -p

4.安装containerd

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# 获取软件包
https://github.com/containerd/containerd/releases/download/v2.0.0/containerd-2.0.0-linux-amd64.tar.gz
[root@lanecat ~]# ll
-rw-r--r-- 1 root root 36805913 Dec 13 15:49 containerd-2.0.0-linux-amd64.tar.gz
# 解压
[root@lanecat ~]# tar zxvf containerd-2.0.0-linux-amd64.tar.gz
bin/
bin/containerd
bin/ctr
bin/containerd-stress
# 安装service
[root@lanecat ~]# mv bin/* /usr/local/bin/
[root@lanecat ~]# wget -P /usr/lib/systemd/system/ https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
# 启动containerd
[root@lanecat ~]# systemctl daemon-reload
[root@lanecat ~]# systemctl enable --now containerd

5.安装runc

1
2
3
4
5
6
7
# 获取软件包
https://github.com/opencontainers/runc/releases/download/v1.2.3/runc.amd64
[root@lanecat ~]# ll
total 10908
-rw-r--r-- 1 root root 11168096 Dec 12 16:54 runc.amd64
# 安装为`/usr/local/sbin/runc`
[root@lanecat ~]# install -m 755 runc.amd64 /usr/local/sbin/runc

6.安装CNI

1
2
3
4
5
6
7
8
# 获取软件包
https://github.com/containernetworking/plugins/releases/download/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz
[root@lanecat ~]# ll
total 51528
-rw-r--r-- 1 root root 52762554 Dec 12 16:55 cni-plugins-linux-amd64-v1.6.1.tgz
# 安装CNI
[root@lanecat ~]# mkdir -p /opt/cni/bin
[root@lanecat ~]# tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.6.1.tgz

7.安装nerdctl、buildkit(buildctl)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# 获取软件包
https://github.com/containerd/nerdctl/releases
https://github.com/moby/buildkit/releases
# 安装nerdctl
[root@lanecat ~]# mkdir -p /usr/local/containerd/bin
[root@lanecat ~]# tar Czxvf /usr/local/containerd/bin nerdctl-2.0.2-linux-amd64.tar.gz
[root@lanecat ~]# ln -s /usr/local/containerd/bin/nerdctl /usr/local/bin/nerdctl
# 修改nerdctl 默认namespace 为k8s.io
[root@lanecat ~]# mkdir /etc/nerdctl
[root@lanecat ~]# echo 'namespace = "k8s.io"'> /etc/nerdctl/nerdctl.toml
# 安装buildctl
[root@lanecat ~]# mkdir /usr/local/buildkit
[root@lanecat ~]# tar Czxvf /usr/local/buildkit buildkit-v0.18.1.linux-amd64.tar.gz
[root@lanecat ~]# ln -s /usr/local/buildkit/bin/buildctl /usr/local/bin/buildctl
# 配置buildkitd service
cat <<EOF > /usr/lib/systemd/system/buildkitd.service
[Unit]
Description=buildkitd
After=network.target

[Service]
ExecStart=/usr/local/buildkit/bin/buildkitd

[Install]
WantedBy=multi-user.target
EOF
# 启动buildkitd
[root@lanecat ~]# systemctl enable buildkitd
[root@lanecat ~]# systemctl start buildkitd
# 查看版本号
[root@lanecat ~]# nerdctl --version
nerdctl version 2.0.2
[root@lanecat ~]# buildctl --version
buildctl github.com/moby/buildkit v0.18.1

8.配置containerd镜像加速

1
2
3
4
5
6
7
8
9
10
11
12
13
14
# 默认没有/etc/containerd/config.toml,使用命令生成
[root@lanecat ~]# mkdir /etc/containerd
[root@lanecat ~]# containerd config default > /etc/containerd/config.toml
# 创建镜像仓库路径
[root@lanecat ~]# mkdir /etc/containerd/certs.d
# 由于是给docker.io配置镜像加速,所以创建docker.io目录
[root@lanecat ~]# mkdir /etc/containerd/certs.d/docker.io
# 添加如下配置文件
[root@lanecat ~]# vi /etc/containerd/certs.d/docker.io/hosts.toml
[host."https://15u8yk3w.mirror.aliyuncs.com"]
capabilities = ["pull","resolve"]
# 重载并生效
[root@lanecat ~]# systemctl daemon-reload
[root@lanecat ~]# systemctl restart containerd

二、部署集群

1.安装kubeadm、kubelet、kubectl

1
2
3
4
5
6
7
8
9
10
11
12
13
14
# 添加repo源
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.31/rpm/repodata/repomd.xml.key
EOF

# 安装kubeadm、kubelet、kubectl
[root@lanecat ~]# yum install -y kubelet kubeadm kubectl
[root@lanecat ~]# systemctl start kubelet
[root@lanecat ~]# systemctl enable kubelet

2.配置kubelet ,启用特性以支持swap

在 Linux 上使用 swap 的 Beta 支持

1
[root@lanecat ~]# echo "KUBELET_EXTRA_ARGS=--feature-gates=NodeSwap=true --fail-swap-on=false --experimental-allocatable-ignore-eviction" >/etc/sysconfig/kubelet 

3.拉取镜像

通过kubeadm config images list查看所需镜像列表,使用私有仓库拉取镜像

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
# 登录阿里云ACR
[root@lanecat ~]# nerdctl login --username=LaneCat registry.cn-beijing.aliyuncs.com
# 拉取镜像,修改tag,并删除源镜像
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-apiserver:v1.31.0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-apiserver:v1.31.0 registry.k8s.io/kube-apiserver:v1.31.0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-apiserver:v1.31.0

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-controller-manager:v1.31.0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-controller-manager:v1.31.0 registry.k8s.io/kube-controller-manager:v1.31.0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-controller-manager:v1.31.0

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-scheduler:v1.31.0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-scheduler:v1.31.0 registry.k8s.io/kube-scheduler:v1.31.0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-scheduler:v1.31.0

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-proxy:v1.31.0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-proxy:v1.31.0 registry.k8s.io/kube-proxy:v1.31.0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-proxy:v1.31.0

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/coredns:v1.11.3
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/coredns:v1.11.3 registry.k8s.io/coredns/coredns:v1.11.3
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/coredns:v1.11.3

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/pause:3.10
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/pause:3.10 registry.k8s.io/pause:3.10
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/pause:3.10

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/etcd:3.5.15-0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/etcd:3.5.15-0 registry.k8s.io/etcd:3.5.15-0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/etcd:3.5.15-0

# 删除无用镜像层
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done

4.修改kubeadm-config.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# 通过kubeadm命令生成kubeadm-config.yaml配置文件
[root@lanecat ~]# kubeadm config print init-defaults >kubeadm-config.yaml
# 修改kubeadm-config.yaml配置信息(master节点IP、podSubnet、cgroupDriver),并添加cgroupDriver
[root@lanecat ~]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
# 修改为Master节点私有IP地址
advertiseAddress: 172.20.81.231
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
imagePullSerial: true
# 节点名
name: lanecat
taints: null
timeouts:
controlPlaneComponentHealthCheck: 4m0s
discovery: 5m0s
etcdAPICall: 2m0s
kubeletHealthCheck: 4m0s
kubernetesAPICall: 1m0s
tlsBootstrap: 5m0s
upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.31.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
# 指定podSubnet CIDR范围,后续calico会使用
podSubnet: 10.244.0.0/16
proxy: {}
scheduler: {}
# 添加下述内容,指定cgroupDriver为systemd
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

5.初始化k8S 1.31集群

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# 初始化集群
[root@lanecat ~]# kubeadm init --config=kubeadm-config.yaml
# 添加kube config环境
[root@lanecat ~]# mkdir -p $HOME/.kube
[root@lanecat ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@lanecat ~]# chown $(id -u):$(id -g) $HOME/.kube/config
# 修改kubelet,以支持swap
[root@lanecat ~]# vi /var/lib/kubelet/config.yaml
ignoreEvictionDefaults: "true"
evictionHard:
nodefs.available: 10%
nodefs.inodesFree: 5%
imagefs.available: 10%
imagefs.inodesFree: 5%
failSwapOn: false
featureGates:
NodeSwap: true
memorySwap:
swapBehavior: LimitedSwap
# 重启并生效
[root@lanecat ~]# systemctl daemon-reload
[root@lanecat ~]# systemctl restart kubelet
# 查看节点状态,未安装CNI,所以NotReady是正常的
[root@lanecat ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node NotReady control-plane 105s v1.32.0
# 查看taint
[root@lanecat ~]# kubectl get nodes -o json | jq '.items[].spec.taints'
[
{
"effect": "NoSchedule",
"key": "node-role.kubernetes.io/control-plane"
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/not-ready"
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"timeAdded": "2024-12-14T08:45:55Z"
}
]
# 由于是单节点 AllinOne,需要去除控制平面的污点
[root@lanecat ~]# kubectl taint nodes lanecat node-role.kubernetes.io/control-plane:NoSchedule-
node/node untainted
[root@lanecat ~]# kubectl get nodes -o json | jq '.items[].spec.taints'
[
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/not-ready"
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"timeAdded": "2024-12-14T08:45:55Z"
}
]

三、插件安装

1.安装CNI插件

1.1 Flannel

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# 获取kube-flannel.yaml
[root@lanecat ~]# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# 获取镜像
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/flannel:v0.26.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/flannel:v0.26.2 docker.io/flannel/flannel:v0.26.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/flannel:v0.26.2

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/flannel-cni-plugin:v1.6.0-flannel1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/flannel-cni-plugin:v1.6.0-flannel1 docker.io/flannel/flannel-cni-plugin:v1.6.0-flannel1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/flannel-cni-plugin:v1.6.0-flannel1
# 清理无用镜像
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done
# 从kubeadm-config.yaml中获取podCIDR
[root@lanecat calico]# cat /opt/k8s-1.31/kubeadm-config.yaml |grep podSubnet
podSubnet: 10.244.0.0/16
# 修改Flannel PodSubnet CIDR
[root@lanecat k8s-1.31]# cat kube-flannel.yml |grep net-conf -A 6
net-conf.json: |
{
"Network": "10.244.0.0/16",
"EnableNFTables": false,
"Backend": {
"Type": "vxlan"
}
# 部署Flannel
[root@lanecat k8s-1.31]# kubectl apply -f kube-flannel.yml
# 查看flannel pod状态
[root@lanecat k8s-1.31]# kubectl get pods -nkube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-92vjr 1/1 Running 0 11s
# 查看 k8s 节点状态
[root@lanecat k8s-1.31]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
lanecat Ready control-plane 42m v1.31.4

1.2 Calico

calico官网calicoGitHub

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# 获取tigera-operator.yaml
[root@lanecat calico]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
# 拉取镜像,并修改tag
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/operator:1.36.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/operator:1.36.2 quay.io/tigera/operator:v1.36.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/operator:1.36.2
# 由于 CRD 包很大,kubectl apply可能会超出请求限制,所以用kubectl create
[root@lanecat calico]# kubectl create -f tigera-operator.yaml
# 查看Operator状态
[root@lanecat calico]# kubectl get pods -ntigera-operator
NAME READY STATUS RESTARTS AGE
tigera-operator-76c4976dd7-lfmtg 1/1 Running 0 15s
# 获取custom-resources.yaml
[root@lanecat calico]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/custom-resources.yaml
# 从kubeadm-config.yaml中获取podCIDR
[root@lanecat calico]# cat /opt/k8s-1.31/kubeadm-config.yaml |grep podSubnet
podSubnet: 10.244.0.0/16
# 修改custom-resources.yaml中cidr范围为podSubnet,其他不变
[root@lanecat calico]# vi custom-resources.yaml
...
spec:
# Configures Calico networking.
calicoNetwork:
ipPools:
- name: default-ipv4-ippool
blockSize: 26
cidr: 10.244.0.0/16
...
# 拉取镜像,修改tag
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/pod2daemon-flexvol:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/pod2daemon-flexvol:v3.29.1 docker.io/calico/pod2daemon-flexvol:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/pod2daemon-flexvol:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/typha:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/typha:v3.29.1 docker.io/calico/typha:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/typha:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/cni:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/cni:v3.29.1 docker.io/calico/cni:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/cni:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/node:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/node:v3.29.1 docker.io/calico/node:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/node:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/apiserver:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/apiserver:v3.29.1 docker.io/calico/apiserver:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/apiserver:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-controllers:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-controllers:v3.29.1 docker.io/calico/kube-controllers:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-controllers:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/node-driver-registrar:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/node-driver-registrar:v3.29.1 docker.io/calico/node-driver-registrar:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/node-driver-registrar:v3.29.1

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/csi:v3.29.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/csi:v3.29.1 docker.io/calico/csi:v3.29.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/csi:v3.29.1
# 清理无用镜像
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done
# 创建自定义资源,安装Calico
[root@lanecat calico]# kubectl create -f custom-resources.yaml
# 查看pod状态
[root@lanecat ~]# kubectl get pods -ncalico-apiserver
NAME READY STATUS RESTARTS AGE
calico-apiserver-85c4696448-bkbqd 1/1 Running 1 (7m56s ago) 26m
calico-apiserver-85c4696448-srxbc 1/1 Running 1 (7m56s ago) 26m
[root@lanecat ~]# kubectl get pods -ncalico-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-74dd74888-dblr6 1/1 Running 1 26m
calico-node-jdhxf 1/1 Running 1 26m
calico-typha-5967d4f55f-vhjkd 1/1 Running 1 26m
csi-node-driver-5h529 2/2 Running 5 26m
# 查看节点状态
[root@lanecat ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
lanecat Ready control-plane 35m v1.31.4

2.安装metrics-server

metrics-server

1
2
3
4
5
6
7
8
9
10
11
12
13
# 获取chart仓库
https://github.com/kubernetes-sigs/metrics-server/releases/tag/metrics-server-helm-chart-3.12.2
# 拉取镜像,修改tag
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/metrics-serve:v0.7.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/metrics-serve:v0.7.2 registry.k8s.io/metrics-server/metrics-server:v0.7.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/metrics-serve:v0.7.2
# 清理无用镜像
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done
# 部署
[root@lanecat k8s-1.31]# kubectl apply -f metrics-server-0.7.2.yaml
# 查看pod
[root@lanecat k8s-1.31]# kubectl get pods -nkube-system |grep metrics
metrics-server-8467fcc7b7-56hgm 1/1 Running 0 41m

3.安装ingress-nginx

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
# 获取yaml文件
https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0-beta.0/deploy/static/provider/cloud/deploy.yaml
#拉取镜像修改tag
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/ingress-nginx-controller:v1.12.0-beta.0
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/ingress-nginx-controller:v1.12.0-beta.0 registry.k8s.io/ingress-nginx/controller:v1.12.0-beta.0
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/ingress-nginx-controller:v1.12.0-beta.0

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/kube-webhook-certgen:v1.4.4
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/kube-webhook-certgen:v1.4.4 registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/kube-webhook-certgen:v1.4.4
# 清理无用镜像
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done
# 修改端口范围
[root@lanecat ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml
- --service-node-port-range=1-32767
# 修改ingress-nginx.yaml中image部分,删除sha校验码
[root@lanecat k8s-1.31]# cat ingress-nginx.yaml |grep image:
image: registry.k8s.io/ingress-nginx/controller:v1.12.0-beta.0
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
# 修改ingress-nginx-controller svc配置
# 修改type:LoadBalancer 为 type: NodePort
# 添加externalIPs: <实际公网IP地址>
# 添加nodePort: 80和nodePort: 443
[root@lanecat k8s-1.31]# cat ingress-nginx.yaml|grep nodePort -A 8
nodePort: 80
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
nodePort: 443
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: NodePort
externalIPs:
- 47.109.19.94
# 部署ingress-nginx
[root@lanecat k8s-1.31]# kubectl apply -f ingress-nginx.yaml
# 查看状态
[root@lanecat k8s-1.31]# kubectl get pods -ningress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-lsfxq 0/1 Completed 0 40s
ingress-nginx-admission-patch-2smpl 0/1 Completed 0 40s
ingress-nginx-controller-78df659578-kn99q 1/1 Running 0 40s
# 查看svc
[root@lanecat k8s-1.31]# kubectl get svc -ningress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default-http-backend ClusterIP 10.96.16.109 <none> 80/TCP 29m
ingress-nginx-controller NodePort 10.110.33.72 47.109.19.94 80:11643/TCP,443:29442/TCP 148m
ingress-nginx-controller-admission ClusterIP 10.110.4.50 <none> 443/TCP 148m

ingressControllerConfig

4.安装Cert-manager

4.1 安装helm,修改模板

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# 安装helm
[root@lanecat ~]# wget https://get.helm.sh/helm-v3.16.3-linux-amd64.tar.gz
[root@lanecat ~]# tar zxvf helm-v3.16.3-linux-amd64.tar.gz
[root@lanecat ~]# mv linux-amd64/helm /usr/local/bin/
[root@lanecat ~]# helm version
version.BuildInfo{Version:"v3.16.3"
# 确保cert-manager文件已导入
[root@lanecat cert-manager]# ll
cert-manager-v1.16.2.tgz
# 拉取镜像
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-controller:v1.16.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-controller:v1.16.2 quay.io/jetstack/cert-manager-controller:v1.16.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-controller:v1.16.2

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-cainjector:v1.16.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-cainjector:v1.16.2 quay.io/jetstack/cert-manager-cainjector:v1.16.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-cainjector:v1.16.2

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-startupapicheck:v1.16.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-startupapicheck:v1.16.2 quay.io/jetstack/cert-manager-startupapicheck:v1.16.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-startupapicheck:v1.16.2

nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-webhook:v1.16.2
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-webhook:v1.16.2 quay.io/jetstack/cert-manager-webhook:v1.16.2
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/cert-manager-webhook:v1.16.2
# helm 安装 cert-manager
[root@lanecat ~]# helm install cert-manager /opt/k8s-1.31/Resources/cert-manager-v1.16.2.tgz --namespace cert-manager --create-namespace --set installCRDs=true
[root@lanecat cert-manager]# helm list -ncert-manager

4.2 安装配置alidns-webhook

由于 cert-manager 不支持 AliDNS ,所以我们只能以 webhook 方式来扩展 DNS 供应商

1
2
3
4
5
6
7
8
9
10
11
12
13
14
# 安装alidns-webhook
# https://github.com/wjiec/alidns-webhook
# 拉取镜像,修改tag
nerdctl pull registry.cn-beijing.aliyuncs.com/lanecat/alidns-webhook:v1.0.1
nerdctl tag registry.cn-beijing.aliyuncs.com/lanecat/alidns-webhook:v1.0.1 wjiec/alidns-webhook:v1.0.1
nerdctl rmi registry.cn-beijing.aliyuncs.com/lanecat/alidns-webhook:v1.0.1
# 清理多余镜像
[root@lanecat ~]# for i in `ctr -n k8s.io image ls |grep sha |awk '{print $1}'|grep sha`;do ctr -n k8s.io image delete $i;done
# helm部署alidns-webhook
[root@lanecat Resources]# helm install alidns-webhook /opt/k8s-1.31/Resources/alidns-webhook.tar.gz --namespace cert-manager --create-namespace
# 查看helm部署状态
[root@lanecat ~]# helm list -ncert-manager
# 查看pod状态
[root@lanecat ~]# kubectl get pods -ncert-manager

icert-manager

4.3 配置ClusterIssuer

创建RAM用户,放行DNS权限,生成AKSK,对AKSK base64编码,并写入secret
使用secret创建clusterIssuer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# 通过下述资源配置清单,创建secret,并使用secret创建clusterIssuer
[root@lanecat cert-manager]# cat alidns-secret-clusterissuer.yaml
apiVersion: v1
kind: Secret
metadata:
name: alidns-secret
namespace: cert-manager
data:
ak: <AccessKeyId的base64编码>   # 注意:切勿将真实AK/SK明文提交到文档或仓库
sk: <AccessKeySecret的base64编码>
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: alidns-issuer
namespace: cert-manager
spec:
acme:
email: a1136395475@163.com
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: alidns-issuer
solvers:
- dns01:
webhook:
groupName: acme.yourcompany.com # 无需更改
solverName: alidns # 无需更改
config:
region: ""
accessKeyIdRef:
name: alidns-secret
key: ak
accessKeySecretRef:
name: alidns-secret
key: sk
[root@lanecat cert-manager]# kubectl apply -f alidns-secret-clusterissuer.yaml
[root@lanecat cert-manager]# kubectl get secret -ncert-manager
NAME TYPE DATA AGE
alidns-issuer Opaque 1 7s
alidns-secret Opaque 2 10s
alidns-webhook-ca kubernetes.io/tls 3 3m25s
alidns-webhook-tls kubernetes.io/tls 3 3m22s

4.4 创建Certificate

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
[root@lanecat cert-manager]# cat certificate.yaml 
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: lanecat
namespace: default
spec:
dnsNames:
- "lanecat.top"
- "*.lanecat.top"
issuerRef:
kind: ClusterIssuer
name: alidns-issuer # 引用 ClusterIssuer,名字和 clusterIssuer.yaml 中保持一致
secretName: lanecat # 最终签发出来的证书会保存在这个 Secret 里面
duration: 2160h # 90d
renewBefore: 360h # 15d
[root@lanecat cert-manager]# kubectl apply -f certificate.yaml
certificate.cert-manager.io/lanecat created
# 查看证书是否签发
[root@lanecat ~]# kubectl get secret
NAME TYPE DATA AGE
lanecat kubernetes.io/tls 2 52s

四、个人服务部署

…请看OBSidian笔记…