Installing a Highly Available Kubernetes Cluster with kubeadm (containerd-based)

Environment Preparation

Software Versions

  • CentOS 7.9.2009
  • containerd 1.6.24
  • keepalived 1.3.5
  • haproxy 1.5.18
  • kubernetes 1.28.2
  • calico v3.26.1
  • dashboard 2.7.0

Preparing the Servers

Four CentOS virtual machines are used, each with 2 CPU cores and 2 GB of memory. All operations are performed as the root user. The VMs are listed below:

IP Address        Hostname   Role
192.168.146.130   node130    control plane
192.168.146.131   node131    control plane
192.168.146.132   node132    worker node
192.168.146.133   node133    worker node
192.168.146.200   -          VIP (keepalived virtual IP)

A production cluster should have at least three control-plane nodes (an odd number, so etcd can keep quorum); two are used here only to keep the lab small.
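The hostnames above are used later (for example in the scp commands), so each machine needs its hostname set and name resolution for the others. A minimal sketch, run on every node (adjust the hostname per machine):

# set the hostname; use node131 / node132 / node133 on the other machines
hostnamectl set-hostname node130

# add name resolution for all nodes
cat >> /etc/hosts <<EOF
192.168.146.130 node130
192.168.146.131 node131
192.168.146.132 node132
192.168.146.133 node133
EOF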

Installing Common Tools

yum install -y wget vim  net-tools

Stopping and Disabling the Firewall

systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld

Disabling SELinux

setenforce 0 && sed -i  's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

Disabling Swap

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Adjusting Linux Kernel Parameters

# Load the modules now (non-persistent)
modprobe br_netfilter
modprobe overlay
lsmod |grep br_netfilter
lsmod |grep overlay

# Make the module loading persistent
cat > /etc/sysconfig/modules/br_netfilter.modules << EOF
modprobe br_netfilter
modprobe overlay
EOF
chmod +x /etc/sysconfig/modules/br_netfilter.modules

# Write the sysctl configuration
cat <<EOF > /etc/sysctl.d/k8s.conf
vm.swappiness=0
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

# Apply the configuration
sysctl -p /etc/sysctl.d/k8s.conf
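A quick way to confirm the settings took effect (the bridge keys only exist while br_netfilter is loaded):

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# both values should be 1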

Configuring IPVS

yum install ipset ipvsadm  -y

# Add the modules that need to be loaded
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

# Make the script executable, run it, and check that the modules are loaded
chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
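Loading the IPVS modules does not by itself switch kube-proxy into IPVS mode. One option, once the cluster is up (a sketch, not part of the original steps): set mode: "ipvs" in the kube-proxy ConfigMap and recreate the kube-proxy Pods.

kubectl -n kube-system edit configmap kube-proxy            # set mode: "ipvs"
kubectl -n kube-system delete pods -l k8s-app=kube-proxy    # Pods are recreated with the new mode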

Installing the Container Runtime

Installing containerd

tee /etc/yum.repos.d/docker-ce.repo <<EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/\$releasever/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

yum makecache

yum install -y containerd.io

# Generate the default configuration file
containerd config default > /etc/containerd/config.toml
# Change SystemdCgroup = false to SystemdCgroup = true
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
# Change sandbox_image = "registry.k8s.io/pause:3.6" to:
# sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
sed -i 's#registry.k8s.io/pause:3.6#registry.aliyuncs.com/google_containers/pause:3.9#g' /etc/containerd/config.toml

systemctl enable containerd
systemctl start containerd
containerd --version
ctr images ls
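ctr talks to containerd directly; if you also want crictl (installed later along with the kubeadm packages, as cri-tools) to use containerd, a minimal /etc/crictl.yaml sketch:

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF
crictl info    # should print the containerd runtime status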

Setting Up the High-Availability Layer

Installing HAProxy

# Run on both node130 and node131
yum install -y haproxy
tee /etc/haproxy/haproxy.cfg <<EOF
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option                  forwardfor except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend k8s-apiserver
    bind *:9443
    mode tcp
    option tcplog
    default_backend k8s-apiserver

#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend k8s-apiserver
    mode tcp
    option tcplog
    option tcpcheck
    balance roundrobin
    server node131 192.168.146.131:6443 check
    server node130 192.168.146.130:6443 check
EOF

systemctl start haproxy
systemctl enable haproxy
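A quick check that HAProxy is listening on the frontend port (the backend health checks will stay down until the apiservers exist):

ss -lntp | grep 9443
systemctl status haproxy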

Installing Keepalived

# Run on both node130 and node131
yum -y install keepalived

Keepalived Configuration on node130

tee /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}

vrrp_script chk_haproxy {
    script "/bin/bash -c 'if [[ \$(netstat -nlp | grep 9443) ]]; then exit 0; else exit 1; fi'" # check that haproxy is listening on 9443
    interval 2 # run the check every 2 seconds
    weight 11 # adjust the priority by this weight based on the check result
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.146.200
    }
    track_script {
        chk_haproxy
    }
}
EOF

Keepalived Configuration on node131

tee /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}

vrrp_script chk_haproxy {
    script "/bin/bash -c 'if [[ \$(netstat -nlp | grep 9443) ]]; then exit 0; else exit 1; fi'" # check that haproxy is listening on 9443
    interval 2 # run the check every 2 seconds
    weight 11 # adjust the priority by this weight based on the check result
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.146.200
    }
    track_script {
        chk_haproxy
    }
}
EOF

Starting Keepalived

systemctl start  keepalived
systemctl enable keepalived
ip a
# The virtual IP 192.168.146.200 should appear on node130
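An optional failover test (a sketch): stopping HAProxy on node130 makes the check script fail, so node131 should take over the VIP; starting HAProxy again fails it back.

# on node130
systemctl stop haproxy
# on node131
ip a | grep 192.168.146.200   # the VIP should now be attached to ens33
# on node130
systemctl start haproxy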

Installing Kubernetes

Configuring the Kubernetes yum Repository

# Configure on all nodes
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
# repo_gpgcheck fails against the Aliyun mirror, so disable it
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all

Installing kubeadm

# On all nodes, install kubelet, kubeadm and kubectl (kubeadm depends on kubelet and kubectl)
yum install -y kubelet kubeadm kubectl
kubectl version
# Set the kubelet cgroup driver to systemd to match containerd
echo KUBELET_EXTRA_ARGS=\"--cgroup-driver=systemd\" > /etc/sysconfig/kubelet
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
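The plain yum install above takes whatever version is newest in the repository; to reproduce the versions listed at the top of this guide, the packages can be pinned explicitly (a sketch):

yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2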

Pre-pulling the Images

# List the images kubeadm needs; they can be pre-pulled on node130
kubeadm config images list
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --cri-socket=unix:///run/containerd/containerd.sock
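To confirm the images ended up in containerd's k8s.io namespace:

ctr -n k8s.io images ls | grep google_containers
# or, if /etc/crictl.yaml is configured as above:
crictl images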

Initializing the Control Plane

# Run on node130 to initialize the first control-plane node.
# Note: both the advertise address and the control-plane endpoint point at the VIP on port 6443,
# which bypasses the HAProxy frontend on 9443. An alternative is --control-plane-endpoint "192.168.146.200:9443"
# with --apiserver-advertise-address set to the node's own IP (192.168.146.130).
kubeadm init --kubernetes-version=1.28.2 \
--apiserver-advertise-address=192.168.146.200 \
--control-plane-endpoint "192.168.146.200:6443" \
--image-repository registry.aliyuncs.com/google_containers \
--pod-network-cidr=10.244.0.0/16


# To redo the initialization, first clean up: kubeadm reset && rm -rf $HOME/.kube/config

#################### Output on success (shown below for reference) ####################
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

kubeadm join 192.168.146.200:6443 --token ipjohl.9hhmudgcz897ouzd \
--discovery-token-ca-cert-hash sha256:98df6afbca74ccf537825a4186904dc6cd8574373721e638f8c21ca1e52ead04 \
--control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.146.200:6443 --token ipjohl.9hhmudgcz897ouzd \
--discovery-token-ca-cert-hash sha256:98df6afbca74ccf537825a4186904dc6cd8574373721e638f8c21ca1e52ead04
#################### End of reference output ####################

# Run on node131: create the certificate directory and copy the certificates from node130.
# The init output does not mention this step, but it is required before joining as a control plane.
mkdir -p /etc/kubernetes/pki/etcd
scp root@node130:/etc/kubernetes/admin.conf /etc/kubernetes/
scp root@node130:/etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} /etc/kubernetes/pki/
scp root@node130:/etc/kubernetes/pki/etcd/{ca.crt,ca.key} /etc/kubernetes/pki/etcd/

# Join node131 to the cluster as a control-plane node
kubeadm join 192.168.146.200:6443 --token ipjohl.9hhmudgcz897ouzd \
--discovery-token-ca-cert-hash sha256:98df6afbca74ccf537825a4186904dc6cd8574373721e638f8c21ca1e52ead04 \
--control-plane

# As the root user, run on node130 and node131:
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

# As a non-root user, run on node130 and node131:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
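Copying the certificates by hand works, but kubeadm can also distribute them for you. A sketch of that alternative; the token, hash, and certificate key below are placeholders for whatever your own commands print:

# on node130: upload the control-plane certificates and print a certificate key
kubeadm init phase upload-certs --upload-certs

# on node131: join as a control plane without copying files manually
kubeadm join 192.168.146.200:6443 --token <token> \
--discovery-token-ca-cert-hash sha256:<hash> \
--control-plane --certificate-key <certificate-key>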

Joining the Worker Nodes

# Join node132 and node133 as worker nodes
kubeadm join 192.168.146.200:6443 --token ipjohl.9hhmudgcz897ouzd \
--discovery-token-ca-cert-hash sha256:98df6afbca74ccf537825a4186904dc6cd8574373721e638f8c21ca1e52ead04 \
--cri-socket=unix:///run/containerd/containerd.sock
# Check the nodes (run on a control-plane node)
kubectl get nodes # worker nodes take a while to become Ready
kubectl get po -o wide -n kube-system
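The bootstrap token in the join command expires after 24 hours; to add nodes later, generate a fresh join command on a control-plane node:

kubeadm token create --print-join-command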

Deploying the Pod Network (Calico)

# Install the Calico network on node130
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml

wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml
vim custom-resources.yaml
############################ The file content is as follows
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16 # change this line to match --pod-network-cidr
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

---

# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
############################ end ############################

kubectl create -f custom-resources.yaml

kubectl get pods -n calico-system

Checking the Cluster Status

kubectl get pods -n calico-system
kubectl get pods -n kube-system
kubectl get node -o wide

Installing the Kubernetes Dashboard

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
sed -i "s/kubernetesui/registry.aliyuncs.com\/google_containers/g" recommended.yaml
sed -i "/targetPort: 8443/a\ \ \ \ \ \ nodePort: 30433\n\ \ type: NodePort" recommended.yaml

kubectl apply -f recommended.yaml
kubectl get all -n kubernetes-dashboard

# Create an administrator account
cat > dashboard-admin.yaml << EOF
---
# ------------------- dashboard-admin ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
EOF

kubectl apply -f dashboard-admin.yaml

kubectl -n kubernetes-dashboard create token dashboard-admin

eyJhbGciOiJSUzI1NiIsImtpZCI6Ijh3ZW9IMVdBMGFYaGc4WmRjMFVid3BIYzhSVUFvLTRZY3ZNYkpRd1VrQ2sifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjk2NTY2MjA0LCJpYXQiOjE2OTY1NjI2MDQsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkYXNoYm9hcmQtYWRtaW4iLCJ1aWQiOiI3MDYwYWMwOC1mZGU3LTQzZTYtODI0My05ZGM0N2I2MTIwNzMifX0sIm5iZiI6MTY5NjU2MjYwNCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.HwQB4-FfeGdI9sduT5HgDzm1LIcywlBc4fO2LWhXsiumb9HkXuQyRu5pFS32KR7XFn8IL9wb2fuFGLIJqnGIfCQhDaZ0cDfTRothE4g4V_BN6ZIwXXMs5UTPNUOBBPW_ZsGXcvnfzhLXZtuClwgrrqJYtA_cY4JLuHol2Y4T-dnG75el6EXt5LUJG1RwstMIr9IFG6lLPT3t3Dxq4lqjvFNzzp9hcmSiINMiOEMo7Qk52J9gbWs-_JBJulZiMy8RcHGIpDy2DusgQlRp73bfONFUnChXE8PIhEeItbVmfLUDWJ9s0bIYGinxpd2AL1p8GrrlNlGtGm8pHqXcv3bKNA

Visit https://192.168.146.200:30433 and log in with the token above.
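Tokens created with kubectl create token expire (after one hour by default). For a non-expiring token for dashboard-admin, one option (a sketch) is a service-account token Secret:

kubectl -n kubernetes-dashboard apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: dashboard-admin
type: kubernetes.io/service-account-token
EOF

# read the generated long-lived token
kubectl -n kubernetes-dashboard get secret dashboard-admin-token -o jsonpath='{.data.token}' | base64 -d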

Installing Kuboard

# Run on node131 (or any machine with Docker installed; the cluster nodes in this guide only run containerd)
docker run -d \
--restart=unless-stopped \
--name=kuboard \
-p 80:80/tcp \
-p 10081:10081/tcp \
-e KUBOARD_ENDPOINT="http://192.168.146.131:80" \
-e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
-v /root/kuboard-data:/data \
eipwork/kuboard:v3

# Installing Kuboard through Kubernetes (below) failed in this environment; deploying it with Docker on a separate machine, as above, works instead
kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Wait for all Pods in the kuboard namespace to become ready
watch kubectl get pods -n kuboard

Open http://192.168.146.131 in a browser.

Log in with the default username admin and password Kuboard123.

Click "Add Cluster" and authenticate using a token (see the token sketch below).

Once the cluster shows up as imported, click kuboard-admin to open the cluster overview.
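The token import needs a token for a ServiceAccount bound to cluster-admin. A sketch, reusing the dashboard-admin account and the long-lived token Secret created in the dashboard section:

kubectl -n kubernetes-dashboard get secret dashboard-admin-token -o jsonpath='{.data.token}' | base64 -d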

Installing MySQL as a Test Workload

# Create the namespace
kubectl create ns database

tee mysql_deploy.yaml <<EOF
apiVersion: apps/v1 # API version
kind: Deployment # Deployment controller, manages the Pods and their ReplicaSet
metadata:
  name: mysql # name of the Deployment, unique within the namespace
  namespace: database # namespace the Deployment lives in
  labels:
    app: mysql
spec:
  replicas: 1 # desired number of Pod replicas
  selector:
    matchLabels: # labels the ReplicaSet selects on
      app: mysql # target Pods must carry this label
  strategy: # upgrade strategy
    type: RollingUpdate # rolling update, replace Pods gradually
  template: # Pod template used to create the replicas
    metadata:
      labels:
        app: mysql # Pod label, must match the selector above
    spec:
      # nodeName: k8s-worker01 # pin the Pod to a specific node
      containers: # container definitions for the Pod
      - name: mysql # container name
        image: mysql:8.0 # container image
        volumeMounts: # mount points inside the container
        - name: time-zone # mount name
          mountPath: /etc/localtime # path inside the container, can be a file or a directory
        - name: mysql-data
          mountPath: /var/lib/mysql # MySQL data directory inside the container
        - name: mysql-logs
          mountPath: /var/log/mysql # MySQL log directory inside the container
        ports:
        - containerPort: 3306 # port exposed by the container
        env: # environment variables injected into the container
        - name: MYSQL_ROOT_PASSWORD # root password for MySQL
          value: "123456"
      volumes: # host volumes mounted into the container
      - name: time-zone # volume name, must match the mount name above
        hostPath:
          path: /etc/localtime # mounting the host's localtime gives the container the local time zone
      - name: mysql-data
        hostPath:
          path: /data/mysql/data # host directory for MySQL data
      - name: mysql-logs
        hostPath:
          path: /data/mysql/logs # host directory for MySQL logs
EOF

kubectl create -f mysql_deploy.yaml
# Check the Pod status
kubectl -n database get pods

# Inspect the Pod in detail
kubectl -n database describe pod

# Access MySQL from the command line (the Pod name will differ in your cluster)
kubectl -n database exec -it mysql-5ccddd6b74-l5652 -- mysql -uroot -p
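The Deployment above has no Service, so MySQL is only reachable through kubectl exec or the Pod IP. A minimal NodePort Service sketch (the nodePort value 30306 is an arbitrary choice) to expose it outside the cluster:

tee mysql_svc.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: database
spec:
  type: NodePort
  selector:
    app: mysql        # must match the Pod labels of the Deployment
  ports:
  - port: 3306        # Service port inside the cluster
    targetPort: 3306  # container port
    nodePort: 30306   # exposed on every node (30000-32767)
EOF
kubectl apply -f mysql_svc.yaml

# connect from outside the cluster, for example:
# mysql -h 192.168.146.130 -P 30306 -uroot -p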

Alternatively, access the MySQL Pod through the Kuboard console.