主机信息
| 主机 IP | 主机名 | 角色 | 硬件 |
|---|---|---|---|
| 192.168.12.31 | k8s-31 | control plane, etcd | 4H8G |
| 192.168.12.32 | k8s-32 | control plane, etcd | 4H8G |
| 192.168.12.33 | k8s-33 | control plane, etcd | 4H8G |
| 192.168.12.34 | k8s-34 | worker | 8H16G |
| 192.168.12.35 | k8s-35 | worker | 8H16G |
| 192.168.12.36 | k8s-36 | worker | 8H16G |
| 192.168.12.9 | haproxy-01 | haproxy+keepalived | 2H4G |
| 192.168.12.10 | haproxy-02 | haproxy+keepalived | 2H4G |
| 192.168.12.11 | - | keepalived vip | - |
系统软件版本
- OS:Ubuntu 22.04.3 LTS
- Kernel:Linux 5.15.0-88-generic
- Kubernetes:v1.33.3
- Containerd:1.7.13
- KubeKey:v3.1.12
高可用
haproxy
https://haproxy.debian.net/#distribution=Ubuntu&release=jammy&version=2.8
# apt-get install --no-install-recommends software-properties-common
# add-apt-repository ppa:vbernat/haproxy-2.8
# apt-get install haproxy=2.8.\*
两台haproxy 同样配置
root@haproxy-01:~$ cat /etc/haproxy/haproxy.cfg
# Global process settings: syslog targets, chroot jail, admin socket, drop to haproxy user.
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
# Defaults inherited by every proxy below: layer-4 TCP mode (TLS passthrough to the apiservers).
defaults
log global
mode tcp
option tcplog
#option dontlognull
timeout connect 5s
timeout client 60s
timeout server 60s
maxconn 10000
# Stats endpoint on :8100 (/stats web UI, /metrics for Prometheus scraping).
listen stats
bind *:8100
mode http
stats enable
stats uri /stats
stats refresh 10s
stats hide-version
http-request use-service prometheus-exporter if { path /metrics }
# Kubernetes API entry point; traffic is forwarded unchanged to the control-plane nodes.
frontend k8s-api
bind *:6443
default_backend k8s-apiserver
backend k8s-apiserver
balance roundrobin
option tcp-check
# check inter 2000: probe every 2s; rise 2: two consecutive successes mark the server up; fall 3: three consecutive failures mark it down
server master01 192.168.12.31:6443 check inter 2000 fall 3 rise 2
server master02 192.168.12.32:6443 check inter 2000 fall 3 rise 2
server master03 192.168.12.33:6443 check inter 2000 fall 3 rise 2
启动haproxy
# Enable on boot and start immediately.
systemctl enable --now haproxy
# enable --now already starts the unit; the restart forces a reload of the config written above.
systemctl restart haproxy
keepalived
keepalived配置文件不一样,注意每个节点的IP和interface
主keepalived
# Generate the keepalived config for the primary node (haproxy-01).
# The quoted delimiter ('EOF') prevents any shell expansion inside the config.
cat > /etc/keepalived/keepalived.conf << 'EOF'
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
! Health script: run every 5s; weight -5 lowers this node's priority while
! haproxy is down; fall 2 / rise 1 = 2 failures mark it down, 1 success up.
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
! NOTE: keepalived ignores nopreempt unless the initial state is BACKUP,
! so both nodes start as BACKUP; the higher priority (100) lets this node
! win the first election, and nopreempt prevents VIP flapping afterwards.
state BACKUP
interface ens160
mcast_src_ip 192.168.12.9
virtual_router_id 61
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.12.11
}
track_script {
chk_apiserver
}
}
EOF
从keepalived
# Generate the keepalived config for the standby node (haproxy-02).
# The quoted delimiter ('EOF') prevents any shell expansion inside the config.
cat > /etc/keepalived/keepalived.conf << 'EOF'
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
! Same health script as the primary: demote this node while haproxy is down.
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
! BACKUP with the lower priority (90); nopreempt keeps the VIP here after a
! failover even when the primary comes back online.
state BACKUP
interface ens160
mcast_src_ip 192.168.12.10
virtual_router_id 61
priority 90
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.12.11
}
track_script {
chk_apiserver
}
}
EOF
健康检查配置
# Generate the haproxy health-check script used by the vrrp_script block.
# The escaped delimiter (\EOF) keeps $vars literal while writing the file.
cat > /etc/keepalived/check_apiserver.sh << \EOF
#!/bin/bash
# If haproxy is not running after 3 probes spaced 1s apart, stop keepalived
# so the VIP fails over to the peer node. Exit 0 while haproxy is alive.
err=0
for _ in 1 2 3; do
  # -x matches the exact process name, not substrings of other commands
  if pgrep -x haproxy > /dev/null; then
    err=0
    break
  fi
  err=$((err + 1))
  sleep 1
done

if [ "$err" -ne 0 ]; then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
fi
exit 0
EOF
chmod +x /etc/keepalived/check_apiserver.sh
启动keepalived
systemctl daemon-reload
# Enable on boot and start immediately; restart forces a reload of the config file.
systemctl enable --now keepalived
systemctl restart keepalived
测试高可用
ping 192.168.12.11
Chrony时间同步
apt-get install -y chrony
haproxy 做chrony主
# Comment out the default Ubuntu pool servers (a .bak backup is kept).
sed -i.bak 's/^\(pool.*\)/#\1/g' /etc/chrony/chrony.conf
# Insert the upstream pool before the first commented-out pool line.
sed -i '0,/^#pool.*/{s//pool ntp1.aliyun.com iburst\n&/}' /etc/chrony/chrony.conf
# chrony denies NTP client access by default; allow the K8S subnet so the
# nodes (which point at the VIP 192.168.12.11) can actually sync from here.
grep -q '^allow 192.168.12.0/24' /etc/chrony/chrony.conf || echo 'allow 192.168.12.0/24' >> /etc/chrony/chrony.conf
systemctl enable chrony
systemctl restart chrony
# verify
chronyc sourcestats -v
root@haproxy-01:~# grep ^pool /etc/chrony/chrony.conf
pool ntp1.aliyun.com iburst
所有K8S节点 指向haproxy
# Comment out the default Ubuntu pool servers (a .bak backup is kept).
sed -i.bak 's/^\(pool.*\)/#\1/g' /etc/chrony/chrony.conf
# Insert the local NTP source (the keepalived VIP) before the first commented-out pool line.
sed -i '0,/^#pool.*/{s//pool 192.168.12.11 iburst\n&/}' /etc/chrony/chrony.conf
systemctl enable chrony
systemctl restart chrony
# verify
chronyc sourcestats -v
K8S部署
系统配置
# Disable the firewall and set timezone / hardware clock to UTC mode.
ufw disable
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
# kubelet requires swap off: disable now and comment the fstab entry for reboots.
swapoff -a
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
# Switch apt to the Aliyun mirror, then refresh the package index.
sed -i 's#http://.*.ubuntu.com#http://mirrors.aliyun.com#g' /etc/apt/sources.list
apt-get update
关闭主机systemd-resolve 53端口
# Disable the systemd-resolved stub listener to free local port 53 (see heading above),
# and point /etc/resolv.conf at the real upstream resolver list.
sed -i 's/^#\?DNSStubListener=.*/DNSStubListener=no/' /etc/systemd/resolved.conf
ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf
systemctl restart systemd-resolved.service
安装依赖
apt-get install -y curl socat conntrack ebtables ipset ipvsadm bash-completion
Sudo权限
sudo有密码
# -aG appends the sudo group; a bare -G would REPLACE all of the user's
# existing supplementary groups, silently removing the others.
usermod -aG sudo sunday
sudo无密码
update-alternatives --config editor #选择3 vim.basic
visudo # 添加
sunday ALL=(ALL:ALL) NOPASSWD:ALL
SSH免密码
root@k8s-31:~# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa
Your public key has been saved in /root/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:AfQnEmRagGWhrOKZVUHcSNh/iGbpmc0KDMlwmQSP9Mk root@k8s-master01
The key's randomart image is:
+---[RSA 3072]----+
|.+.=@OX |
|o+B+.B.= |
|+.=E..= = . |
| = .= + = |
|o o.+ = S |
|o +o + o |
| + . . |
| . |
| |
+----[SHA256]-----+
root@k8s-31:~# ssh-copy-id sunday@192.168.12.31
root@k8s-31:~# ssh-copy-id sunday@192.168.12.32
root@k8s-31:~# ssh-copy-id sunday@192.168.12.33
root@k8s-31:~# ssh-copy-id sunday@192.168.12.34
root@k8s-31:~# ssh-copy-id sunday@192.168.12.35
root@k8s-31:~# ssh-copy-id sunday@192.168.12.36
下载KubeKey
KubeKey默认etcd是独立安装systemd启动,etcd证书有效期10年
Kubernetes CA证书有效期10年,apiserver等证书有效期为1年
# Use the CN download mirror for KubeKey artifacts.
export KKZONE=cn
# NOTE(review): piping a remote script straight into sh executes unverified code;
# consider downloading and inspecting it first.
curl -sfL https://get-kk.kubesphere.io | sh -
查看 KubeKey 支持的 Kubernetes 版本列表
./kk version --show-supported-k8s
./kk create config --with-kubernetes v1.33.3
https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md
注意修改etcd、containerd数据目录
root@k8s-31:~# vim config-sample.yaml
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  # Password auth example (left for reference); key-based auth is used instead.
  #- {name: k8s-31, address: 192.168.12.31, internalAddress: 192.168.12.31, user: sunday, password: ""}
  - {name: k8s-31, address: 192.168.12.31, internalAddress: 192.168.12.31, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-32, address: 192.168.12.32, internalAddress: 192.168.12.32, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-33, address: 192.168.12.33, internalAddress: 192.168.12.33, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-34, address: 192.168.12.34, internalAddress: 192.168.12.34, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-35, address: 192.168.12.35, internalAddress: 192.168.12.35, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-36, address: 192.168.12.36, internalAddress: 192.168.12.36, user: sunday, privateKeyPath: "~/.ssh/id_rsa"}
  roleGroups:
    etcd:
    - k8s-31
    - k8s-32
    - k8s-33
    control-plane:
    - k8s-31
    - k8s-32
    - k8s-33
    worker:
    - k8s-34
    - k8s-35
    - k8s-36
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    # External HA endpoint: the keepalived VIP fronted by haproxy (see above).
    domain: k8s-api-lb.sundayhk.com
    address: "192.168.12.11"
    port: 6443
  kubernetes:
    version: v1.33.3
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
    # Custom etcd data directory (default is /var/lib/etcd).
    dataDir: "/data/etcd"
  network:
    plugin: calico
    kubePodsCIDR: 10.22.0.0/18
    kubeServiceCIDR: 10.23.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
    # Custom containerd data directory (default is /var/lib/containerd).
    containerdDataDir: /data/containerd
  addons: []
# export KKZONE=cn
./kk create cluster -f config-sample.yaml
root@k8s-31:~# ./kk create cluster -f config-sample.yaml
_ __ _ _ __
| | / / | | | | / /
| |/ / _ _| |__ ___| |/ / ___ _ _
| \| | | | '_ \ / _ \ \ / _ \ | | |
| |\ \ |_| | |_) | __/ |\ \ __/ |_| |
\_| \_/\__,_|_.__/ \___\_| \_/\___|\__, |
__/ |
|___/
18:48:58 CST [GreetingsModule] Greetings
18:48:59 CST message: [k8s-36]
Greetings, KubeKey!
18:49:00 CST message: [k8s-33]
Greetings, KubeKey!
18:49:01 CST message: [k8s-31]
Greetings, KubeKey!
18:49:03 CST message: [k8s-35]
Greetings, KubeKey!
18:49:04 CST message: [k8s-32]
Greetings, KubeKey!
18:49:05 CST message: [k8s-34]
Greetings, KubeKey!
18:49:05 CST success: [k8s-36]
18:49:05 CST success: [k8s-33]
18:49:05 CST success: [k8s-31]
18:49:05 CST success: [k8s-35]
18:49:05 CST success: [k8s-32]
18:49:05 CST success: [k8s-34]
18:49:05 CST [NodePreCheckModule] A pre-check on nodes
18:49:06 CST success: [k8s-34]
18:49:06 CST success: [k8s-35]
18:49:06 CST success: [k8s-36]
18:49:06 CST success: [k8s-33]
18:49:06 CST success: [k8s-32]
18:49:06 CST success: [k8s-31]
18:49:06 CST [ConfirmModule] Display confirmation form
+--------+------+------+---------+----------+-------+-------+---------+-----------+--------+--------+------------+------------+-------------+------------------+--------------+
| name | sudo | curl | openssl | ebtables | socat | ipset | ipvsadm | conntrack | chrony | docker | containerd | nfs client | ceph client | glusterfs client | time |
+--------+------+------+---------+----------+-------+-------+---------+-----------+--------+--------+------------+------------+-------------+------------------+--------------+
| k8s-31 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:49:06 |
| k8s-32 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:49:05 |
| k8s-33 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:49:06 |
| k8s-34 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:49:01 |
| k8s-35 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:48:50 |
| k8s-36 | y | y | y | y | y | y | y | y | y | | v1.7.13 | | | | CST 18:48:49 |
+--------+------+------+---------+----------+-------+-------+---------+-----------+--------+--------+------------+------------+-------------+------------------+--------------+
This is a simple check of your environment.
Before installation, ensure that your machines meet all requirements specified at
https://github.com/kubesphere/kubekey#requirements-and-recommendations
Install k8s with specify version: v1.33.3
Continue this installation? [yes/no]: yes
18:49:08 CST success: [LocalHost]
18:49:08 CST [NodeBinariesModule] Download installation binaries
18:49:08 CST message: [localhost]
downloading amd64 kubeadm v1.33.3 ...
...
18:54:57 CST [SaveKubeConfigModule] Save kube config as a configmap
18:54:57 CST success: [LocalHost]
18:54:57 CST [AddonsModule] Install addons
18:54:57 CST message: [LocalHost]
[0/0] enabled addons
18:54:57 CST success: [LocalHost]
18:54:57 CST Pipeline[CreateClusterPipeline] execute successfully
Installation is complete.
Please check the result using the command:
kubectl get pod -A
root@k8s-31:~# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-31 Ready control-plane 3m17s v1.33.3
k8s-32 Ready control-plane 2m38s v1.33.3
k8s-33 Ready control-plane 2m41s v1.33.3
k8s-34 Ready worker 2m35s v1.33.3
k8s-35 Ready worker 2m36s v1.33.3
k8s-36 Ready worker 2m36s v1.33.3
root@k8s-31:~# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-946bdd5fd-h52gk 1/1 Running 0 4m29s
kube-system calico-node-4hns4 1/1 Running 0 4m29s
kube-system calico-node-4rwqq 1/1 Running 0 4m28s
kube-system calico-node-5bxls 1/1 Running 0 4m29s
kube-system calico-node-cwg7f 1/1 Running 0 4m28s
kube-system calico-node-khww9 1/1 Running 0 4m28s
kube-system calico-node-lrncb 1/1 Running 0 4m29s
kube-system coredns-7f6468b95c-4zct2 1/1 Running 0 5m7s
kube-system coredns-7f6468b95c-g97f8 1/1 Running 0 4m7s
kube-system kube-apiserver-k8s-31 1/1 Running 0 5m12s
kube-system kube-apiserver-k8s-32 1/1 Running 0 4m27s
kube-system kube-apiserver-k8s-33 1/1 Running 0 4m32s
kube-system kube-controller-manager-k8s-31 1/1 Running 0 5m11s
kube-system kube-controller-manager-k8s-32 1/1 Running 0 4m27s
kube-system kube-controller-manager-k8s-33 1/1 Running 0 4m32s
kube-system kube-proxy-g9bwk 1/1 Running 0 4m35s
kube-system kube-proxy-k9jwv 1/1 Running 0 4m35s
kube-system kube-proxy-m7btd 1/1 Running 0 4m38s
kube-system kube-proxy-qwm8l 1/1 Running 0 4m36s
kube-system kube-proxy-ts5ww 1/1 Running 0 5m7s
kube-system kube-proxy-x8tnl 1/1 Running 0 4m41s
kube-system kube-scheduler-k8s-31 1/1 Running 0 5m11s
kube-system kube-scheduler-k8s-32 1/1 Running 0 4m27s
kube-system kube-scheduler-k8s-33 1/1 Running 0 4m32s
kube-system nodelocaldns-67qxn 1/1 Running 0 4m38s
kube-system nodelocaldns-9kdxt 1/1 Running 0 5m5s
kube-system nodelocaldns-dkqxm 1/1 Running 0 4m36s
kube-system nodelocaldns-lh445 1/1 Running 0 4m41s
kube-system nodelocaldns-vxf5h 1/1 Running 0 4m35s
kube-system nodelocaldns-x6gfv 1/1 Running 0 4m35s
自动补全
apt-get install bash-completion
# Enable kubectl completion for the current user ...
echo 'source <(kubectl completion bash)' >>~/.bashrc
# ... and system-wide via the bash-completion drop-in directory.
kubectl completion bash >/etc/bash_completion.d/kubectl
卸载或异常清理
# Tear down the cluster created from the same config file.
./kk delete cluster -f config-sample.yaml
# WARNING: permanently destroys etcd and containerd data under /data.
rm -rf /data/etcd
rm -rf /data/containerd
systemctl restart containerd
# kubeadm reset
# Confirm no containers remain, then remove leftover kubernetes configs/certs.
crictl ps -a
rm -rf /etc/kubernetes/*
SundayHK