快速高效部署k8s高可用集群

一、安装组件准备

节点规划

192.168.1.93 master01 控制节点1
192.168.1.94 master02 控制节点2
192.168.1.95 master03 控制节点3
192.168.1.96 node01 计算节点1
192.168.1.97 node02 计算节点2
192.168.1.98 node03 计算节点3
192.168.1.99  lb.kubesphere.local (VIP) VIP地址
192.168.1.238 nfs-server NFS存储服务器
192.168.1.76 hakeepalived-1 (keepalived+haproxy高可用组件服务器)
192.168.1.77 hakeepalived-2 (keepalived+haproxy高可用组件服务器)
192.168.1.155 registry.starz.top (镜像服务器)
  1. HA(Keepalived+haproxy)

安装的服务器:

192.168.1.76 hakeepalived-1

192.168.1.77 hakeepalived-2
yum install keepalived haproxy -y
附上我的haproxy的配置
global

    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     204800
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 600

# Status web page on :8888/status (login admin/admin)
listen stats
    mode http
    bind :8888
    stats enable
    stats hide-version
    stats uri     /status
    stats realm   Haproxy\ Statistics
    stats auth    admin:admin

# TCP front end for the Kubernetes API; the VIP answers on this port
frontend  main
    bind 0.0.0.0:6443
    default_backend             K8S

backend K8S
    balance     leastconn
    server master01 192.168.1.93:6443 check port 6443 maxconn 300
    server master02 192.168.1.94:6443 check port 6443 maxconn 300
    # fixed: this line was a duplicate "master02"; each server needs a unique name
    server master03 192.168.1.95:6443 check port 6443 maxconn 300
附上我的keepalived配置
! Configuration File for keepalived
global_defs {
        ! fixed: "notificationd" is not a valid keepalived keyword;
        ! the canonical instance-identifier directive is router_id
        router_id LVS_DEVEL
}

! Health check: run chk.sh every 2 seconds to verify haproxy is alive
vrrp_script chk_haproxy {
    script "/etc/keepalived/chk.sh"
    interval 2
}

vrrp_instance VI_1 {
    ! both nodes start as BACKUP with nopreempt so a recovered node
    ! does not take the VIP back and cause a second failover
    state BACKUP
    nopreempt
    interface ens192
    virtual_router_id 70
    priority 130 ! set node 2 lower than this, e.g. priority 120

    advert_int 1
    authentication {
        auth_type PASS
        auth_pass asd
    }
    virtual_ipaddress {
        192.168.1.99
    }
    track_script {
        chk_haproxy
    }

    ! restart haproxy when dropping to BACKUP; stop it on FAULT
    notify_backup "/etc/init.d/haproxy restart"
    notify_fault "/etc/init.d/haproxy stop"
}
[root@hakeepalived-2 ~]# cat /etc/keepalived/chk.sh
#!/bin/bash
# If haproxy is no longer running, stop keepalived so the VIP
# fails over to the peer node.
running=$(ps -C haproxy --no-header | wc -l)
if [ "$running" -eq 0 ]; then
       /etc/init.d/keepalived stop
fi

2. DOCKER CE安装包

[root@ansible installdocker]# ll
total 74720
drwxrwxr-x 2 rancher rancher      169 Mar 24 09:51 docker
-rw-r--r-- 1 root    root    64273875 Jun  8 14:22 docker-20.10.14.tgz
-rw-r--r-- 1 root    root    12218968 Jun  8 14:22 docker-compose
-rw-r--r-- 1 root    root         723 Jun 13 15:01 docker.yml
-rw-r--r-- 1 root    root         318 Jun 13 14:53 hosts
-rwxr-xr-x 1 root    root        2511 Jul  6 11:22 installdocker.sh
-rw-r--r-- 1 root    root         136 Jun 13 15:00 k8s
[root@ansible installdocker]# ansible-playbook -i k8s docker.yml

这里用ansible进行对6个节点进行安装

3. 其他的RPM包

所有节点进行安装

[root@master01 rpm]# ll *.rpm
-rw-r--r-- 1 root root  88784 Jun  8 15:23 bash-completion-2.1-8.el7.noarch.rpm
-rw-r--r-- 1 root root 191000 Jun  8 15:23 conntrack-tools-1.4.4-7.el7.x86_64.rpm
-rw-r--r-- 1 root root  18400 Jun  8 15:23 libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm
-rw-r--r-- 1 root root  18212 Jun  8 15:23 libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm
-rw-r--r-- 1 root root  23584 Jun  8 15:23 libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm
[root@master01 rpm]# ll nfs/*.rpm
-rw-r--r-- 1 root root 113244 Jun  8 15:23 nfs/gssproxy-0.7.0-29.el7.x86_64.rpm
-rw-r--r-- 1 root root  54856 Jun  8 15:23 nfs/keyutils-1.5.8-3.el7.x86_64.rpm
-rw-r--r-- 1 root root  26268 Jun  8 15:23 nfs/libbasicobjects-0.1.1-32.el7.x86_64.rpm
-rw-r--r-- 1 root root  42604 Jun  8 15:23 nfs/libcollection-0.7.0-32.el7.x86_64.rpm
-rw-r--r-- 1 root root 219068 Jun  8 15:23 nfs/libevent-2.0.21-4.el7.x86_64.rpm
-rw-r--r-- 1 root root  65172 Jun  8 15:23 nfs/libini_config-1.3.1-32.el7.x86_64.rpm
-rw-r--r-- 1 root root  51000 Jun  8 15:23 nfs/libnfsidmap-0.25-19.el7.x86_64.rpm
-rw-r--r-- 1 root root  28960 Jun  8 15:23 nfs/libpath_utils-0.2.1-32.el7.x86_64.rpm
-rw-r--r-- 1 root root  27700 Jun  8 15:23 nfs/libref_array-0.1.5-32.el7.x86_64.rpm
-rw-r--r-- 1 root root  91396 Jun  8 15:23 nfs/libtirpc-0.2.4-0.16.el7.x86_64.rpm
-rw-r--r-- 1 root root   9108 Jun  8 15:23 nfs/libverto-libevent-0.2.5-4.el7.x86_64.rpm
-rw-r--r-- 1 root root 422244 Jun  8 15:23 nfs/nfs-utils-1.3.0-0.68.el7.x86_64.rpm
-rw-r--r-- 1 root root 182932 Jun  8 15:23 nfs/quota-4.01-19.el7.x86_64.rpm
-rw-r--r-- 1 root root  92628 Jun  8 15:23 nfs/quota-nls-4.01-19.el7.noarch.rpm
-rw-r--r-- 1 root root  61360 Jun  8 15:23 nfs/rpcbind-0.2.0-49.el7.x86_64.rpm
-rw-r--r-- 1 root root  80380 Jun  8 15:23 nfs/tcp_wrappers-7.6-77.el7.x86_64.rpm
[root@master01 rpm]# rpm -ivh *.rpm && rpm -ivh nfs/*.rpm

4. 时间同步服务开启(略)

5. HOSTS配置

[root@master01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.93 master01
192.168.1.94 master02
192.168.1.95 master03
192.168.1.96 node01
192.168.1.97 node02
192.168.1.98 node03
192.168.1.99  lb.kubesphere.local

6. 主机ssh受信

[root@master01 ~]# ssh-keygen #全程Y确认
[root@master01 ~]# ssh-copy-id 192.168.1.93
[root@master01 ~]# ssh-copy-id 192.168.1.94
[root@master01 ~]# ssh-copy-id 192.168.1.95
[root@master01 ~]# ssh-copy-id 192.168.1.96
[root@master01 ~]# ssh-copy-id 192.168.1.97
[root@master01 ~]# ssh-copy-id 192.168.1.98
[root@master01 ~]# ssh-copy-id master01
[root@master01 ~]# ssh-copy-id master02
[root@master01 ~]# ssh-copy-id master03
[root@master01 ~]# ssh-copy-id node01
[root@master01 ~]# ssh-copy-id node02
[root@master01 ~]# ssh-copy-id node03

二、安装

  1. 镜像准备

网盘地址:链接:https://pan.baidu.com/s/1Vc2h-pgRFPUQ5C8xIRWUnQ
提取码:2379

解压密码请加QQ:396796725联系索取

2. 部署包说明

[root@master01 k8spackge]# ls -lrth
total 975M
-rw-r--r-- 1 root root 1.8K Jul  7 16:54 config-sample.yaml  -> 集群配置文件
-rw-r--r-- 1 root root  16M Jul  7 16:54 kubekey-v2.0.0-linux-amd64.tar.gz -> kubekey软件包
-rw-r--r-- 1 root root  770 Jul  7 16:54 push.sh -> 镜像上传到仓库脚本
-rw-r--r-- 1 root root  52M Jul  7 16:54 kk -> 二进制安装脚本
drwxr-xr-x 8 root root   81 Jul  7 16:54 kubekey -> 安装组件文件
-rw-r--r-- 1 root root 908M Jul  7 16:55 k8s_images.tar -> 镜像包

3. 镜像上传

[root@master01 k8spackge]# docker load -i k8s_images.tar #加载镜像到本地
[root@master01 k8spackge]# chmod +x push.sh #给予push脚本执行权限
注意:压缩包的脚本并非最新的,请使用如下的脚本
[root@master01 k8spackge]# cat push.sh 
#!/bin/bash
# Re-tag the locally loaded Kubernetes/Calico images for a private registry
# and push them there.
#
# Usage: ./push.sh <registry-address>   (run "docker login" first)
#
# Fix vs. the original script: it contained a stray line that re-tagged
# kubesphere/pause:3.4.1 as $registry/calico/kube-controllers:v3.20.0,
# overwriting the correct kube-controllers tag — so the wrong image was
# pushed for kube-controllers. The single image list below removes that.
registry=${1:?usage: $0 <registry-address>}

# image:tag pairs to mirror; keep in sync with k8s_images.tar
images=(
  kubesphere/kube-apiserver:v1.21.5
  kubesphere/kube-controller-manager:v1.21.5
  kubesphere/kube-scheduler:v1.21.5
  kubesphere/kube-proxy:v1.21.5
  calico/node:v3.20.0
  calico/pod2daemon-flexvol:v3.20.0
  calico/cni:v3.20.0
  calico/kube-controllers:v3.20.0
  kubesphere/pause:3.4.1
  coredns/coredns:1.8.0
  kubesphere/k8s-dns-node-cache:1.15.12
)

echo "===================处理镜像标签=================================================="
for img in "${images[@]}"; do
  docker tag "$img" "$registry/$img"
done

echo "===================推送到镜像仓库=================================================="
for img in "${images[@]}"; do
  docker push "$registry/$img"
done
[root@master01 k8spackge]# ./push.sh 【你的仓库地址】 #注意要先登录你的仓库

4. 集群配置文件(config-sample.yaml)解析

[root@master01 ~]# cat config-sample.yaml 

apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts: # IP address and hostname of every node to install
  - {name: master01, address: 192.168.1.93, internalAddress: 192.168.1.93, privateKeyPath: "~/.ssh/id_rsa"}  
  - {name: master02, address: 192.168.1.94, internalAddress: 192.168.1.94, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: master03, address: 192.168.1.95, internalAddress: 192.168.1.95, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: node01, address: 192.168.1.96, internalAddress: 192.168.1.96, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: node02, address: 192.168.1.97, internalAddress: 192.168.1.97, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: node03, address: 192.168.1.98, internalAddress: 192.168.1.98, privateKeyPath: "~/.ssh/id_rsa"}
  roleGroups:
    etcd:  # etcd database nodes
    - master01
    - master02
    - master03
    control-plane:  # control-plane nodes
    - master01
    - master02
    - master03
    worker: # worker (compute) nodes
    - master01
    - master02
    - master03
    - node01
    - node02
    - node03
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers 
    # internalLoadbalancer: haproxy

    domain: lb.kubesphere.local # VIP domain name
    # fixed: was "192.168.1.78", which contradicted the node plan, /etc/hosts
    # and the keepalived virtual_ipaddress — the VIP is 192.168.1.99
    address: "192.168.1.99" # VIP address
    port: 6443
  kubernetes:
    version: v1.21.5 # k8s version deployed here (must match the loaded images)
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    multusCNI:
      enabled: false
  registry:
    plainHTTP: false
    privateRegistry: "registry.starz.top/library" # private image registry address
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: 
  - name: nfs-client # default storage backend for the cluster
    namespace: kube-system
    sources: 
      chart: 
        name: nfs-client-provisioner
        repo: https://charts.kubesphere.io/main
        values: 
        - storageClass.defaultClass=true
        - nfs.server=192.168.1.238 # NFS backend server IP
        - nfs.path=/share/k8s-6 # NFS export path

5. 安装

[root@master01 ~]# chmod +x kk   #给予二进制文件执行权限
[root@master01 ~]# ./kk create cluster -f config-sample.yaml --with-kubernetes v1.21.5 #执行安装

稍等片刻后,提示安装完毕

6. 补充:设置命令补全

[root@master01 k8spackge]# cat /root/.bashrc 
# .bashrc

# User specific aliases and functions

alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
source <(kubectl completion bash) # added: load kubectl bash completion at login
# Source global definitions
if [ -f /etc/bashrc ]; then
	. /etc/bashrc
fi
[root@master01 k8spackge]# source <(kubectl completion bash) #执行
[root@master01 k8spackge]# source /root/.bashrc
[root@master01 k8spackge]# source /usr/share/bash-completion/bash_completion

原创文章,作者:admin,如若转载,请注明出处:https://www.starz.top/2022/07/08/%e5%bf%ab%e9%80%9f%e9%ab%98%e6%95%88%e9%83%a8%e7%bd%b2k8s%e9%ab%98%e5%8f%af%e7%94%a8%e9%9b%86%e7%be%a4/

发表评论

邮箱地址不会被公开。 必填项已用*标注