
k8s Installation

Learn k8s by following the official guide: Kubernetes Documentation | Kubernetes

Cluster Setup

Environment Preparation

  • Configure /etc/hosts (omitted)

  • Disable the firewall (omitted)

  • Enable IPv4 forwarding and let iptables see bridged traffic by running the following:

    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    overlay
    br_netfilter
    EOF

    sudo modprobe overlay
    sudo modprobe br_netfilter

    # Set the required sysctl params; these persist across reboots
    cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.ipv4.ip_forward = 1
    EOF

    # Apply the sysctl params
    sudo sysctl --system
  • Disable SELinux

    # Set SELinux to permissive mode (effectively disabling it)
    sudo setenforce 0
    sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
  • Confirm the modules and sysctl params are loaded by running:

    [root@k8s-master containered]# lsmod | grep br_netfilter
    br_netfilter 22256 0
    bridge 151336 2 br_netfilter,ebtable_broute
    [root@k8s-master containered]# lsmod | grep overlay
    overlay 91659 0
    [root@k8s-master containered]# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.ipv4.ip_forward = 1
  • Disable the swap partition

    # Edit the partition config file /etc/fstab and comment out the swap line
    # Note: reboot Linux after making this change
    vim /etc/fstab
    # Comment out the /dev/mapper/centos-swap swap line:
    # /dev/mapper/centos-swap swap
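
    A non-interactive alternative (a sketch, assuming the fstab swap entry contains the word "swap"):

    # Turn swap off immediately, then comment out every active swap entry in /etc/fstab
    swapoff -a
    sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/# \1/' /etc/fstab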
  • Reboot

Install containerd

k8s depends on a container runtime, so one needs to be installed first. As of v1.24, Dockershim has been removed from the Kubernetes project, so this time we install containerd.

Container Runtimes | Kubernetes

  • Download and install containerd

    yum install wget -y
    wget https://github.com/containerd/containerd/releases/download/v1.7.15/containerd-1.7.15-linux-amd64.tar.gz
    tar Cxzvf /usr/local containerd-1.7.15-linux-amd64.tar.gz

    If you plan to start containerd via systemd, you also need to create /usr/local/lib/systemd/system/containerd.service with the following content:

    mkdir -p /usr/local/lib/systemd/system
    touch /usr/local/lib/systemd/system/containerd.service
    vim /usr/local/lib/systemd/system/containerd.service

    containerd.service

    # Copyright The containerd Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    [Unit]
    Description=containerd container runtime
    Documentation=https://containerd.io
    After=network.target local-fs.target

    [Service]
    ExecStartPre=-/sbin/modprobe overlay
    ExecStart=/usr/local/bin/containerd

    Type=notify
    Delegate=yes
    KillMode=process
    Restart=always
    RestartSec=5

    # Having non-zero Limit*s causes performance problems due to accounting overhead
    # in the kernel. We recommend using cgroups to do container-local accounting.
    LimitNPROC=infinity
    LimitCORE=infinity

    # Comment TasksMax if your systemd version does not supports it.
    # Only systemd 226 and above support this version.
    TasksMax=infinity
    OOMScoreAdjust=-999

    [Install]
    WantedBy=multi-user.target

    Create the config file

    mkdir /etc/containerd/
    touch /etc/containerd/config.toml
    containerd config default > /etc/containerd/config.toml
    vim /etc/containerd/config.toml

    Change the sandbox_image address to registry.aliyuncs.com/google_containers/pause:3.9.
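
    The relevant entries in config.toml then look roughly like this (a sketch for containerd 1.7; the Kubernetes docs also recommend SystemdCgroup = true when kubelet uses the systemd cgroup driver, as it does by default in recent releases):

    [plugins."io.containerd.grpc.v1.cri"]
      sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      # Match kubelet's systemd cgroup driver
      SystemdCgroup = true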

    Configure a registry mirror

    # Locate config_path and set it as follows
    [plugins."io.containerd.grpc.v1.cri".registry]
    config_path = "/etc/containerd/certs.d"

    Create the file /etc/containerd/certs.d/docker.io/hosts.toml with the following content:

    server = "https://docker.io"

    [host."https://z0ow2vpn.mirror.aliyuncs.com"]
    capabilities = ["pull", "resolve"]
    [host."https://docker.mirrors.ustc.edu.cn"]
    capabilities = ["pull","resolve"]
    [host."https://registry-1.docker.io"]
    capabilities = ["pull", "resolve"]

    Then run:

    systemctl daemon-reload
    systemctl enable --now containerd
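
    Optionally verify that containerd came up cleanly:

    systemctl status containerd --no-pager
    ctr version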
  • Install runc

    wget https://github.com/opencontainers/runc/releases/download/v1.2.0-rc.1/runc.amd64
    install -m 755 runc.amd64 /usr/local/sbin/runc
  • Install the CNI plugins

    wget https://github.com/containernetworking/plugins/releases/download/v1.4.1/cni-plugins-linux-amd64-v1.4.1.tgz
    mkdir -p /opt/cni/bin
    tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.4.1.tgz

Install cri-tools

Download and extract:

wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz
tar zxvf crictl-v1.29.0-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-v1.29.0-linux-amd64.tar.gz

Configure the runtime-endpoint and image-endpoint:

crictl config runtime-endpoint unix:///run/containerd/containerd.sock
crictl config image-endpoint unix:///run/containerd/containerd.sock
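
A quick sanity check that crictl can reach containerd:

# Both should answer without "connection refused" errors
crictl info
crictl ps -a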

Install kubeadm, kubelet, and kubectl

The installation depends on cri-tools, so make sure the versions are consistent, otherwise the install will fail.
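
For example, you can list what the repo provides and pin one consistent version set (a sketch; the 1.29.4 version strings are illustrative):

# See which versions are available
yum --disableexcludes=kubernetes list available kubelet kubeadm kubectl cri-tools --showduplicates
# Pin matching versions (illustrative)
yum install -y kubelet-1.29.4 kubeadm-1.29.4 kubectl-1.29.4 --disableexcludes=kubernetes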

  • Add the Kubernetes yum repository

    # This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
    cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
    enabled=1
    gpgcheck=1
    gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
    exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
    EOF
  • Install kubelet, kubeadm, and kubectl, and enable kubelet so it starts automatically on boot:

    yum clean all
    yum makecache
    yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
    systemctl enable --now kubelet
  • Initialize the cluster with kubeadm

    kubeadm init   --apiserver-advertise-address=192.168.56.109 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.169.0.0/16 --service-cidr=10.96.0.0/12 

    --apiserver-advertise-address=192.168.56.109 # the address the API server advertises
    --image-repository registry.aliyuncs.com/google_containers # specify the image repository
    --pod-network-cidr=192.169.0.0/16 # keep this distinct from other host networks, which makes installing calico easier later
    --service-cidr=10.96.0.0/12 # specify the service CIDR, otherwise services will be unreachable later

    Wait a few minutes:

    Your Kubernetes control-plane has initialized successfully!

    To start using your cluster, you need to run the following as a regular user:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

    Alternatively, if you are the root user, you can run:

    export KUBECONFIG=/etc/kubernetes/admin.conf

    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    https://kubernetes.io/docs/concepts/cluster-administration/addons/

    Then you can join any number of worker nodes by running the following on each as root:

    kubeadm join 192.168.56.109:6443 --token gcqhde.1r55hh2h8wgngdhj \
    --discovery-token-ca-cert-hash sha256:b796a75fdefd4aca56bd139ff0478d7692b2766619a1f4689e87bcda07230d18
  • Repeat the steps above on each worker node, then run:

    kubeadm join 192.168.56.109:6443 --token dorx9q.bhix4u00el6jsdmy \
    --discovery-token-ca-cert-hash sha256:b796a75fdefd4aca56bd139ff0478d7692b2766619a1f4689e87bcda07230d18
  • On the master node, run (nodes show NotReady until a CNI plugin such as Calico is installed):

    [root@k8s-master ~]# kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    k8s-master NotReady control-plane 23m v1.29.4
    k8s-node NotReady <none> 7s v1.29.4
  • If you didn't save the join command, regenerate it with:

    [root@k8s-master ~]# kubeadm token create --print-join-command
    kubeadm join 192.168.56.109:6443 --token k3o8hj.kixfcm2b8wk6eptw --discovery-token-ca-cert-hash sha256:c120f29231c6928d35571588d3250f629685c7ed602071cb099b1981908eea16

Install Calico

  • Configure NetworkManager so it leaves Calico's interfaces unmanaged

    vim /etc/NetworkManager/conf.d/calico.conf
    [keyfile]
    unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
  • Install the Tigera Calico operator

    kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml
  • Install the custom resources

    kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/custom-resources.yaml

    Images may fail to pull due to network problems; you can use a proxy or pull them manually, as sketched below.
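
    A manual-pull sketch (docker.io/calico/node:v3.27.3 is just an example image; pull into containerd's k8s.io namespace so kubelet can use it):

    # Images pulled outside the k8s.io namespace are invisible to kubelet
    ctr -n k8s.io images pull docker.io/calico/node:v3.27.3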

Installation Complete

  • Check the status

    [root@k8s-master tigera-operator]# kubectl get po -A
    NAMESPACE NAME READY STATUS RESTARTS AGE
    calico-apiserver calico-apiserver-779b6669b4-4rvmd 1/1 Running 0 70s
    calico-apiserver calico-apiserver-779b6669b4-gph6b 1/1 Running 0 70s
    calico-system calico-kube-controllers-5699bc9456-zsfzg 1/1 Running 0 71s
    calico-system calico-node-42kgg 1/1 Running 0 71s
    calico-system calico-node-pbqpl 1/1 Running 0 71s
    calico-system calico-typha-5d5474dd98-xkfx9 1/1 Running 0 71s
    calico-system csi-node-driver-ntm46 2/2 Running 0 71s
    calico-system csi-node-driver-vc6gf 2/2 Running 0 71s
    kube-system coredns-857d9ff4c9-mtth7 1/1 Running 0 153m
    kube-system coredns-857d9ff4c9-pmcvw 1/1 Running 0 153m
    kube-system etcd-k8s-master 1/1 Running 2 154m
    kube-system kube-apiserver-k8s-master 1/1 Running 0 154m
    kube-system kube-controller-manager-k8s-master 1/1 Running 0 154m
    kube-system kube-proxy-95kk2 1/1 Running 0 153m
    kube-system kube-proxy-x6n42 1/1 Running 0 153m
    kube-system kube-scheduler-k8s-master 1/1 Running 0 154m
    tigera-operator tigera-operator-6bfc79cb9c-5zhjq 1/1 Running 0 11m
  • To keep the control plane available, by default the cluster does not schedule pods on master nodes. You can remove this taint with the following command (see below for how to restore it):

    [root@k8s-master tigera-operator]# kubectl taint nodes --all node-role.kubernetes.io/control-plane-
    node/k8s-master untainted
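
    To restore the default behavior later, re-add the taint (k8s-master is the node name from this setup):

    kubectl taint nodes k8s-master node-role.kubernetes.io/control-plane=:NoSchedule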

Extras

krew

Krew is the package manager for kubectl plugins. Quickstart · Krew (k8s.io)

1. Make sure git is installed.

2. Run this command to download and install krew:

(
set -x; cd "$(mktemp -d)" &&
OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
KREW="krew-${OS}_${ARCH}" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
tar zxvf "${KREW}.tar.gz" &&
./"${KREW}" install krew
)

3. Add this line to your PATH in ~/.bashrc or ~/.zshrc:

export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"

4. Run kubectl krew to verify the installation.
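
Typical krew usage looks like this (the ctx plugin is just an example):

kubectl krew update            # refresh the plugin index
kubectl krew search            # list available plugins
kubectl krew install ctx       # install an example plugin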

directpv

DirectPV is a CSI driver for direct-attached storage. In simpler terms, it is a distributed persistent-volume manager, not a storage system like SAN or NAS. It is useful for discovering, formatting, mounting, scheduling, and monitoring drives across servers.

1. Install the DirectPV Krew plugin

$ kubectl krew install directpv

2. Install DirectPV in the Kubernetes cluster

$ kubectl directpv install

3. Get installation info

$ kubectl directpv info

4. Add drives

# Probe and save drive information to drives.yaml file.
$ kubectl directpv discover

# Initialize selected drives.
$ kubectl directpv init drives.yaml
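
Afterwards, the initialized drives can be listed as a sanity check (assuming DirectPV v4's CLI):

$ kubectl directpv list drives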

5. Deploy a StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: directpv-min-io
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

Then run kubectl create -f minio.yaml --save-config
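
As an illustration, a PVC bound to this StorageClass might look like the sketch below (name and size are hypothetical); with WaitForFirstConsumer it only binds once a pod consumes it:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pvc               # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: directpv-min-io
  resources:
    requests:
      storage: 1Gi             # hypothetical size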

6. Deploy a demo MinIO server

$ curl -sfL https://github.com/minio/directpv/raw/master/functests/minio.yaml | kubectl apply -f -