基于Ubuntu系统kubeadm部署k8s1.25.3主从集群(containerd)
1. kubernetes集群规划
主机IP | 主机名 | 主机配置 | 角色 |
---|---|---|---|
192.168.100.3 | master1 | 2C/4G | 管理节点 |
192.168.100.4 | node1 | 2C/4G | 工作节点 |
192.168.100.5 | node2 | 2C/4G | 工作节点 |
2. 集群前期环境准备
(1)配置网卡
设置手动IP地址
root@localhost:~# vim /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
ethernets:
ens32:
dhcp4: no # 将true修改为no关闭dhcp
addresses: # 静态IP地址/掩码
- 192.168.100.3/24
optional: true # 接口的可选标志
gateway4: 192.168.100.2 # 接口的默认网关
nameservers: # DNS服务器地址列表
addresses: [223.5.5.5,8.8.8.8]
version: 2
重启网卡服务
root@localhost:~# netplan apply
访问测试
root@localhost:~# ping -c 4 bing.com
PING bing.com (204.79.197.200) 56(84) bytes of data.
64 bytes from a-0001.a-msedge.net (204.79.197.200): icmp_seq=1 ttl=128 time=130 ms
64 bytes from a-0001.a-msedge.net (204.79.197.200): icmp_seq=2 ttl=128 time=130 ms
64 bytes from a-0001.a-msedge.net (204.79.197.200): icmp_seq=3 ttl=128 time=130 ms
64 bytes from a-0001.a-msedge.net (204.79.197.200): icmp_seq=4 ttl=128 time=130 ms
--- bing.com ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3006ms
rtt min/avg/max/mdev = 129.649/130.018/130.404/0.345 ms
(2)配置root远程登录
安装openssh-server
gxl@localhost:~$ sudo -i
[sudo] password for gxl: ### 输入密码
root@localhost:~# apt update
root@localhost:~# apt install -y openssh-server
修改ssh配置文件
root@localhost:~# vi /etc/ssh/sshd_config
...
### 修改第22行
#Port 22
### 修改为以下内容
Port 22
...
### 修改第34行
#PermitRootLogin prohibit-password
### 修改为以下内容
PermitRootLogin yes
...
重启ssh服务
root@localhost:~# systemctl restart ssh
(3)初始化脚本
root@k8s-master:~# vi init.sh
#!/bin/bash
# init.sh — one-shot host initialization for every k8s cluster node (Ubuntu).
# Configures: firewall, APT mirror, timezone/NTP, file limits, sysctl, tools.
# Safe to re-run: all appends are guarded so the config files are not duplicated.
echo "——>>> 关闭防火墙与SELinux <<<——"
sleep 3
# Disable the ufw firewall (Ubuntu has no SELinux, so nothing more to do here).
systemctl disable ufw --now &> /dev/null
echo "——>>> 配置阿里仓库 <<<——"
sleep 3
# Point the APT sources at the Aliyun mirror.
sed -i 's|http://archive.ubuntu.com/ubuntu/|https://mirrors.aliyun.com/ubuntu/|g' /etc/apt/sources.list
sed -i 's|http://security.ubuntu.com/ubuntu/|https://mirrors.aliyun.com/ubuntu/|g' /etc/apt/sources.list
echo "——>>> 设置时区并同步时间 <<<——"
sleep 3
# Set the timezone to Shanghai and install the chrony NTP client.
timedatectl set-timezone Asia/Shanghai
apt update &> /dev/null
apt install -y chrony &> /dev/null
systemctl enable chrony --now &> /dev/null
if [ -f /etc/chrony/chrony.conf ]; then
  # Ubuntu's chrony.conf ships "pool ..." entries, NOT the CentOS line
  # "# server 3.centos.pool.ntp.org iburst" the original sed anchored on
  # (so the Aliyun servers were never added). Comment out any existing
  # pool/server entries, then append the Aliyun NTP servers exactly once.
  sed -i -E 's/^(pool|server)/# \1/' /etc/chrony/chrony.conf
  if ! grep -q 'ntp1\.aliyun\.com' /etc/chrony/chrony.conf; then
    cat >> /etc/chrony/chrony.conf << EOF
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
EOF
  fi
  systemctl restart chrony &> /dev/null
  chronyc sources &> /dev/null
else
  echo "chrony 配置文件不存在!"
fi
echo "——>>> 设置系统最大打开文件数 <<<——"
sleep 3
# Raise the per-user open-file limit. pam_limits does not reliably accept
# trailing "#" comments on a limits line, so comments go on their own lines.
# -F: match the literal string (the leading "*" is not a regex here).
if ! grep -qF "* soft nofile 65535" /etc/security/limits.conf; then
  cat >> /etc/security/limits.conf << EOF
# nofile 软限制/硬限制
* soft nofile 65535
* hard nofile 65535
EOF
fi
echo "——>>> 系统内核优化 <<<——"
sleep 3
# sysctl.conf does NOT support trailing "#" comments on a key = value line —
# the original heredoc made every one of these lines fail to parse. Each
# comment is therefore written on its own line, and the append is guarded
# so re-running the script does not duplicate the block.
if ! grep -q '^net\.ipv4\.tcp_max_tw_buckets' /etc/sysctl.conf; then
  cat >> /etc/sysctl.conf << EOF
# 防范SYN洪水攻击, 0为关闭
net.ipv4.tcp_syncookies = 1
# 控制TIME_WAIT套接字的最大数量
net.ipv4.tcp_max_tw_buckets = 20480
# SYN队列的长度, 加大队列长度可容纳更多等待连接的网络连接数
net.ipv4.tcp_max_syn_backlog = 20480
# 接口接收速率快于内核处理速率时, 允许排队的数据包最大数目
net.core.netdev_max_backlog = 262144
# FIN-WAIT-2状态的超时时间
net.ipv4.tcp_fin_timeout = 20
EOF
fi
sysctl -p &> /dev/null
echo "——>>> 减少SWAP使用 <<<——"
sleep 3
# swappiness=0 tells the kernel to avoid swapping; it does not disable swap
# (kubeadm still requires "swapoff -a" / editing fstab later in this guide).
echo "0" > /proc/sys/vm/swappiness
# Persist the setting, removing any previous value first.
sed -i '/vm.swappiness/d' /etc/sysctl.conf
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p &> /dev/null
echo "——>>> 安装系统性能分析工具及其他 <<<——"
sleep 3
apt install -y vim net-tools lsof wget lrzsz &> /dev/null
echo "——>>> 完成系统优化配置 ——"
sleep 3
执行初始化脚本(脚本中使用了 bash 专有的 &> 重定向,需用 bash 执行,不能用 sh)
root@k8s-master:~# bash init.sh
(4)配置主机名
root@localhost:~# hostnamectl set-hostname k8s-master
root@localhost:~# bash
root@k8s-master:~#
(5)配置主机映射
cat >> /etc/hosts << EOF
192.168.100.3 k8s-master
192.168.100.4 k8s-node1
192.168.100.5 k8s-node2
EOF
3. Docker环境安装
(1)开启bridge网桥过滤
bridge(桥接) 是 Linux 系统中的一种虚拟网络设备,它充当一个虚拟的交换机,为集群内的容器提供网络通信功能,容器就可以通过这个 bridge 与其他容器或外部网络通信了。
root@k8s-master:~# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
由于开启bridge功能,需要加载br_netfilter模块来允许在bridge设备上的数据包经过iptables防火墙处理
root@k8s-master:~# modprobe br_netfilter && lsmod | grep br_netfilter
加载配置文件
root@k8s-master:~# sysctl -p /etc/sysctl.d/k8s.conf
(2)安装Docker
安装docker依赖
root@k8s-master:~# apt install -y apt-transport-https ca-certificates curl software-properties-common
添加软件包GPG密钥文件
root@k8s-master:~# curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
添加阿里云 docker-ce 仓库
root@k8s-master:~# add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
更新软件仓库
root@k8s-master:~# apt update
查看可以安装的版本
root@k8s-master:~# apt-cache madison docker-ce
安装 docker 软件包
root@k8s-master:~# apt install -y docker-ce
查看以安装的软件包信息
root@k8s-master:~# dpkg -l | grep docker-ce
ii docker-ce 5:27.3.1-1~ubuntu.20.04~focal amd64 Docker: the open-source application container engine
ii docker-ce-cli 5:27.3.1-1~ubuntu.20.04~focal amd64 Docker CLI: the open-source application container engine
ii docker-ce-rootless-extras 5:27.3.1-1~ubuntu.20.04~focal amd64 Rootless support for Docker.
设置Docker开机自启动
root@k8s-master:~# systemctl enable docker --now
(3)配置镜像加速器和Cgroup驱动
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json << 'EOF'
{
"insecure-registries": ["0.0.0.0/0"],
"registry-mirrors": [
"https://docker.linkedbus.com",
"https://docker.xuanyuan.me",
"https://docker.aityp.com",
"https://docker.m.daocloud.io",
"https://reg-mirror.qiniu.com",
"https://k8s.m.daocloud.io",
"https://elastic.m.daocloud.io",
"https://gcr.m.daocloud.io",
"https://ghcr.m.daocloud.io",
"https://k8s-gcr.m.daocloud.io",
"https://mcr.m.daocloud.io",
"https://nvcr.m.daocloud.io",
"https://quay.m.daocloud.io",
"https://jujucharms.m.daocloud.io",
"https://rocks-canonical.m.daocloud.io",
"https://d3p1s1ji.mirror.aliyuncs.com"
],
"exec-opts": [
"native.cgroupdriver=systemd"
],
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 5,
"log-opts": {
"max-size": "300m",
"max-file": "2"
},
"live-restore": true,
"log-level": "debug"
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
4. Containerd环境安装
(1)配置containerd
root@k8s-master:~# tar Czxvf /usr/local/ containerd-1.6.2-linux-amd64.tar.gz
bin/
bin/containerd-shim-runc-v2
bin/containerd-shim
bin/ctr
bin/containerd-shim-runc-v1
bin/containerd
bin/containerd-stress
配置containerd启动项
root@k8s-master:~# cat > /etc/systemd/system/containerd.service << EOF
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
启动服务
# 重新加载 systemd 守护进程,以便使配置生效
root@k8s-master:~# systemctl daemon-reload
# 设置containerd开机自启
root@k8s-master:~# systemctl enable containerd --now
(2)配置runc
root@k8s-master:~# install -m 755 runc.amd64 /usr/local/sbin/runc
# 查看权限
root@k8s-master:~# ll /usr/local/sbin/runc
-rwxr-xr-x. 1 root root 10802720 Jul 7 13:33 /usr/local/sbin/runc
(3)配置cni
# 创建目录
root@k8s-master:~# mkdir -p /opt/cni/bin
# 解压二进制包
root@k8s-master:~# tar Czxvf /opt/cni/bin/ cni-plugins-linux-amd64-v1.1.1.tgz
./
./macvlan
./static
./vlan
./portmap
./host-local
./vrf
./bridge
./tuning
./firewall
./host-device
./sbr
./loopback
./dhcp
./ptp
./ipvlan
./bandwidth
# 把cni命令ln到/usr/local/bin目录下
root@k8s-master:~# ln -s /opt/cni/bin/* /usr/local/bin
(4)生成containerd配置文件
root@k8s-master:~# mkdir -p /etc/containerd
root@k8s-master:~# containerd config default > $HOME/config.toml
root@k8s-master:~# cp $HOME/config.toml /etc/containerd/config.toml
(5)配置containerd配置文件
# 修改 /etc/containerd/config.toml 文件
root@k8s-master:~# sudo sed -i 's#k8s.gcr.io/pause:3.6#registry.aliyuncs.com/google_containers/pause:3.9#g' /etc/containerd/config.toml
# 确保 /etc/containerd/config.toml 中的 disabled_plugins 内不存在 cri
root@k8s-master:~# sudo sed -i "s#SystemdCgroup = false#SystemdCgroup = true#g" /etc/containerd/config.toml
(6)配置镜像加速
# 修改145行为 config_path = "/etc/containerd/certs.d"
root@k8s-master:~# sudo sed -i 's#config_path = ""#config_path = "/etc/containerd/certs.d"#' /etc/containerd/config.toml
docker hub镜像加速
# docker.linkedbus.com 镜像加速
mkdir -p /etc/containerd/certs.d/docker.linkedbus.com
tee /etc/containerd/certs.d/docker.linkedbus.com/hosts.toml << 'EOF'
server = "https://docker.linkedbus.com"
[host."https://docker.linkedbus.com"]
capabilities = ["pull", "resolve"]
EOF
# docker.xuanyuan.me 镜像加速
mkdir -p /etc/containerd/certs.d/docker.xuanyuan.me
tee /etc/containerd/certs.d/docker.xuanyuan.me/hosts.toml << 'EOF'
server = "https://docker.xuanyuan.me"
[host."https://docker.xuanyuan.me"]
capabilities = ["pull", "resolve"]
EOF
mkdir -p /etc/containerd/certs.d/docker.io
tee /etc/containerd/certs.d/docker.io/hosts.toml << 'EOF'
server = "https://docker.io"
[host."https://d3p1s1ji.mirror.aliyuncs.com"]
capabilities = ["pull", "resolve"]
[host."https://docker.m.daocloud.io"]
capabilities = ["pull", "resolve"]
[host."https://reg-mirror.qiniu.com"]
capabilities = ["pull", "resolve"]
EOF
# registry.k8s.io镜像加速
mkdir -p /etc/containerd/certs.d/registry.k8s.io
tee /etc/containerd/certs.d/registry.k8s.io/hosts.toml << 'EOF'
server = "https://registry.k8s.io"
[host."https://k8s.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# docker.elastic.co镜像加速
mkdir -p /etc/containerd/certs.d/docker.elastic.co
tee /etc/containerd/certs.d/docker.elastic.co/hosts.toml << 'EOF'
server = "https://docker.elastic.co"
[host."https://elastic.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# gcr.io镜像加速
mkdir -p /etc/containerd/certs.d/gcr.io
tee /etc/containerd/certs.d/gcr.io/hosts.toml << 'EOF'
server = "https://gcr.io"
[host."https://gcr.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# ghcr.io镜像加速
mkdir -p /etc/containerd/certs.d/ghcr.io
tee /etc/containerd/certs.d/ghcr.io/hosts.toml << 'EOF'
server = "https://ghcr.io"
[host."https://ghcr.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# k8s.gcr.io镜像加速
mkdir -p /etc/containerd/certs.d/k8s.gcr.io
tee /etc/containerd/certs.d/k8s.gcr.io/hosts.toml << 'EOF'
server = "https://k8s.gcr.io"
[host."https://k8s-gcr.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# mcr.m.daocloud.io镜像加速
mkdir -p /etc/containerd/certs.d/mcr.microsoft.com
tee /etc/containerd/certs.d/mcr.microsoft.com/hosts.toml << 'EOF'
server = "https://mcr.microsoft.com"
[host."https://mcr.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# nvcr.io镜像加速
mkdir -p /etc/containerd/certs.d/nvcr.io
tee /etc/containerd/certs.d/nvcr.io/hosts.toml << 'EOF'
server = "https://nvcr.io"
[host."https://nvcr.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# quay.io镜像加速
mkdir -p /etc/containerd/certs.d/quay.io
tee /etc/containerd/certs.d/quay.io/hosts.toml << 'EOF'
server = "https://quay.io"
[host."https://quay.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# registry.jujucharms.com镜像加速
mkdir -p /etc/containerd/certs.d/registry.jujucharms.com
tee /etc/containerd/certs.d/registry.jujucharms.com/hosts.toml << 'EOF'
server = "https://registry.jujucharms.com"
[host."https://jujucharms.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
# rocks.canonical.com镜像加速
mkdir -p /etc/containerd/certs.d/rocks.canonical.com
tee /etc/containerd/certs.d/rocks.canonical.com/hosts.toml << 'EOF'
server = "https://rocks.canonical.com"
[host."https://rocks-canonical.m.daocloud.io"]
capabilities = ["pull", "resolve", "push"]
EOF
重启服务
root@k8s-master:~# systemctl daemon-reload
root@k8s-master:~# systemctl restart containerd
ctr拉取镜像测试
# ctr拉取镜像
root@k8s-master:~# ctr image pull --hosts-dir=/etc/containerd/certs.d docker.io/library/nginx:latest
docker.io/library/nginx:latest: resolving |--------------------------------------|
elapsed: 20.9s total: 0.0 B (0.0 B/s)
docker.io/library/nginx:latest: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:447a8665cc1dab95b1ca778e162215839ccbb9189104c79d7ec3a81e14577add: exists |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:5f0574409b3add89581b96c68afe9e9c7b284651c3a974b6e8bac46bf95e6b7f: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:23fa5a7b99a685258885918c468ded042b95b5a7c56cee758a689f4f7e5971e0: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:5ef79149e0ec84a7a9f9284c3f91aa3c20608f8391f5445eabe92ef07dbda03c: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:e4fff0779e6ddd22366469f08626c3ab1884b5cbe1719b26da238c95f247b305: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:2a0cb278fd9f7737ef5ddc52b4198821dd02e87ed204f74d7e491016b96ebe7f: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:7045d6c32ae2d3dc002f33beb0c1cdd7f69b2663a9720117ac9b82ec28865e30: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:03de31afb03573e0fa679d6777ba3267c2b8ec087cbc0efa46524c1de08f43ec: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:0f17be8dcff2e2c27ee6a33c1bacc582e71f76f855c2d69d510f2a93df897303: exists |++++++++++++++++++++++++++++++++++++++|
layer-sha256:14b7e5e8f3946da0f9120dab3b0e05ef24a5ca874ba484327db8b3308a92b532: exists |++++++++++++++++++++++++++++++++++++++|
elapsed: 22.5s total: 0.0 B (0.0 B/s)
unpacking linux/amd64 sha256:447a8665cc1dab95b1ca778e162215839ccbb9189104c79d7ec3a81e14577add...
done: 8.365106ms
# 查看镜像
root@k8s-master:~# ctr i ls
REF TYPE DIGEST SIZE PLATFORMS LABELS
docker.io/library/nginx:latest application/vnd.oci.image.index.v1+json sha256:447a8665cc1dab95b1ca778e162215839ccbb9189104c79d7ec3a81e14577add 67.7 MiB linux/386,linux/amd64,linux/arm/v5,linux/arm/v7,linux/arm64/v8,linux/mips64le,linux/ppc64le,linux/s390x,unknown/unknown -
(7)配置crictl
# 安装工具
root@k8s-master:~# tar xf crictl-v1.25.0-linux-amd64.tar.gz -C /usr/local/bin/
# 生成配置文件
root@k8s-master:~# cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
- runtime-endpoint # 指定了容器运行时的sock文件位置
- image-endpoint # 指定了容器镜像使用的sock文件位置
- timeout # 容器运行时或容器镜像服务之间的通信超时时间
- debug # 指定了crictl工具的调试模式,false表示调试模式未启用,true则会在输出中包含更多的调试日志信息,有助于故障排除和问题调试
查看配置是否生效
root@k8s-master:~# crictl info
使用 crictl 拉取镜像测试
root@k8s-master:~# crictl pull docker.io/library/nginx:1.20.2
DEBU[0000] get image connection
DEBU[0000] PullImageRequest: &PullImageRequest{Image:&ImageSpec{Image:docker.io/library/nginx:1.20.2,Annotations:map[string]string{},},Auth:nil,SandboxConfig:nil,}
DEBU[0046] PullImageResponse: &PullImageResponse{ImageRef:sha256:0584b370e957bf9d09e10f424859a02ab0fda255103f75b3f8c7d410a4e96ed5,}
Image is up to date for sha256:0584b370e957bf9d09e10f424859a02ab0fda255103f75b3f8c7d410a4e96ed5
# 查看拉取的结果
root@k8s-master:~# crictl images
DEBU[0000] get image connection
DEBU[0000] ListImagesRequest: &ListImagesRequest{Filter:&ImageFilter{Image:&ImageSpec{Image:,Annotations:map[string]string{},},},}
DEBU[0000] ListImagesResponse: &ListImagesResponse{Images:[]*Image{&Image{Id:sha256:0584b370e957bf9d09e10f424859a02ab0fda255103f75b3f8c7d410a4e96ed5,RepoTags:[docker.io/library/nginx:1.20.2],RepoDigests:[docker.io/library/nginx@sha256:38f8c1d9613f3f42e7969c3b1dd5c3277e635d4576713e6453c6193e66270a6d],Size_:56732885,Uid:nil,Username:,Spec:nil,Pinned:false,},},}
IMAGE TAG IMAGE ID SIZE
docker.io/library/nginx 1.20.2 0584b370e957b 56.7MB
(8)配置nerdctl
root@k8s-master:~# tar xf nerdctl-0.21.0-linux-amd64.tar.gz -C /usr/local/bin/
root@k8s-master:~# nerdctl --version
nerdctl version 0.21.0
(9)优化nerdctl
root@k8s-master:~# mkdir -p /etc/nerdctl
root@k8s-master:~# cat > /etc/nerdctl/nerdctl.toml << EOF
namespace = "k8s.io"
insecure_registry = true
cni_path = "/opt/cni/bin/"
EOF
为了测试 insecure_registry = true
设置,需要配置并运行一个不使用 TLS 的镜像仓库。我们可以使用 Docker Registry 镜像来创建一个本地不安全的镜像仓库。
使用 Docker Registry 镜像创建不安全的本地仓库
启动一个不安全的本地 Docker Registry
你可以使用 docker run
命令启动一个不使用 TLS 的本地镜像仓库:
root@k8s-master:~# docker run -d -p 5000:5000 --name registry --restart=always registry:2
配置 nerdctl
使用不安全的仓库
namespace = "k8s.io"
insecure_registry = true
cni_path = "/data/kube/bin"
使用 nerdctl
推送和拉取镜像以测试连接
# 拉取一个测试镜像
nerdctl pull nginx:1.20.2
# 给镜像打标签
nerdctl tag nginx:1.20.2 localhost:5000/my-nginx
# 推送镜像到本地不安全仓库
nerdctl push localhost:5000/my-nginx
# 从不安全仓库拉取镜像
nerdctl pull localhost:5000/my-nginx
nerdctl拉取镜像测试
root@k8s-master:~# nerdctl -n k8s.io image pull docker.io/library/nginx:1.20.2
docker.io/library/nginx:1.20.2: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:03f3cb0afb7bd5c76e01bfec0ce08803c495348dccce37bcb82c347b4853c00b: done |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:cba27ee29d62dfd6034994162e71c399b08a84b50ab25783eabce64b1907f774: done |++++++++++++++++++++++++++++++++++++++|
config-sha256:50fe74b50e0d0258922495297efbb9ebc3cbd5742103df1ca54dc21c07d24575: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:c423e1dacb26b544d5623a4a6a137c5a6e03e00048c3a3e074149b660ea78a2d: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:a2abf6c4d29d43a4bf9fbb769f524d0fb36a2edab49819c1bf3e76f409f953ea: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:da03644a12939e348735c7b34b6678429795293c69597602c50e9b3fb344973e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:dcbfc6badd70b93971be6029156559388b9676386d543c042f8ff92ce83ab9c0: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:3f7ccff97047fb175bc00671991889f0c8e942a80b2857e9fd662293d275be9e: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:49e31097680b161295ba1a3963cf0f8516a5e43ac1b99a1dafa1425fc9bec29f: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 30.6s total: 54.1 M (1.8 MiB/s)
root@k8s-master:~# nerdctl -n k8s.io image ls
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
nginx 1.20.2 03f3cb0afb7b 25 seconds ago linux/amd64 146.2 MiB 54.1 MiB
sha256 50fe74b50e0d0258922495297efbb9ebc3cbd5742103df1ca54dc21c07d24575 03f3cb0afb7b 25 seconds ago linux/amd64 146.2 MiB 54.1 MiB
5. 配置阿里云APT源
(1)添加k8s源
安装依赖包,使得 apt 支持 ssl 传输
root@k8s-master:~# apt install apt-transport-https
添加软件包GPG密钥文件
root@k8s-master:~# curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
强制刷新包列表并确保 apt 更新了所有可用的源和包信息
root@k8s-master:~# apt update
root@k8s-master:~# apt-get clean
root@k8s-master:~# apt-get update --fix-missing
添加阿里云 kubernetes 仓库
root@k8s-master:~# cat > /etc/apt/sources.list.d/kubernetes.list << EOF
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
(2)安装k8s工具
root@k8s-master:~# apt install -y kubeadm=1.25.3-00 kubelet=1.25.3-00 kubectl=1.25.3-00
- kubeadm:用于初始化集群,并配置集群所需的组件并生成对应的安全证书和令牌;
- kubelet:负责与 Master 节点通信,并根据 Master 节点的调度决策来创建、更新和删除 Pod,同时维护 Node 节点上的容器状态;
- kubectl:用于管理k8集群的一个命令行工具;
设置kubelet开启自启,不需要直接开启初始化过程会启动
root@k8s-master:~# systemctl enable kubelet
(3)关闭swap分区
临时关闭
root@k8s-master:~# swapoff -a
永久关闭
root@k8s-master:~# sed -ri 's/.*swap.*/#&/' /etc/fstab
(4)初始化集群
命令行方式
kubeadm init \
--apiserver-advertise-address=192.168.100.3 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.25.3 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
yaml文件方式
root@k8s-master:~# kubeadm config print init-defaults > kubeadm-config.yaml
root@k8s-master:~# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.100.3  # 修改为本机IP地址(###注释与取值之间必须有空格,否则YAML解析失败)
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock  # 本教程使用containerd,不能填cri-dockerd的sock
imagePullPolicy: IfNotPresent
name: k8s-master  # 修改为本机主机名
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers  # 修改为阿里云仓库地址
kind: ClusterConfiguration
kubernetesVersion: 1.25.3
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
scheduler: {}
初始化集群
root@k8s-master:~# kubeadm init --config kubeadm-config.yaml --upload-certs
#选项说明:
--upload-certs //初始化过程将生成证书,并将其上传到etcd存储中,否则节点无法加入集群
初始化失败使用以下命令重置
root@k8s-master:~# kubeadm reset
(5)配置认证文件
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
使用kubectl工具查看节点状态
root@k8s-master:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady control-plane 20s v1.25.3
注:由于网络插件还没有部署,节点会处于"NotReady"状态
(6)将node节点加入集群
kubeadm join 192.168.100.3:6443 --token rtrcwi.cyn9p08jznxgmciq \
--discovery-token-ca-cert-hash sha256:93f37dba1f78e73ec5e343baeda001b481bc120efaefa962887991ac0da42216
(7)去除污点
root@k8s-master:~# kubectl taint nodes k8s-master node-role.kubernetes.io/control-plane-
6. 配置Calico网络组件
(1)下载配置文件
root@k8s-master:~# wget --no-check-certificate https://framagit.org/mirrors-github/projectcalico/calico/-/raw/v3.26.4/manifests/calico.yaml
(2)编辑配置文件
root@k8s-master:~# vim calico.yaml
# 在 - name: CLUSTER_TYPE 下方添加如下内容
- name: CLUSTER_TYPE
value: "k8s,bgp"
# 下方为新增内容
# 如果集群服务器中存在不同的网卡名称,需要在这里将每台服务器所使用的网卡名称全部填写(使用英文逗号分隔),否则网络无法使用,一直报错
# 例如:集群一共存在10台机器,其中有些机器的网卡名称是 ens33,有些是 eth0,有些是 enp9s0f0,则网卡配置为 interface=ens33,eth0,enp9s0f0
- name: IP_AUTODETECTION_METHOD
value: "interface=网卡名称"
(3)部署Calico网络
root@k8s-master:~# kubectl apply -f calico.yaml
查看集群Pod运行状态
root@k8s-master:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 3m36s v1.25.3
k8s-node1 Ready <none> 3m1s v1.25.3
root@k8s-master:~# kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane 3m45s v1.25.3 192.168.100.3 <none> Ubuntu 20.04.6 LTS 5.4.0-200-generic containerd://1.6.2
k8s-node1 Ready <none> 3m10s v1.25.3 192.168.100.4 <none> Ubuntu 20.04.6 LTS 5.4.0-200-generic containerd://1.6.2
root@k8s-master:~# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-69b5dd6548-cxw8n 1/1 Running 0 41s
kube-system calico-node-mpnlg 1/1 Running 0 41s
kube-system calico-node-qxc7z 1/1 Running 0 41s
kube-system coredns-c676cc86f-gmdrm 1/1 Running 0 75s
kube-system coredns-c676cc86f-zwq6t 1/1 Running 0 75s
kube-system etcd-k8s-master 1/1 Running 0 89s
kube-system kube-apiserver-k8s-master 1/1 Running 0 91s
kube-system kube-controller-manager-k8s-master 1/1 Running 0 91s
kube-system kube-proxy-6sbm7 1/1 Running 0 57s
kube-system kube-proxy-7d8kl 1/1 Running 0 75s
kube-system kube-scheduler-k8s-master 1/1 Running 0 89s
(4)测试部署
root@k8s-master:~# kubectl create deployment --image nginx:1.20.2 nginx
root@k8s-master:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6f974c44c8-xzvwg 1/1 Running 0 65s
root@k8s-master:~# kubectl describe pod nginx-6f974c44c8-xzvwg
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 64s default-scheduler Successfully assigned default/nginx-6f974c44c8-xzvwg to node
Normal Pulling 63s kubelet Pulling image "nginx:1.20.2"
Normal Pulled 3s kubelet Successfully pulled image "nginx:1.20.2" in 1m0.406s (1m0.406s including waiting)
Normal Created 2s kubelet Created container nginx
Normal Started 2s kubelet Started container nginx
原文地址:https://blog.csdn.net/weixin_58410911/article/details/144236481
免责声明:本站文章内容转载自网络资源,如本站内容侵犯了原著者的合法权益,可联系本站删除。更多内容请关注自学内容网(zxcms.com)!