Offline deployment of Kubernetes 1.19.3 (arm64) on Kylin V10
1. Installation environment
[root@k8s-master01 ~]# cat /etc/kylin-release
Kylin Linux Advanced Server release V10 (Tercel)
[root@k8s-master01 ~]# uname -a
Linux k8s-master01 4.19.90-20.1stable.ky10.aarch64 #1 SMP Sun Aug 23 11:31:17 CST 2020 aarch64 aarch64 aarch64 GNU/Linux
2. Edit the hosts file on the master and node hosts
# cat /etc/hosts
# host IP        hostname
192.168.111.21 k8s-master01
192.168.111.19 k8s-node01
192.168.111.55 k8s-node02
3. Disable the firewall and SELinux on the master and nodes
systemctl stop firewalld && systemctl disable firewalld
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
swapoff -a
To disable the swap partition permanently, open the following file and comment out the swap line:
vi /etc/fstab
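Instead of editing the file by hand, the swap entry can also be commented out with sed; a minimal sketch (back up /etc/fstab first), followed by a check that swap is really off:
cp /etc/fstab /etc/fstab.bak
sed -ri 's/^([^#].*swap.*)$/#\1/' /etc/fstab
free -m
swapon --show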
4. Configure and tune kernel parameters
# modprobe br_netfilter
Configure sysctl kernel parameters:
# cat > /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
Raise the file-descriptor and process limits:
# echo "* soft nofile 655360" >> /etc/security/limits.conf
# echo "* hard nofile 655360" >> /etc/security/limits.conf
# echo "* soft nproc 655360">> /etc/security/limits.conf
# echo "* hard nproc 655360">> /etc/security/limits.conf
# echo "* softmemlockunlimited">> /etc/security/limits.conf
# echo "* hard memlockunlimited">> /etc/security/limits.conf
# echo "DefaultLimitNOFILE=1024000">> /etc/systemd/system.conf
# echo "DefaultLimitNPROC=1024000">> /etc/systemd/system.conf
5. Install dependency packages
cd server
rpm -ivh libnetfilter_cttimeout-1.0.0-2.el7.aarch64.rpm
rpm -ivh lib64netfilter_cthelper0-1.0.0-7.mga7.aarch64.rpm
rpm -ivh conntrack-tools-1.4.5-1.46.aarch64.rpm
# Install the CNI plugins
mkdir -p /opt/cni/bin
mv cni-plugins-linux-arm64-v0.8.2.tgz /opt/cni/bin
cd /opt/cni/bin
tar -zxvf cni-plugins-linux-arm64-v0.8.2.tgz
# Install crictl
mkdir -p /opt/bin
mv crictl-v1.16.0-linux-arm64.tar.gz /opt/bin/
cd /opt/bin/
tar -zxvf crictl-v1.16.0-linux-arm64.tar.gz
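crictl needs to know which CRI endpoint to talk to; with Docker it goes through kubelet's built-in dockershim socket, which only exists once kubelet is running. A minimal sketch, assuming the default dockershim socket path and that /opt/bin should be added to PATH for the current shell (both are assumptions, adjust to your environment):
export PATH=$PATH:/opt/bin
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
timeout: 10
EOF
ls /opt/cni/bin        # confirm the CNI plugin binaries were extracted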
6. Install Docker
tar xf docker-18.09.8.tgz
cp docker/* /usr/bin/
Configure the Docker systemd service:
vim /etc/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd --selinux-enabled=false
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
Start Docker and enable it at boot:
systemctl start docker && systemctl enable docker
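Optionally, confirm that Docker is healthy before moving on:
docker version
docker info | grep -iE 'cgroup|storage driver'
systemctl status docker --no-pager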
7. Install kubelet
tar -zxvf kubernetes-server-linux-arm64.tar.gz
cd kubernetes/server/bin/
chmod +x {kubeadm,kubelet,kubectl}
cp kubectl /usr/bin/
cp kubeadm /usr/bin/
cp kubelet /usr/bin/
Configure the kubelet systemd service:
vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Service
After=network.target network-online.target docker.service
Wants=network-online.target docker.service
[Service]
Type=simple
EnvironmentFile=-/etc/kubernetes/kubelet/k8s-kubelet.conf
ExecStartPre=-source /etc/kubernetes/kubelet/k8s-kubelet.conf
ExecStart=/usr/bin/kubelet
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
systemctl enable kubelet && systemctl start kubelet
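Until kubeadm init (or join) has generated the kubelet configuration, it is normal for kubelet to keep restarting; these commands just confirm the unit is enabled and show its recent logs:
systemctl status kubelet --no-pager
journalctl -u kubelet -n 20 --no-pager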
8. Configure kubeadm
mkdir -p /etc/kubernetes/manifests
mkdir -p /etc/systemd/system/kubelet.service.d
vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --feature-gates SupportPodPidsLimit=false --feature-gates SupportNodePidsLimit=false"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
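Reload systemd so the new drop-in is picked up before running kubeadm:
systemctl daemon-reload
systemctl restart kubelet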
9. Import images
kube-apiserver
kube-controller-manager
kube-scheduler
These three images only need to be imported on the master node; steps 1-9 must be executed on every node in the cluster.
cd kubernetes/server/bin/
docker load -i kube-apiserver.tar
docker load -i kube-controller-manager.tar
docker load -i kube-scheduler.tar
docker load -i kube-proxy.tar
docker tag gcr.io/k8s-staging-kubernetes/kube-apiserver-arm64:v1.19.3 k8s.gcr.io/kube-apiserver:v1.19.3
docker tag gcr.io/k8s-staging-kubernetes/kube-controller-manager-arm64:v1.19.3 k8s.gcr.io/kube-controller-manager:v1.19.3
docker tag gcr.io/k8s-staging-kubernetes/kube-scheduler-arm64:v1.19.3 k8s.gcr.io/kube-scheduler:v1.19.3
docker tag gcr.io/k8s-staging-kubernetes/kube-proxy-arm64:v1.19.3 k8s.gcr.io/kube-proxy:v1.19.3
cd server
docker load -i pause-arm.tar
docker load -i coredns-arm64.tar
docker load -i etcd-arm64.tar
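Optionally confirm that all images are present locally with the tags kubeadm expects:
docker images | grep -E 'k8s.gcr.io|coredns|etcd|pause'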
10. Initialize the master node
# kubeadm init --kubernetes-version=1.19.3 \
--apiserver-advertise-address=192.168.111.21 \
--service-cidr=172.16.0.0/16 \
--pod-network-cidr=10.244.0.0/16
After initialization succeeds, run the commands from its output:
# mkdir -p $HOME/.kube
# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# sudo chown $(id -u):$(id -g) $HOME/.kube/config
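At this point the master should be registered but will typically stay NotReady until the flannel network add-on in the next step is applied; verify with:
kubectl get nodes
kubectl get pods -n kube-system -o wide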
11. Deploy the flannel network add-on
cd server
docker load -i flanneld-v0.13.1-rc1-arm64.docker
kubectl apply -f kube-flannel.yml
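Verify that the flannel DaemonSet pod is running and that the master switches to Ready (the app=flannel label and kube-system namespace match the stock kube-flannel.yml for this flannel version):
kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes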
12. Join the nodes to the cluster
On the master node, run:
kubeadm token create --print-join-command
On each node, run:
kubeadm join 192.168.111.21:6443 --token 6jc88d.3w16nt2bxo7ebc98 --discovery-token-ca-cert-hash sha256:99d0b395548e75a01e3326451fbd152e5f3e4a5eb1e52236c1a8279d310478ec
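Back on the master, confirm the new nodes have joined and gone Ready (this can take a minute while kube-proxy and flannel pods start on them):
kubectl get nodes -o wide
kubectl -n kube-system get pods -o wide | grep -E 'kube-proxy|flannel'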
13. Troubleshooting
DNS resolution fails / pod network is unreachable
Diagnosis steps:
1. Check whether the VTEP MAC addresses of the nodes are all identical:
kubectl get node -o yaml | grep -A3 Vtep
2. If they are identical, apply the following fix:
# cat<<'EOF'>/etc/systemd/network/10-flannel.1.link
[Match]
OriginalName=flannel.1[Link]
MACAddressPolicy=none
EOF# cat<<'EOF'>>/etc/systemd/networkd.conf
[Match]
OriginalName=flannel*[Link]
MACAddressPolicy=none
EOFip -d link show flannel.1
ip link delete flannel.1
docker ps -a | grep -m1 flanneld
docker restart f87    # f87 = ID prefix of the flanneld container found above
ip -d link show flannel.1
kubectl get node -o yaml | grep -A3 Vtep
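A quick end-to-end DNS check once the VTEP MAC addresses differ again; this assumes a busybox image (tag 1.28 here) is available offline on the nodes:
kubectl -n kube-system get pods -l k8s-app=kube-dns -o wide
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default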
14. Deploy the Harbor registry
Install docker-compose:
cd harbor
chmod +x docker-compose
mv docker-compose /usr/bin/
Import the Harbor 1.9.1 images:
tar -xvf harbor.tar
cd harbor/harbor
bash harbor.sh
tar -xf v1.9.1.tar.gz
cd v1.9.1/harbor-1.9.1/make
vim harbor.yml
Set hostname to the local IP and change port to 30888 to avoid port conflicts:
hostname: 192.168.111.21
port: 30888
chmod 777 prepare
./install.sh
Check the docker-compose status; all services showing Up means the installation succeeded:
docker-compose ps
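Because Harbor is exposed over plain HTTP on port 30888, every Docker host that pulls from it normally has to trust it as an insecure registry. A minimal sketch, assuming /etc/docker/daemon.json does not exist yet (merge the key in if it does):
cat > /etc/docker/daemon.json <<EOF
{
  "insecure-registries": ["192.168.111.21:30888"]
}
EOF
systemctl restart docker
docker login 192.168.111.21:30888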
15. Deploy the Rancher service
Import the images:
docker load -i rancher-arm.tar
docker load -i rancher-agent-arm.tar
docker run -d --privileged --restart=unless-stopped -p 30080:80 -p 30443:443 rancher/rancher:v2.5.8-linux-arm64
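To check that Rancher came up, list the container and probe the HTTPS port (replace the IP with the host actually running Rancher):
docker ps --filter ancestor=rancher/rancher:v2.5.8-linux-arm64
curl -kI https://192.168.111.21:30443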