This article is a write-up of the steps for deploying Kubernetes 1.17.3 on CentOS.
Before starting, it is recommended to get a basic understanding of what Kubernetes does and of the roles the master and node machines play in a cluster.
Kubernetes cluster architecture diagram
Node information and system initialization
Machine information
k8s-master: 10.211.55.6
k8s-node1: 10.211.55.7
k8s-node2: 10.211.55.8
Set hostnames
hostnamectl set-hostname k8s-master   # on 10.211.55.6
hostnamectl set-hostname k8s-node1    # on 10.211.55.7
hostnamectl set-hostname k8s-node2    # on 10.211.55.8
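So the machines can reach one another by name, it also helps to append the mapping from the machine list above to /etc/hosts on all three hosts (a minimal sketch; unnecessary if these names already resolve in your environment):

cat >> /etc/hosts <<EOF
10.211.55.6 k8s-master
10.211.55.7 k8s-node1
10.211.55.8 k8s-node2
EOF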
Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git gcc libffi-devel python-devel openssl-devel
Flush the firewall and disable SELinux
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
Disable swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
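To confirm swap is really off:

free -h   # the Swap line should show 0B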
Kernel parameters
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
# do not use swap space unless the system is OOM
vm.swappiness = 0
# do not check whether physical memory is sufficient
vm.overcommit_memory = 1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 1048576
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
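Note: the net.bridge.* keys only exist while the br_netfilter module is loaded (the ipvs step further down loads it anyway); loading it before running sysctl -p avoids "No such file or directory" complaints:

modprobe br_netfilter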
Time
timedatectl set-timezone Asia/Shanghai
# write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
# restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
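The ntp and ntpdate packages were installed with the dependencies above; a one-shot sync plus enabling ntpd (pool.ntp.org is an arbitrary choice here, substitute your own server) keeps the three machines' clocks in step:

ntpdate pool.ntp.org
systemctl start ntpd && systemctl enable ntpd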
Stop unneeded services
systemctl stop postfix && systemctl disable postfix
Configure rsyslogd and systemd journald
mkdir /var/log/journal          # directory for persistent journal storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# persist logs to disk
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# cap total disk usage at 10G, single files at 200M
SystemMaxUse=10G
SystemMaxFileSize=200M
# keep logs for two weeks, and do not forward to syslog
MaxRetentionSec=2week
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
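A quick way to confirm journald has switched to persistent storage under /var/log/journal and to see how much space it is using:

journalctl --disk-usage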
Upgrade the kernel to 4.4.x (the stock 3.10 kernel has known bugs)
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# install the long-term kernel from the elrepo-kernel repository; after installation,
# check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16
# line; if it does not, install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# boot from the new kernel by default
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
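The machine must be rebooted to boot into the new kernel; verify it afterwards:

reboot
# after the machine comes back up:
uname -r   # should print 4.4.189-1.el7.elrepo.x86_64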
Kubeadm deployment
Enable ipvs (master/node)
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
Install docker-ce-17.12.1.ce (master/node)
Option 1
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-17.12.1.ce
systemctl start docker && systemctl enable docker
pip install docker-compose -U
Option 2
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install -y docker-ce
## create the /etc/docker directory
mkdir /etc/docker
# configure the daemon
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# restart the docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
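If you used Option 2's daemon.json, verify after the restart that Docker actually picked up the systemd cgroup driver (the kubelet and Docker must agree on it):

docker info | grep -i 'cgroup driver'
# expect: Cgroup Driver: systemd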
Install kubeadm (master/node; the nodes also need kubeadm and kubelet in order to join later)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet && systemctl start kubelet
# the cluster itself is initialized further down with a config file; a bare init would be:
# kubeadm init --pod-network-cidr=10.244.0.0/16
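The aliyun repo always serves the newest release, so a bare yum install can land a version newer than the v1.17.3 images pulled in the next step. Pinning the packages (standard yum version syntax) avoids the skew:

yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3 --disableexcludes=kubernetes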
Pull the required images (master/node)
# pull the images
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5
# retag the images with the k8s.gcr.io names kubeadm expects
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.3 k8s.gcr.io/kube-scheduler:v1.17.3
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.3 k8s.gcr.io/kube-proxy:v1.17.3
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5 k8s.gcr.io/coredns:1.6.5
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.3 k8s.gcr.io/kube-apiserver:v1.17.3
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.3 k8s.gcr.io/kube-controller-manager:v1.17.3
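The pull and tag pairs above are repetitive; the same work can be done with a loop over the image list (identical images and tags to the commands above):

for img in kube-apiserver:v1.17.3 kube-controller-manager:v1.17.3 \
           kube-scheduler:v1.17.3 kube-proxy:v1.17.3 \
           pause:3.1 etcd:3.4.3-0 coredns:1.6.5; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done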
Run kubeadm to install Kubernetes (master)
kubeadm config print init-defaults > kubeadm-config.yaml
vim kubeadm-config.yaml

# fields to edit in kubeadm-config.yaml (values matching this setup:
# the master's IP is 10.211.55.6 and the target version is v1.17.3):
localAPIEndpoint:
  advertiseAddress: 10.211.55.6
kubernetesVersion: v1.17.3
networking:
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

# run the install (in 1.17 the flag is --upload-certs; the older
# --experimental-upload-certs was removed in 1.16)
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo "source <(kubectl completion bash)" >> ~/.bashrc
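With mode: ipvs set above, kube-proxy programs the kernel's IPVS tables instead of iptables chains once the cluster is up. ipvsadm (installed with the dependency packages earlier) shows them; the 10.96.0.1:443 entry is the apiserver's ClusterIP from the 10.96.0.0/12 service subnet:

ipvsadm -Ln
# expect something like:
# TCP  10.96.0.1:443 rr
#   -> 10.211.55.6:6443    Masq    1    ...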
Join the nodes to the cluster
After kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log has completed on the master, simply run the join command printed in the install log on each node.
# the join command from this installation
kubeadm join 10.211.55.6:6443 --token nj59e7.kq3ldvb026cw1s7x --discovery-token-ca-cert-hash sha256:e836cb1a3e1e3fb226614fba8800ad338dbaf133cfcdd0ccf8ba870eb163ffe2
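The bootstrap token in that command expires after 24 hours by default; if it has lapsed, print a fresh join command on the master:

kubeadm token create --print-join-command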
Deploy the flannel network
# pre-pull the flannel image on every machine (flannel runs as a DaemonSet)
docker pull quay.io/coreos/flannel:v0.11.0-amd64
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
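The flannel DaemonSet takes a moment to roll out; its pods (labelled app=flannel in the manifest) and the nodes' Ready status can be checked with:

kubectl get pods -n kube-system -l app=flannel
kubectl get nodes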
Setup complete
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   2d11h   v1.17.3
k8s-node1    Ready    <none>   2d11h   v1.17.3
k8s-node2    Ready    <none>   2d11h   v1.17.3
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-6955765f44-csmfg             1/1     Running   0          15m
coredns-6955765f44-jq897             1/1     Running   1          2d7h
etcd-k8s-master                      1/1     Running   3          2d11h
kube-apiserver-k8s-master            1/1     Running   3          2d11h
kube-controller-manager-k8s-master   1/1     Running   10         2d11h
kube-flannel-ds-amd64-8qdvb          1/1     Running   0          2d7h
kube-flannel-ds-amd64-r2hq6          1/1     Running   0          2d7h
kube-flannel-ds-amd64-twptj          1/1     Running   6          2d7h
kube-proxy-75wds                     1/1     Running   3          2d11h
kube-proxy-8t4zz                     1/1     Running   0          2d11h
kube-proxy-g87zw                     1/1     Running   0          2d11h
kube-scheduler-k8s-master            1/1     Running   11         2d11h
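As a final smoke test (the nginx deployment is purely illustrative), schedule a pod and confirm it lands on one of the workers:

kubectl create deployment nginx --image=nginx
kubectl get pods -o wide   # should show the pod Running on k8s-node1 or k8s-node2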