Deploying a Multi-Node Kubernetes 1.17.4 Cluster with Shell Scripts


I know this document will rarely be used here in Henan; at most, a colleague in the field might chat with me about what could still be improved. But it is the summary of my work and study, and I do not want it to sit on a shelf unused.

Henan: every time I come back I am disappointed, and every stretch of time I live here I despair!

This deployment involves four shell scripts, one configuration file, and one note on managing the Kubernetes cluster. All six files are assumed to be stored under /root/tmp on the target hosts, and all operations take place in /root/tmp.
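
As a rough sketch of that staging step (the host names below are only examples; they match the commented /etc/hosts entries in installdockerkubeadmkubeletkubectl.sh):

# run from the machine that holds the six files
for h in kubernetes-master kubernetes-worker1 kubernetes-worker2; do
    ssh root@$h 'mkdir -p /root/tmp'
    scp initOSforCentOS71804.sh installdockerkubeadmkubeletkubectl.sh deploymentonmaster.sh \
        deploymentonworker.sh account.yaml kubernetesClusterManage.txt root@$h:/root/tmp/
    ssh root@$h 'chmod +x /root/tmp/*.sh'
done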

In my ESXi environment, the IP address range is 192.168.207.0/24.
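
For illustration only, a hypothetical address plan inside that range (the specific addresses are my assumption; substitute your own), written in the /etc/hosts form that installdockerkubeadmkubeletkubectl.sh checks for:

192.168.207.10  kubernetes-master
192.168.207.11  kubernetes-worker1
192.168.207.12  kubernetes-worker2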

The contents of the files are as follows:

initOSforCentOS71804.sh

mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.original
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache
yum -y update
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config && setenforce 0
wget https://mirrors.aliyun.com/epel/epel-release-latest-7.noarch.rpm
yum install -y https://mirrors.aliyun.com/epel/epel-release-latest-7.noarch.rpm
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i 's|^#baseurl=https://download.fedoraproject.org/pub|baseurl=https://mirrors.aliyun.com|' /etc/yum.repos.d/epel*
sed -i 's|^metalink|#metalink|' /etc/yum.repos.d/epel*
yum clean all && yum makecache
yum -y update
yum install -y ntpdate
wget https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm
yum localinstall -y google-chrome-stable_current_x86_64.rpm
ntpdate cn.ntp.org.cn
yum install -y tree
who | grep googlebigtable | sed -n '1p' | cut -d' ' -f 1 | sort | uniq
DescriptionUser=$(who | grep googlebigtable | sed -n '1p' | cut -d' ' -f 1 | sort | uniq)
echo $DescriptionUser
echo "$DescriptionUser  ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
init 6
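
# note: the who/grep lines above assume a login user named "googlebigtable", who is then
# granted passwordless sudo, and init 6 reboots the host as soon as the script finishes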

installdockerkubeadmkubeletkubectl.sh

#!/bin/sh
################ README INFO ###########################
### Purpose: install docker/kubeadm/kubelet/kubectl  ###
### Made By: PomanTeng                               ###
### E-mail: 1807479153@qq.com                        ###
### WeChat: 1807479153                               ###
### Version Identification Number: V0.00             ###
### Procedure Identification Number: 20200903        ###
########################################################

################ ATTENTION ########################################
### This script should be executed on all master & worker nodes ###
####################################################################

# load the GLOBAL ENVIRONMENT
. /etc/profile
. /etc/bashrc

# check the hostname-to-IP mapping
# grep 'IP.*kubernetes-master' /etc/hosts  || echo "IP kubernetes-master"  >> /etc/hosts
# grep 'IP.*kubernetes-worker1' /etc/hosts || echo "IP kubernetes-worker1" >> /etc/hosts
# grep 'IP.*kubernetes-worker2' /etc/hosts || echo "IP kubernetes-worker2" >> /etc/hosts

# install the essential packages
# nfs-utils for nfs network storage
yum install -y nfs-utils
# wget for downloading
yum install -y wget
# other essentials
yum install -y conntrack ipvsadm ipset

# turn off swap
swapoff -a
cp /etc/fstab{,.original}
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# the time zone is UTC+8, Beijing
ls -l /etc/localtime | grep 'Asia/Shanghai' || (rm -f /etc/localtime && ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime)
# sync the time periodically
crontab -l | grep 'ntpdate' || echo -e "# time sync\n*/10 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root
hwclock --systohc

# stop the mail service
systemctl stop postfix.service && systemctl disable postfix.service

# configure /etc/sysctl.conf and turn on ip_forward; these settings can also live in /etc/sysctl.d/k8s.conf instead
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward = 1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv4.tcp_tw_recycle.*#net.ipv4.tcp_tw_recycle = 0#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables = 1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables = 1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding = 1#g"  /etc/sysctl.conf
sed -i "s#^net.netfilter.nf_conntrack_max.*#net.netfilter.nf_conntrack_max = 2310720#g"  /etc/sysctl.conf
sed -i "s#^fs.file-max.*#fs.file-max = 52706963#g"  /etc/sysctl.conf
sed -i "s#^fs.nr_open.*#fs.nr_open = 52706963#g"    /etc/sysctl.conf

# just a check and can be ignored
# grep 'net.ipv4.ip_forward = 1' /etc/sysctl.conf                   || echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
# grep 'net.ipv4.tcp_tw_recycle = 0' /etc/sysctl.conf               || echo "net.ipv4.tcp_tw_recycle = 0" >> /etc/sysctl.conf
# grep 'net.bridge.bridge-nf-call-ip6tables = 1' /etc/sysctl.conf   || echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
# grep 'net.bridge.bridge-nf-call-iptables = 1' /etc/sysctl.conf    || echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
# grep 'net.ipv6.conf.all.forwarding = 1' /etc/sysctl.conf          || echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf
# grep 'net.netfilter.nf_conntrack_max = 2310720' /etc/sysctl.conf  || echo "net.netfilter.nf_conntrack_max = 2310720"  >> /etc/sysctl.conf
# grep 'fs.file-max = 52706963' /etc/sysctl.conf                    || echo "fs.file-max = 52706963"  >> /etc/sysctl.conf
# grep 'fs.nr_open = 52706963'  /etc/sysctl.conf                    || echo "fs.nr_open = 52706963"   >> /etc/sysctl.conf

# apply /etc/sysctl.conf; if /etc/sysctl.d/k8s.conf was used instead, execute sysctl -p /etc/sysctl.d/k8s.conf
sysctl -p /etc/sysctl.conf

# prepare for kube-proxy / ipvs
modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# uninstall docker if installed
#yum remove -y docker \
#docker-client \
#docker-client-latest \
#docker-common \
#docker-latest \
#docker-latest-logrotate \
#docker-logrotate \
#docker-selinux \
#docker-engine-selinux \
#docker-engine

# set the docker yum repository
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

## install docker version 19.03.9
yum install -y docker-ce-19.03.9

## start the docker service once so that the directory /etc/docker is created
systemctl start docker

# config the docker daemon
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF

systemctl stop docker && systemctl daemon-reload && systemctl enable docker && systemctl start docker
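
# optional check (not required): after the restart the daemon should report the systemd cgroup driver
docker info | grep -i 'cgroup driver'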

# config the kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# uninstall kubelet kubeadm kubectl if installed
#yum remove -y kubelet kubeadm kubectl

# install kubelet, kubeadm, and kubectl, version 1.17.4
yum install -y kubelet-1.17.4 kubeadm-1.17.4 kubectl-1.17.4

# restart docker and kubelet
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet

# print separator and docker version
echo "====================="
docker version

# get the necessary image list
kubeadm config images list --kubernetes-version v1.17.4 > necessaryimage.txt
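
# this file is later copied to the worker nodes (deploymentonworker.sh reads it); for
# v1.17.4 it should list roughly kube-apiserver, kube-controller-manager, kube-scheduler
# and kube-proxy at v1.17.4, plus pause:3.1, etcd:3.4.3-0 and coredns:1.6.5
cat necessaryimage.txt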

# the container image repository from Aliyun
# https://cr.console.aliyun.com/cn-shanghai/instances/images

 

deploymentonmaster.sh

#!/bin/sh
################ README INFO ###########################
### Purpose: initialize the kubernetes master node   ###
### Made By: PomanTeng                               ###
### E-mail: 1807479153@qq.com                        ###
### WeChat: 1807479153                               ###
### Version Identification Number: V0.00             ###
### Procedure Identification Number: 20200903        ###
########################################################

################ ATTENTION #################################
### This script should be executed only on the master node ###
#############################################################

# load the GLOBAL ENVIRONMENT
. /etc/profile
. /etc/bashrc

# produce the config file for kubeadm, then overwrite it with our settings
kubeadm config print init-defaults > kubeadm-config.yaml
cat > kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # change to the master node's inner IP
  advertiseAddress: THE MASTER NODE INNER IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.17.4
networking:
  dnsDomain: cluster.local
  # set the pod network range for flannel
  podSubnet: 10.244.0.0/16
  # the service VIP range, the default is 10.96.0.0/12
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# set the default proxy mode to ipvs; if ipvs was not initialized this block can be ignored
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
EOF
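
# the advertiseAddress placeholder above must be replaced with the master's real inner IP
# before running kubeadm init, for example (192.168.207.10 is only a sample address from
# my 192.168.207.0/24 range):
#   sed -i 's#THE MASTER NODE INNER IP#192.168.207.10#' kubeadm-config.yaml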

# download the necessary docker image
# src_registry="registry.aliyuncs.com/google_containers"
# src_registry="registry.cn-beijing.aliyuncs.com/google_registry"
src_registry="registry.cn-beijing.aliyuncs.com/google_registry"
images=$(kubeadm config images list --kubernetes-version v1.17.4)
# images=$(cat necessaryimage.txt)

# Loop to download the necessary docker image
for img in ${images[@]}; do
    # download the image
    docker pull ${src_registry}/$img
    # rename the image
    docker tag  ${src_registry}/$img k8s.gcr.io/$img
    # remove the source image
    docker rmi  ${src_registry}/$img
    # print separator
    echo "======== $img download OK  ========"
done
echo "********** kubernetes master docker images pull OK! **********"
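
# note: depending on the kubeadm build, `kubeadm config images list` may print fully
# qualified names such as k8s.gcr.io/kube-apiserver:v1.17.4; if the pulls above fail with
# "not found", one option is to strip the registry prefix first, e.g.
#   images=$(kubeadm config images list --kubernetes-version v1.17.4 | awk -F/ '{print $NF}')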

# kubeadm init process
# method 1
# --apiserver-advertise-address=THE MASTER INNER IP
# --service-cidr=THE SERVICE VIP RANGE 10.96.0.0/12
# --pod-network-cidr=THE pod NETWORK RANGE
#kubeadm init \
#  --apiserver-advertise-address=THE MASTER INNER IP \
#  --kubernetes-version VERSION IDENTIFIER \
#  --service-cidr=THE SERVICE VIP RANGE 10.96.0.0/12 \
#  --pod-network-cidr=THE pod NETWORK RANGE

# method 2
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
echo "********** kubeadm init OK! **********"

# config kubectl
mkdir -p $HOME/.kube/
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
echo "********** kubectl config OK! **********"

# install kube-flannel
# if you cannot reach quay.io, pull the image from the mirror instead
docker pull ${src_registry}/flannel:v0.12.0-amd64
docker tag  ${src_registry}/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
docker rmi  ${src_registry}/flannel:v0.12.0-amd64
# you may also download kube-flannel.yml and apply it: https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
# wget https://raw.githubusercontent.com/coreos/flannel/v0.12.0/Documentation/kube-flannel.yml
# kubectl apply -f kube-flannel.yml
echo "********** kube-flannel network OK! **********"
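
# reminder: pre-pulling the image does not deploy flannel by itself; the manifest referenced
# above still has to be applied once on the master, e.g.
#   kubectl apply -f kube-flannel.yml
# otherwise the nodes will stay NotReady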

# check pods status
echo "********** kubectl get pods --all-namespaces -o wide **********"
# kubectl get pods -A -o wide
kubectl get pods --all-namespaces -o wide
echo "********** kubectl get nodes **********"
kubectl get nodes
echo "********** get the join command parameters **********"
kubeadm token create --print-join-command
# kubeadm token create --print-join-command > joinmessage.txt

ifconfig

kubectl get nodes
kubectl get nodes -o wide
kubectl get pods -A -o wide

# download Dashboard
src_registry="registry.cn-beijing.aliyuncs.com/google_registry"
images=(
    dashboard:v2.0.0-rc6
    metrics-scraper:v1.0.3
)

# Loop to download the necessary docker image
for img in ${images[@]}; do
    # download the image
    docker pull ${src_registry}/$img
    # rename the image
    docker tag  ${src_registry}/$img kubernetesui/$img
    # remove the source image
    docker rmi  ${src_registry}/$img
    # print separator
    echo "======== $img download OK  ========"
done
echo "********** k8s dashboard docker images OK! **********"

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc6/aio/deploy/recommended.yaml
# edit the downloaded recommended.yaml; only the fragments to modify are shown between the ……… markers
cat > recommended.yaml << EOF
………………
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  # EDIT
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      # EDIT
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
………………
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-rc6
          # MODIFY
          #imagePullPolicy: Always
          imagePullPolicy: IfNotPresent
………………
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.3
          # ADD
          imagePullPolicy: IfNotPresent
EOF

kubectl apply -f recommended.yaml          

# config token for Dashboard
kubectl apply -f account.yaml

deploymentonworker.sh

#!/bin/sh
################ README INFO ###########################
### Purpose: prepare the kubernetes worker nodes     ###
### Made By: PomanTeng                               ###
### E-mail: 1807479153@qq.com                        ###
### WeChat: 1807479153                               ###
### Version Identification Number: V0.00             ###
### Procedure Identification Number: 20200903        ###
########################################################

################ ATTENTION ###################################
### This script should be executed only on the worker nodes ###
###############################################################

# load the GLOBAL ENVIRONMENT
. /etc/profile
. /etc/bashrc

###############################################
# download the necessary docker images
# src_registry="registry.aliyuncs.com/google_containers"
src_registry="registry.cn-beijing.aliyuncs.com/google_registry"
# images=$(kubeadm config images list --kubernetes-version v1.17.4)
# copy the file necessaryimage.txt from the master node to this worker node
scp root@MASTERIP:/root/tmp/necessaryimage.txt /root/tmp/
images=$(cat necessaryimage.txt)

# Loop to download the necessary docker image
for img in ${images[@]}; do
    # download the image
    docker pull ${src_registry}/$img
    # rename the image
    docker tag  ${src_registry}/$img k8s.gcr.io/$img
    # remove the source image
    docker rmi  ${src_registry}/$img
    # print separator
    echo "======== $img download OK  ========"
done
echo "********** kubernetes worker docker images pull OK! **********"

# install kube-flannel
# if you cannot reach quay.io, pull the image from the mirror instead
docker pull ${src_registry}/flannel:v0.12.0-amd64
docker tag  ${src_registry}/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
docker rmi  ${src_registry}/flannel:v0.12.0-amd64
# you may also download kube-flannel.yml and apply it: https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
# wget https://raw.githubusercontent.com/coreos/flannel/v0.12.0/Documentation/kube-flannel.yml
# kubectl apply -f kube-flannel.yml
echo "********** kube-flannel network OK! **********"

# download Dashboard
src_registry="registry.cn-beijing.aliyuncs.com/google_registry"
images=(
    dashboard:v2.0.0-rc6
    metrics-scraper:v1.0.3
)

# Loop to download the necessary docker image
for img in ${images[@]}; do
    # download the image
    docker pull ${src_registry}/$img
    # rename the image
    docker tag  ${src_registry}/$img kubernetesui/$img
    # remove the source image
    docker rmi  ${src_registry}/$img
    # print separator
    echo "======== $img download OK  ========"
done
echo "********** k8s dashboard docker images OK! **********"
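
# this script only stages images; the worker still has to join the cluster with the command
# printed on the master by `kubeadm token create --print-join-command`, roughly of the form
#   kubeadm join 192.168.207.10:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
# where 192.168.207.10 is an example master address and <token>/<hash> come from the master's output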

 

account.yaml

# Create Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# Create ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
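
# note: binding cluster-admin gives this ServiceAccount (and anyone holding its token)
# full control of the cluster, so keep the Dashboard token private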

 

kubernetesClusterManage.txt

# execute on the master node
kubectl get nodes
kubectl get nodes -o wide
kubectl get pods -A -o wide
kubectl version

############# REMOVE A WORKER NODE #############
# execute on the target node
kubeadm reset
# execute on the master node
kubectl delete node TARGET NODE IDENTIFIER
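
# optionally, before the steps above, drain the node from the master
# (kubernetes-worker1 is only an example node name)
kubectl drain kubernetes-worker1 --ignore-daemonsets --delete-local-data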

############# START DASHBOARD #############
# execute on the master node
kubectl get pods -A -o wide
kubectl get pods -n kubernetes-dashboard -o wide
kubectl get services --all-namespaces
kubectl get services --namespace=kubernetes-dashboard

https://MASTERIP:30001/

############# TOKEN FOR DASHBOARD #############
# execute on the master node
kubectl get clusterrolebinding
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

 
