https://www.cnblogs.com/wjhlinux/p/14422021.html
# 导出本机所有镜像到 /k8s1.21, 每个镜像一个 tar 包 (文件名取 repo 末段, 如 kube-apiserver.tar)
# 注意: 原写法 `docker images | grep $i` 是子串匹配, 同名前缀/多 tag 时 $j 会包含多行导致 docker save 失败;
# 这里改用 --format 一次性取出 repo 和 tag, 一一对应。
mkdir -p /k8s1.21 && cd /k8s1.21 || exit 1
docker images --format '{{.Repository}} {{.Tag}}' | grep -v '<none>' | while read -r repo tag; do
  docker save "${repo}:${tag}" > "${repo##*/}.tar"
done
# 分发 images: 将 tar 包拷贝到各节点后, 在每台机器上执行导入。
# 用 glob 代替 `for i in \`ls\`` (解析 ls 输出在文件名含空格/特殊字符时会出错), 并只导入 .tar 文件。
cd /k8s1.21/ || exit 1
for f in ./*.tar; do
  docker load < "$f"
done
导出的镜像内容为:
[root@k8s-master01 k8s1.21]# du -ah 121M ./kube-apiserver.tar 119M ./kube-proxy.tar 116M ./kube-controller-manager.tar 50M ./kube-scheduler.tar 42M ./coredns.tar 243M ./etcd.tar 186M ./node.tar 9.3M ./pod2daemon-flexvol.tar 157M ./cni.tar 47M ./kube-controllers.tar 178M ./rabbitmq.tar 680K ./pause-amd64.tar 1.3G .
序号 | 主机名 | ip地址 | 备注说明 |
---|---|---|---|
1 | k8s-master01 | 10.110.83.231 | master |
2 | k8s-master02 | 10.110.83.232 | master |
3 | k8s-master03 | 10.110.83.233 | master |
4 | k8s-node01 | 10.110.83.234 | node |
5 | k8s-node02 | 10.110.83.235 | node |
6 | k8s-master | 10.110.83.230 | vip |
创建虚拟机 进行主机配置, 包含修改内核参数, 互信ssh等操作,然后导入images 等操作. 修改 /etc/hosts 文件 添加 网络设置里面的 六个机器 主机名和 ip地址. 注意建议使用 centos7 的版本,然后升级到 kernel 4.19 的内核版本
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime echo 'Asia/Shanghai' >/etc/timezone ntpdate time2.aliyun.com cat <<EOF > /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ enabled=1 gpgcheck=1 repo_gpgcheck=1 gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF
systemctl disable firewalld && systemctl disable dnsmasq swapoff -a && sysctl -w vm.swappiness=0 sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git ntpdate keepalived haproxy -y
注意前面两处需要手工添加 第三处 直接cat 执行命令即可.
vim /etc/security/limits.conf * soft nofile 655360 * hard nofile 655360 * soft nproc 655350 * hard nproc 655350 * soft memlock unlimited * hard memlock unlimited vim /etc/modules-load.d/ipvs.conf ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack ip_tables ip_set xt_set ipt_set ipt_rpfilter ipt_REJECT ipip cat <<EOF > /etc/sysctl.d/k8s.conf net.ipv4.ip_forward = 1 net.bridge.bridge-nf-call-iptables = 1 net.bridge.bridge-nf-call-ip6tables = 1 fs.may_detach_mounts = 1 vm.overcommit_memory=1 vm.panic_on_oom=0 fs.inotify.max_user_watches=89100 fs.file-max=52706963 fs.nr_open=52706963 net.netfilter.nf_conntrack_max=2310720 net.ipv4.tcp_keepalive_time = 600 net.ipv4.tcp_keepalive_probes = 3 net.ipv4.tcp_keepalive_intvl =15 net.ipv4.tcp_max_tw_buckets = 36000 net.ipv4.tcp_tw_reuse = 1 net.ipv4.tcp_max_orphans = 327680 net.ipv4.tcp_orphan_retries = 3 net.ipv4.tcp_syncookies = 1 net.ipv4.tcp_max_syn_backlog = 16384 net.ipv4.ip_conntrack_max = 65536 net.ipv4.tcp_timestamps = 0 net.core.somaxconn = 16384 EOF
ssh-keygen ssh-copy-id root@10.110.83.230 ssh-copy-id root@127.0.0.1
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm yum localinstall -y kernel-ml* grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)" # 设置完成 执行重启 reboot
yum install docker-ce-19.03.* -y 安装完成后需要设置为开机自动启动 systemctl enable docker && systemctl restart docker # 修改参数,不然启动会报错, 我这边就忘记修改报错了, 原作者写的很详细 cat > /etc/docker/daemon.json <<EOF { "exec-opts": ["native.cgroupdriver=systemd"] } EOF 安装 kubeadm yum install kubeadm -y 修改使用阿里云镜像 cat >/etc/sysconfig/kubelet<<EOF KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2" EOF 设置开机启动 systemctl enable kubelet && systemctl restart kubelet
vim /etc/haproxy/haproxy.cfg # 添加内容为: 注意需要根据ip 规划修改自己的ip地址等内容. global maxconn 2000 ulimit-n 16384 log 127.0.0.1 local0 err stats timeout 30s defaults log global mode http option httplog timeout connect 5000 timeout client 50000 timeout server 50000 timeout http-request 15s timeout http-keep-alive 15s frontend monitor-in bind *:33305 mode http option httplog monitor-uri /monitor frontend k8s-master bind 0.0.0.0:16443 bind 127.0.0.1:16443 mode tcp option tcplog tcp-request inspect-delay 5s default_backend k8s-master backend k8s-master mode tcp option tcplog option tcp-check balance roundrobin default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100 server k8s-master01 10.110.83.231:6443 check server k8s-master02 10.110.83.232:6443 check server k8s-master03 10.110.83.233:6443 check
vim /etc/keepalived/keepalived.conf ! Configuration File for keepalived global_defs { router_id LVS_DEVEL script_user root enable_script_security } vrrp_script chk_apiserver { script "/etc/keepalived/check_apiserver.sh" interval 5 weight -5 fall 2 rise 1 } vrrp_instance VI_1 { state MASTER interface ens33 # 这里要修改成ifconfig 查出来本地局域网ip地址对应的网卡信息 mcast_src_ip 10.110.82.23x #这里需要修改为具体的master机器地址 virtual_router_id 51 priority 101 advert_int 2 authentication { auth_type PASS auth_pass K8SHA_KA_AUTH } virtual_ipaddress { 10.110.83.230 #这里需要修改成 vip 的地址 } track_script { chk_apiserver } }
注意 最后的 track 脚本需要手动添加 主要内容如下:
注意 需要添加可执行权限: chmod +x /etc/keepalived/check_apiserver.sh
vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
# keepalived 的 vrrp_script chk_apiserver 调用本脚本:
# 连续 3 次 (间隔 1s) 检测 haproxy 进程; 若 3 次都不存在, 则停止本机 keepalived,
# 使 VIP 漂移到其它 master 节点。haproxy 存活则立即以 0 退出。
err=0
for _ in 1 2 3; do
  # pgrep -x 精确匹配进程名, 避免匹配到名字包含 haproxy 的其它进程
  if pgrep -x haproxy >/dev/null; then
    err=0
    break
  fi
  err=$((err + 1))
  sleep 1
done

if (( err != 0 )); then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
fi
exit 0
设置高可用服务自动启动
systemctl enable keepalived && systemctl enable haproxy
vim kubeadm-config.yaml apiVersion: kubeadm.k8s.io/v1beta2 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token token: 7t2weq.bjbawausm0jaxury ttl: 24h0m0s usages: - signing - authentication kind: InitConfiguration localAPIEndpoint: advertiseAddress: 10.110.83.231 bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock name: k8s-master01 taints: - effect: NoSchedule key: node-role.kubernetes.io/master --- apiServer: certSANs: - 10.110.83.230 timeoutForControlPlane: 4m0s apiVersion: kubeadm.k8s.io/v1beta2 certificatesDir: /etc/kubernetes/pki clusterName: kubernetes controlPlaneEndpoint: 10.110.83.230:16443 controllerManager: {} dns: type: CoreDNS etcd: local: dataDir: /var/lib/etcd imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers kind: ClusterConfiguration kubernetesVersion: v1.21.0 networking: dnsDomain: cluster.local podSubnet: 172.168.0.0/12 serviceSubnet: 10.96.0.0/12 scheduler: {}
执行镜像拉取的命令为:
kubeadm config images pull --config /root/kubeadm-config.yaml
kubeadm init --config /root/kubeadm-config.yaml --upload-certs
To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config Alternatively, if you are the root user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ You can now join any number of the control-plane node running the following command on each as root: kubeadm join 10.110.83.200:16443 --token 7t2weq.bjbawausm0jaxury \ --discovery-token-ca-cert-hash sha256:cd36fbf1a304695af05c9438e2ea1388d0f9915fa4cda8d09ab9e65f6f6cd1d3 \ --control-plane --certificate-key 79a90c89ec3e5a3d8cdf01888b65a1564dcc50d7a62fe1d625df8e9b16f223f8 Please note that the certificate-key gives access to cluster sensitive data, keep it secret! As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward. Then you can join any number of worker nodes by running the following on each as root: kubeadm join 10.110.83.200:16443 --token 7t2weq.bjbawausm0jaxury \ --discovery-token-ca-cert-hash sha256:cd36fbf1a304695af05c9438e2ea1388d0f9915fa4cda8d09ab9e65f6f6cd1d3
[root@k8s-master01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION k8s-master01 NotReady control-plane,master 174m v1.21.0 k8s-master02 NotReady control-plane,master 172m v1.21.0 k8s-master03 NotReady control-plane,master 171m v1.21.0 k8s-node01 NotReady <none> 170m v1.21.0 k8s-node02 NotReady <none> 170m v1.21.0
拉取相关文件: wget https://docs.projectcalico.org/v3.8/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml 在当前目录下执行 kubectl apply -f calico.yaml
如果五个虚拟机内部都有了相关的 images, 一会儿之后就可以使用了, 如下:
[root@k8s-master01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION k8s-master01 Ready control-plane,master 5h22m v1.21.0 k8s-master02 Ready control-plane,master 5h21m v1.21.0 k8s-master03 Ready control-plane,master 5h20m v1.21.0 k8s-node01 Ready <none> 5h18m v1.21.0 k8s-node02 Ready <none> 5h19m v1.21.0
wget https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz 解压缩以及将文件放到 /usr/bin 目录下即可
添加仓库
helm repo add stable http://mirror.azure.cn/kubernetes/charts helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts helm repo update
安装ingress
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx helm pull ingress-nginx/ingress-nginx
修改配置文件进行简单处理
tar -xf ingress-nginx-3.7.1.tgz && cd ingress-nginx vim values.yaml # 修改controller镜像地址 repository: registry.cn-beijing.aliyuncs.com/dotbalo/controller # dnsPolicy dnsPolicy: ClusterFirstWithHostNet # 使用hostNetwork,即使用宿主机上的端口80 443 hostNetwork: true # 使用DaemonSet,将ingress部署在指定节点上 kind: DaemonSet # 节点选择,将需要部署的节点打上ingress=true的label nodeSelector: kubernetes.io/os: linux ingress: "true" # 修改type,改为ClusterIP。如果在云环境,有loadbanace可以使用loadbanace type: ClusterIP # 修改kube-webhook-certgen镜像地址 registry.cn-beijing.aliyuncs.com/dotbalo/kube-webhook-certgen 进行安装 kubectl create ns ingress-nginx helm install ingress-nginx -n ingress-nginx . kubectl get pods -n ingress-nginx -owide
本部分学习自:
https://www.cnblogs.com/bigberg/p/13926052.html