[Lab] Kubernetes Deployment

1. Lab Preparation

1.1 Host Planning

OS           CPU   Mem                     Hosts   Roles
CentOS 7.6   4     4G (production: 8G+)    3       1 master, 2 nodes

Host list

Role     Hostname   Host IP
MASTER   vm_6_el7   192.168.23.106
NODE1    vm_7_el7   192.168.23.107
NODE2    vm_8_el7   192.168.23.108

1.2 Host System Environment

1. Hostname
[root@all ~]# hostnamectl set-hostname server_name
2. IP address + hostname resolution
[root@all ~]# cat > /etc/sysconfig/network-scripts/ifcfg-ens33 <<EOF
TYPE="Ethernet"
BOOTPROTO="static"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.23.106"
PREFIX="24"
GATEWAY="192.168.23.2"
DNS1="119.29.29.29"
EOF
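
Note: IPADDR above has to be adjusted per host (.106 / .107 / .108). After writing the file, restart the network service so the new address takes effect (an extra step, not in the original notes):

#apply the new interface configuration
[root@all ~]# systemctl restart network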

[root@all ~]# for i in {6..8}; do echo "192.168.23.10$i vm_${i}_el7" >> /etc/hosts; done
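
A quick check that name resolution works on every host (an optional verification, not part of the original steps):

#each hostname should resolve and answer one ping
[root@all ~]# for i in {6..8}; do ping -c 1 vm_${i}_el7 >/dev/null && echo "vm_${i}_el7 OK"; done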
3. SELinux

Disable SELinux

[root@all ~]# sed -i '/^SELINUX=/s/=.*/=disabled/' /etc/selinux/config
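
The sed change only takes effect after a reboot; to turn SELinux off for the current session as well, the standard commands are (an optional extra step):

#switch to permissive mode immediately
[root@all ~]# setenforce 0
#confirm the current mode
[root@all ~]# getenforce
Permissive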
4. Swap

Disable swap on every host

#temporarily disable
[root@all ~]# swapoff -a

#permanently disable: comment out the swap line
[root@all ~]# vim /etc/fstab
# /etc/fstab
# Created by anaconda on Wed Jul 24 10:32:24 2019
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=146496df-1c2b-4822-9dab-1146e14f27d5 /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0

Comment out the swap line; it takes effect after a reboot.
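
To confirm swap is actually off on each host (a simple optional check):

#the Swap line should report 0 total / 0 used
[root@all ~]# free -m | grep -i swap
Swap:             0           0           0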
5. Firewall
  1. Stop firewalld
[root@all ~]# systemctl stop firewalld
[root@all ~]# systemctl disable firewalld
[root@all ~]# firewall-cmd --state
  2. Install iptables
[root@all ~]# yum -y install iptables-services
[root@all ~]# systemctl enable iptables
[root@all ~]# systemctl start iptables
[root@all ~]# iptables -nL
  3. Add iptables rules
#flush the existing rules
[root@all ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
#set the FORWARD chain policy to ACCEPT
[root@all ~]# iptables -P FORWARD ACCEPT
#save the rules
[root@all ~]# service iptables save
6. Bridge filtering
#create the filter rules
[root@all ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0

[root@all ~]# modprobe br_netfilter
[root@all ~]# lsmod | grep br_netfilter
[root@all ~]# sysctl -p /etc/sysctl.d/k8s.conf
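
modprobe only loads br_netfilter for the current boot; to have it loaded automatically after a reboot, one common approach (an optional sketch, not in the original steps) is a modules-load.d entry:

#load br_netfilter on every boot so the bridge sysctls keep working
[root@all ~]# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf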
7. Time synchronization
[root@all ~]# ntpdate time1.aliyun.com
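
ntpdate only syncs the clock once; to keep the hosts in step you could, for example, schedule it via cron (a suggestion, not part of the original steps):

#re-sync against time1.aliyun.com every 30 minutes
[root@all ~]# (crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com >/dev/null 2>&1") | crontab -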

1.3 Cluster Deployment

1. Install Docker

1. Install dependencies

[root@all ~]# yum install -y yum-utils   device-mapper-persistent-data   lvm2

2. Add the Docker CE repo

[root@all ~]# yum-config-manager --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

3. List the available versions (sorted) and pick one to install

[root@all ~]# yum list docker-ce.x86_64 --showduplicates | sort -r

4. Install the chosen Docker version

[root@all ~]# yum install -y --setopt=obsoletes=0 docker-ce-18.06.1.ce-3.el7

5. Enable and start Docker

[root@all ~]# systemctl enable docker
[root@all ~]# systemctl start docker
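
A quick sanity check that Docker is up, and a look at its cgroup driver (it is matched to kubelet in section 1.4 below):

#should report Server version 18.06.1-ce
[root@all ~]# docker version
#note the cgroup driver; it is reused for kubelet later
[root@all ~]# docker info | grep -i cgroup
Cgroup Driver: cgroupfs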

2. Kubernetes cluster packages (all nodes)

Software    Version   Purpose
kubeadm     1.15.1    initializes and manages the cluster
kubelet     1.15.1    node agent that runs the pods on every host
kubectl     1.15.1    command-line client for the cluster
docker-ce   18.06.1   container runtime

Configure the Aliyun Kubernetes yum repo

[root@all ~]# cat > /etc/yum.repos.d/k8s.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install the packages

#check the available versions
[root@all ~]# yum list|grep kube
#install (pinned to 1.15.1 so the packages match the image versions used below)
[root@all ~]# yum -y install kubeadm-1.15.1 kubelet-1.15.1 kubectl-1.15.1
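
After installation, a quick check that all three tools report the expected 1.15.1 version:

[root@all ~]# kubeadm version -o short
v1.15.1
[root@all ~]# kubelet --version
Kubernetes v1.15.1
[root@all ~]# kubectl version --client --short
Client Version: v1.15.1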

3. Container image preparation

For the master node

[The node hosts can use this set too; some of the images simply will not be used there.]

Download the official images (this requires a connection that can reach k8s.gcr.io); if you already have the image tarball, skip the download steps and go straight to the import step below.

#list the images that are needed (different k8s versions need different images)
[root@node1 ~]# kubeadm config images list

#export the image list to a file
[root@node1 ~]# kubeadm config images list >> images.list

[root@node1 ~]# cat images.list
k8s.gcr.io/kube-apiserver:v1.15.1
k8s.gcr.io/kube-controller-manager:v1.15.1
k8s.gcr.io/kube-scheduler:v1.15.1
k8s.gcr.io/kube-proxy:v1.15.1
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1

#script that pulls the images in the list and saves them into one tarball
[root@node1 ~]# vim images.pull
#!/bin/bash
img_list='
k8s.gcr.io/kube-apiserver:v1.15.1
k8s.gcr.io/kube-controller-manager:v1.15.1
k8s.gcr.io/kube-scheduler:v1.15.1
k8s.gcr.io/kube-proxy:v1.15.1
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1'

#pull each image in the list
for img in $img_list
do
        docker pull $img
done

#save all of the images into a single tarball
docker save -o k8s-1-15-1-img.tar $img_list

#run the script
[root@node1 ~]# sh images.pull

===========================================
If you already have the tarball, just import it directly
#import
#copy the tarball to every host first
[root@all ~]# docker load -i k8s-1-15-1-img.tar
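
If the tarball only exists on the host that pulled the images, it has to be copied to the other machines before the load; a simple way (the paths and host list here are just an example) is scp, followed by a check that the images are present:

#copy the tarball from node1 (.107) to the other two hosts
[root@node1 ~]# for i in 6 8; do scp k8s-1-15-1-img.tar 192.168.23.10$i:/root/; done
#after docker load, the k8s.gcr.io images should show up
[root@all ~]# docker images | grep k8s.gcr.io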

For the node hosts

#images the node hosts need to import
k8s.gcr.io/kube-proxy:v1.15.1
quay.io/calico/typha:v3.3.2
k8s.gcr.io/pause:3.1
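
If you want a smaller tarball containing only the node-side images, one option (a sketch; the file name is just an example) is:

#calico/typha is not part of the k8s.gcr.io tarball above, so pull it separately
[root@node1 ~]# docker pull quay.io/calico/typha:v3.3.2
#save only the images the nodes need
[root@node1 ~]# docker save -o k8s-1-15-1-node-img.tar \
    k8s.gcr.io/kube-proxy:v1.15.1 \
    quay.io/calico/typha:v3.3.2 \
    k8s.gcr.io/pause:3.1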

1.4 Cluster Initialization (Cluster Configuration)

1. Configure kubelet [all nodes]

[root@all ~]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/etc/systemd/system/kubelet.service; disabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: inactive (dead)
     Docs: https://kubernetes.io/docs/

[root@all ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
[root@all ~]# DOCKER_CGROUP=$(docker info | grep Cgroup | awk '{print $3}')
[root@all ~]# echo $DOCKER_CGROUP
cgroupfs
[root@all ~]# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=
[root@all ~]# echo "KUBELET_EXTRA_ARGS=--cgroup-driver=$DOCKER_CGROUP" >/etc/sysconfig/kubelet
[root@all ~]# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=--cgroup-driver=cgroupfs

[root@all ~]# systemctl daemon-reload

2. Initialize the cluster [master]

[root@master ~]# kubeadm init \
--kubernetes-version=v1.15.1 \
--pod-network-cidr=172.16.0.0/16 \
--apiserver-advertise-address=192.168.23.106

    ###
    --kubernetes-version           the Kubernetes version being installed
    --pod-network-cidr             the network segment used by the pods
    --apiserver-advertise-address  the address the API server advertises on
#possible error here: the required ports are already in use
	#fix: run [root@master ~]# kubeadm reset, then answer y and press Enter to reset


#the output looks like the following (the cluster can be configured by following it):
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#when running these commands, skip sudo since we are already working as root.
#network initialization
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
#join nodes to the cluster
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.122.10:6443 --token 3pcx3r.9irr7z8axuri9775 --discovery-token-ca-cert-hash sha256:502a3ead462c02f55dbdb8892070c8f8508aa1dd210409b7aaf3f7922364556e

Fetch the admin kubeconfig

[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
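
At this point kubectl can talk to the cluster; the master will stay NotReady until the network add-on below is applied (an optional check):

#the master should be listed; STATUS remains NotReady until calico is installed
[root@master ~]# kubectl get nodes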

Network initialization

[root@master ~]# cd calico/
[root@master calico]# ls
calico.yaml rbac-kdd.yaml
#grant the Calico components the permissions they need in the cluster
[root@master calico]# kubectl apply -f rbac-kdd.yaml
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created

Edit the calico.yaml file

Edit lines 96 and 278 of calico.yaml (in this release these typically correspond to the Typha settings and to CALICO_IPV4POOL_CIDR, which must match the --pod-network-cidr used above, 172.16.0.0/16).

[root@node1 calico]# cat calico.yaml
# Calico Version v3.3.2
# https://docs.projectcalico.org/v3.3/releases#v3.3.2
# This manifest includes the following component versions:
# calico/node:v3.3.2
# calico/cni:v3.3.2
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha
  # replicas below.

Apply calico.yaml to create the network Pods

#create the network Pods
[root@master calico]# kubectl apply -f calico.yaml
configmap/calico-config created
service/calico-typha created
deployment.apps/calico-typha created
poddisruptionbudget.policy/calico-typha created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
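
To finish the cluster, run the kubeadm join command that kubeadm init printed on each node, then verify from the master (the address below follows the host plan above; the token and hash are the ones from your own init output, and a fresh join command can be printed if they have expired):

#on each node, join the cluster with the command from the init output
[root@node ~]# kubeadm join 192.168.23.106:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
#if the token was lost or has expired, regenerate the join command on the master
[root@master ~]# kubeadm token create --print-join-command
#verify from the master: all nodes should become Ready, all kube-system pods Running
[root@master ~]# kubectl get nodes
[root@master ~]# kubectl get pods -n kube-system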