Kubernetes Cluster Setup


Base environment

System environment

# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)

Hostname assignments

centos-master   192.168.59.135
centos-minion1  192.168.59.132
centos-minion2  192.168.59.133

Disable SELinux and firewalld, then reboot the servers

# systemctl stop firewalld
# systemctl disable firewalld
# setenforce 0
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
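To confirm the change took effect after the reboot, a quick check like the following (not part of the original steps) can be run on each node:

# getenforce                      # should print Disabled after the reboot
# sestatus | head -n 1            # SELinux status: disabled
# systemctl is-enabled firewalld  # should print disabled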

Install and deploy the etcd cluster on all three nodes

# yum install etcd -y

Installed version:
# rpm -qa | grep etcd
etcd-3.2.7-1.el7.x86_64

Configure etcd (/etc/etcd/etcd.conf)

Master etcd configuration
# cat /etc/etcd/etcd.conf | grep -Ev "^#|^$"
ETCD_NAME=centos-master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.135:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.135:2379"

Minion2 etcd configuration
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.133:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.133:2379"

Minion1 etcd configuration
# grep -Ev "^#|^$" /etc/etcd/etcd.conf
ETCD_NAME=centos-minion1
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER="centos-master=http://192.168.59.135:2380,centos-minion2=http://192.168.59.133:2380,centos-minion1=http://192.168.59.132:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.59.132:2379"
Start the etcd cluster (on all three nodes) and check its status (from any one of them)

# Start etcd
# systemctl start etcd
# systemctl enable etcd

# Check status
# etcdctl member list
10a23ff41e3abcb8: name=centos-minion1 peerURLs=http://192.168.59.132:2380 clientURLs=http://192.168.59.132:2379 isLeader=false
168ea6ce7632b2e4: name=centos-minion2 peerURLs=http://192.168.59.133:2380 clientURLs=http://192.168.59.133:2379 isLeader=true
587d83f824bf96c6: name=centos-master peerURLs=http://192.168.59.135:2380 clientURLs=http://192.168.59.135:2379 isLeader=false

# etcdctl cluster-health
member 10a23ff41e3abcb8 is healthy: got healthy result from http://192.168.59.132:2379
member 168ea6ce7632b2e4 is healthy: got healthy result from http://192.168.59.133:2379
member 587d83f824bf96c6 is healthy: got healthy result from http://192.168.59.135:2379
cluster is healthy
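As an extra sanity check (not in the original article), a test key written on one node should be readable from another; the key name here is arbitrary:

[root@centos-master ~]# etcdctl set /test/ping pong
pong
[root@centos-minion1 ~]# etcdctl get /test/ping
pong
[root@centos-master ~]# etcdctl rm /test/ping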

Install and deploy the Kubernetes master node

# yum install kubernetes -y

Installed version:
# rpm -qa | grep kubernetes
kubernetes-client-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-master-1.5.2-0.7.git269f928.el7.x86_64
kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
Configure the Kubernetes API server (/etc/kubernetes/apiserver)
# cat /etc/kubernetes/apiserver | grep -Ev "^#|^$"
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
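This points the API server only at the local etcd member on the master. Since all three nodes run etcd, --etcd-servers also accepts a comma-separated list, so a variant like the one below (a suggestion of mine, not from the original configuration) lets the API server keep working if a single etcd member goes down:

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.59.135:2379,http://192.168.59.132:2379,http://192.168.59.133:2379"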
Configure the common Kubernetes settings (/etc/kubernetes/config)
# cat /etc/kubernetes/config | grep -Ev "^#|^$"
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://centos-master:8080"
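KUBE_MASTER refers to the master by hostname, so every node must be able to resolve centos-master (and the other node names). If no internal DNS is available, /etc/hosts entries like these on each node will do; the original article does not show this step, so treat it as an assumption about the environment:

192.168.59.135  centos-master
192.168.59.132  centos-minion1
192.168.59.133  centos-minion2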

Install the Kubernetes minion nodes (minion1 and minion2)

# yum install flannel docker kubernetes -y
Configure flannel (/etc/sysconfig/flanneld); FLANNEL_ETCD_PREFIX must match the key written with etcdctl mk in the startup step below.
# grep -Ev "^#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.59.133:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
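As with the API server, FLANNEL_ETCD_ENDPOINTS accepts a comma-separated list, so listing all three etcd members (again a suggestion, not part of the original configuration) avoids depending on a single node:

FLANNEL_ETCD_ENDPOINTS="http://192.168.59.135:2379,http://192.168.59.132:2379,http://192.168.59.133:2379"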
Configure the kubelet (/etc/kubernetes/kubelet); on minion1, set --hostname-override=centos-minion1 instead.
# grep -Ev "^#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=centos-minion2"
KUBELET_API_SERVER="--api-servers=http://centos-master:8080"
# Fill in your own registry address below; if the node can pull from the public registry, keep the default entry
# KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=192.168.59.133:5000/pod-infrastructure:latest"
# Fill in your DNS and network information below
KUBELET_ARGS="--cluster-dns=192.168.51.198 --cluster-domain=atomic.io/network"

Start the services

Kubernetes master:
for SERVICES in kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES -l
done

Write the flannel network configuration into etcd:
# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
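Before starting the minions, it is worth verifying (a check added here, not in the original steps) that the flannel key is in place and the master components report healthy:

# etcdctl get /atomic.io/network/config
{"Network":"172.17.0.0/16"}
# kubectl get componentstatuses    # scheduler, controller-manager and etcd should all be Healthy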
Kubernetes minions:
for SERVICES in kube-proxy kubelet docker flanneld; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
done
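Once flanneld is up on a minion it writes its subnet lease to /run/flannel/subnet.env and registers it in etcd. The checks below are an extra verification step of mine, and docker may need a restart if it came up before flanneld obtained its lease:

# cat /run/flannel/subnet.env              # FLANNEL_NETWORK / FLANNEL_SUBNET / FLANNEL_MTU
# etcdctl ls /atomic.io/network/subnets    # one entry per minion
# systemctl restart docker                 # only if docker0 is not inside the flannel subnet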

Check node status (on the master)

# kubectl get nodes
NAME             STATUS    AGE
centos-minion1   Ready     1h
centos-minion2   Ready     1h

Check the flannel interface

[root@centos-minion1 ~]# ifconfig flannel0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.34.0  netmask 255.255.0.0  destination 172.17.34.0
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

[root@centos-minion2 ~]# ifconfig flannel0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.59.0  netmask 255.255.0.0  destination 172.17.59.0
        inet6 fe80::2d54:2169:1a0:d364  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 144 (144.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
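A simple way to confirm the overlay network works (not shown in the original article) is to ping one minion's flannel0 address from the other minion:

[root@centos-minion1 ~]# ping -c 2 172.17.59.0    # minion2's flannel0 address
[root@centos-minion2 ~]# ping -c 2 172.17.34.0    # minion1's flannel0 address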

A simple test (create a Pod, a Service, and an RC on the master)

# ls
http-pod.yaml  http-rc.yaml  http-service.yaml

# cat http-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: http-pod
  labels:
    name: http-pod
spec:
  containers:
  - name: http
    image: 192.168.59.133:5000/centos6-http
    ports:
    - containerPort: 80

# cat http-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: http-service
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30001
  selector:
    name: http-pod

# cat http-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: http-rc
spec:
  replicas: 2
  selector:
    name: http-pod
  template:
    metadata:
      labels:
        name: http-pod
    spec:
      containers:
      - name: http-pod
        image: 192.168.59.133:5000/centos6-http
        ports:
        - containerPort: 80
Create the Pod
# kubectl create -f http-pod.yaml
pod "http-pod" created
# kubectl get pods
NAME       READY     STATUS    RESTARTS   AGE
http-pod   1/1       Running   0          4s
Create the Service
# kubectl create -f http-service.yaml
service "http-service" created
# kubectl get service
NAME           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
http-service   10.254.235.49   <nodes>       80:30001/TCP   5s
kubernetes     10.254.0.1      <none>        443/TCP        1d
Check which node the pod was scheduled onto
# kubectl describe service
Name:                   http-service
Namespace:              default
Labels:                 <none>
Selector:               name=http-pod
Type:                   NodePort
IP:                     10.254.235.49
Port:                   <unset> 80/TCP
NodePort:               <unset> 30001/TCP
Endpoints:              172.17.59.3:80    # this is a flannel address; the pod is on minion2
Session Affinity:       None
No events.

Name:                   kubernetes
Namespace:              default
Labels:                 component=apiserver
                        provider=kubernetes
Selector:               <none>
Type:                   ClusterIP
IP:                     10.254.0.1
Port:                   https 443/TCP
Endpoints:              192.168.59.135:6443
Session Affinity:       ClientIP
No events.

Accessing the pod through the NodePort returns the default httpd welcome page:
# curl http://192.168.59.133:30001/
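The service is also reachable through its cluster IP from any cluster node (kube-proxy programs this on every node), and the NodePort answers on both minions, not only the one hosting the pod; these extra curl calls are mine, not from the original test:

# curl http://10.254.235.49/
# curl http://192.168.59.132:30001/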
Create the RC
# kubectl create -f http-rc.yaml
replicationcontroller "http-rc" created
# kubectl get rc
NAME      DESIRED   CURRENT   READY     AGE
http-rc   2         2         2         8s
# kubectl get pods    # there was one pod before, now there are two
NAME            READY     STATUS    RESTARTS   AGE
http-pod        1/1       Running   0          9m
http-rc-b24kx   1/1       Running   0          13s

Because the RC's selector (name=http-pod) matches the existing http-pod, the controller only had to create one additional pod to reach two replicas.
Now delete a pod and see whether the RC recreates it
# kubectl delete pod http-pod
pod "http-pod" deleted
# kubectl get pods
NAME            READY     STATUS    RESTARTS   AGE
http-rc-8cl5p   1/1       Running   0          2s
http-rc-b24kx   1/1       Running   0          2m
# kubectl delete pod http-rc-8cl5p http-rc-b24kx
pod "http-rc-8cl5p" deleted
pod "http-rc-b24kx" deleted
# kubectl get pods
NAME            READY     STATUS    RESTARTS   AGE
http-rc-xxtrw   1/1       Running   0          3s
http-rc-z8t9n   1/1       Running   0          3s

Pods were deleted twice; each time the RC brought the count back to two, and on testing both pods remained reachable.
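The RC can also be scaled on demand; this step is not in the original test, but kubectl scale works against a ReplicationController in this version:

# kubectl scale rc http-rc --replicas=3
# kubectl get pods    # a third http-rc-* pod should appear
# kubectl scale rc http-rc --replicas=2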
Check the pod descriptions
# kubectl describe pod
Name:           http-rc-xxtrw
Namespace:      default
Node:           centos-minion2/192.168.59.133
Start Time:     Tue, 31 Oct 2017 16:05:51 +0800
Labels:         name=http-pod
Status:         Running
IP:             172.17.59.4
Controllers:    ReplicationController/http-rc
Containers:
  http-pod:
    Container ID:       docker://a3338c455a27540c8f7b7b3f01fa3862b1082f7ae47e9b3761610b4a6043245b
    Image:              192.168.59.133:5000/centos6-http
    Image ID:           docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
    Port:               80/TCP
    State:              Running
      Started:          Tue, 31 Oct 2017 16:05:53 +0800
    Ready:              True
    Restart Count:      0
    Volume Mounts:      <none>
    Environment Variables:      <none>
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
No volumes.
QoS Class:      BestEffort
Tolerations:    <none>
Events:
  FirstSeen  LastSeen  Count  From                      SubObjectPath              Type    Reason     Message
  ---------  --------  -----  ----                      -------------              ------  ------     -------
  2m         2m        1      {default-scheduler }                                 Normal  Scheduled  Successfully assigned http-rc-xxtrw to centos-minion2
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Pulling    pulling image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Pulled     Successfully pulled image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Created    Created container with docker id a3338c455a27; Security:[seccomp=unconfined]
  2m         2m        1      {kubelet centos-minion2}  spec.containers{http-pod}  Normal  Started    Started container with docker id a3338c455a27

Name:           http-rc-z8t9n
Namespace:      default
Node:           centos-minion1/192.168.59.132
Start Time:     Tue, 31 Oct 2017 16:05:52 +0800
Labels:         name=http-pod
Status:         Running
IP:             172.17.34.3
Controllers:    ReplicationController/http-rc
Containers:
  http-pod:
    Container ID:       docker://6b4fbca3f6a8690f24fe749556323a6be85f5122f378a076a8bf9d0556a89b6e
    Image:              192.168.59.133:5000/centos6-http
    Image ID:           docker-pullable://192.168.59.133:5000/centos6-http@sha256:545cbb5dda7db142f958ec4550a4dcb6daed47863c78dc38206c39bfa0b5e715
    Port:               80/TCP
    State:              Running
      Started:          Tue, 31 Oct 2017 16:05:54 +0800
    Ready:              True
    Restart Count:      0
    Volume Mounts:      <none>
    Environment Variables:      <none>
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
No volumes.
QoS Class:      BestEffort
Tolerations:    <none>
Events:
  FirstSeen  LastSeen  Count  From                      SubObjectPath              Type    Reason     Message
  ---------  --------  -----  ----                      -------------              ------  ------     -------
  2m         2m        1      {default-scheduler }                                 Normal  Scheduled  Successfully assigned http-rc-z8t9n to centos-minion1
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Pulling    pulling image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Pulled     Successfully pulled image "192.168.59.133:5000/centos6-http"
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Created    Created container with docker id 6b4fbca3f6a8; Security:[seccomp=unconfined]
  2m         2m        1      {kubelet centos-minion1}  spec.containers{http-pod}  Normal  Started    Started container with docker id 6b4fbca3f6a8
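To tear the test environment down afterwards (cleanup is not covered in the original article), deleting the RC removes its pods as well:

# kubectl delete rc http-rc
# kubectl delete service http-service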