Compiling and Installing Kubernetes 1.15.3


Environment:

OS: Windows 10 running Ubuntu 18.04.3 LTS on WSL (GNU/Linux 4.4.0-18362-Microsoft x86_64)
Go version: go1.12.9 linux/amd64
GCC version: 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)
Make version: 4.1
Deployment servers: etcd 192.168.30.31, kubernetes 192.168.30.32
Server OS: CentOS 7.6
Application deployment directory: /apps/<application>
Kubernetes service CIDR: 10.66.0.0/16
Pod CIDR: 10.67.0.0/16
kube-router provides the cluster network.

Go Environment Setup

cd /usr/local/src
wget https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz
tar -xvf go1.12.9.linux-amd64.tar.gz
mv go ../
vi /etc/profile
export GOPATH=/mnt/e/work/go
export GOBIN=/mnt/e/work/go/bin
PATH=$PATH:/usr/local/go/bin:$HOME/bin:$GOBIN
export PATH
source /etc/profile
go version
root@Qist:~# go version
go version go1.12.9 linux/amd64
# Create the Go workspace with bin, src, and pkg directories
mkdir -p /mnt/e/work/go/{bin,src,pkg}
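
Before moving on, it is worth confirming the toolchain and workspace variables actually took effect; a minimal check (the hello.go file below is just a throwaway example, not part of the build):

go env GOPATH GOBIN
# Compile and run a trivial program end to end
cat > /tmp/hello.go <<'EOF'
package main

import "fmt"

func main() { fmt.Println("go toolchain OK") }
EOF
go run /tmp/hello.go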

Install Build Dependencies for Kubernetes 1.15.3

apt -y install make gcc

Build Kubernetes 1.15.3

wget https://github.com/kubernetes/kubernetes/archive/v1.15.3.tar.gz
tar -xvf v1.15.3.tar.gz
cd kubernetes-1.15.3/
make
cd ./_output/local/bin/linux/amd64
mkdir -p /mnt/e/work/k8s/bin
cp -pdr kube* /mnt/e/work/k8s/bin/
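
A bare `make` builds every binary. If a single component needs rebuilding later (for example after a patch), the upstream Makefile supports a WHAT= selector, so a sketch like this avoids a full rebuild:

# Rebuild only the kubelet; output lands in the same _output/local/bin/linux/amd64 tree
make all WHAT=cmd/kubelet GOFLAGS=-v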

Build the Certificate Generation Tools

go get github.com/cloudflare/cfssl/cmd/cfssl
go get github.com/cloudflare/cfssl/cmd/cfssljson
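
Because GOBIN is already on PATH, both tools should now resolve from any directory; a quick check before generating any certificates:

which cfssl cfssljson
cfssl version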

Deploy etcd

# etcd is deployed from the release binaries; building it from source pulls in
# dependencies hosted outside the GFW and the environment setup is painful.
# On the etcd node:
wget https://github.com/etcd-io/etcd/releases/download/v3.4.0/etcd-v3.4.0-linux-amd64.tar.gz
mkdir -p /apps/etcd/{bin,conf,ssl,data}
# Data directory
mkdir -p /apps/etcd/data/default.etcd
# Create the etcd user
useradd etcd -s /sbin/nologin -M
# Unpack etcd
tar -xvf etcd-v3.4.0-linux-amd64.tar.gz
# Copy the binaries into the working directory
cd etcd-v3.4.0-linux-amd64/
cp -pdr etcd etcdctl /apps/etcd/bin/

# etcd certificates -- run on win on Ubuntu
mkdir -p /apps/work/k8s/cfssl/ && \
cat << EOF | tee /apps/work/k8s/cfssl/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

# Create the etcd CA CSR config
mkdir -p /apps/work/k8s/cfssl/etcd
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the etcd CA certificate and private key
mkdir -p /apps/work/k8s/cfssl/pki/etcd
cfssl gencert -initca /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd-ca

# Create the etcd server certificate config
export ETCD_SERVER_IPS=" \
    \"192.168.30.31\" \
" && \
export ETCD_SERVER_HOSTNAMES=" \
    \"etcd\" \
" && \
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_server.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    ${ETCD_SERVER_IPS},
    ${ETCD_SERVER_HOSTNAMES}
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the etcd server certificate and private key
cfssl gencert \
  -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
  -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
  -config=/apps/work/k8s/cfssl/ca-config.json \
  -profile=kubernetes \
  /apps/work/k8s/cfssl/etcd/etcd_server.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_server

# Create the etcd member certificate config
export ETCD_MEMBER_1_IP=" \
    \"192.168.30.31\" \
" && \
export ETCD_MEMBER_1_HOSTNAMES="etcd" && \
cat << EOF | tee /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    ${ETCD_MEMBER_1_IP},
    "${ETCD_MEMBER_1_HOSTNAMES}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

##### Generate the etcd member 1 certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_member_${ETCD_MEMBER_1_HOSTNAMES}

# Create the etcd client certificate config
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_client.json
{
  "CN": "client",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the etcd client certificate and private key
cfssl gencert \
  -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
  -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
  -config=/apps/work/k8s/cfssl/ca-config.json \
  -profile=kubernetes \
  /apps/work/k8s/cfssl/etcd/etcd_client.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_client

# Copy the certificates to the etcd node
scp -r /apps/work/k8s/cfssl/pki/etcd/* 192.168.30.31:/apps/etcd/ssl

# Create the etcd startup options -- on the etcd server
vi /apps/etcd/conf/etcd
ETCD_OPTS="--name=etcd \
           --data-dir=/apps/etcd/data/default.etcd \
           --listen-peer-urls=https://192.168.30.31:2380 \
           --listen-client-urls=https://192.168.30.31:2379,https://127.0.0.1:2379 \
           --advertise-client-urls=https://192.168.30.31:2379 \
           --initial-advertise-peer-urls=https://192.168.30.31:2380 \
           --initial-cluster=etcd=https://192.168.30.31:2380 \
           --initial-cluster-token=etcd-cluster \
           --initial-cluster-state=new \
           --heartbeat-interval=6000 \
           --election-timeout=30000 \
           --snapshot-count=5000 \
           --auto-compaction-retention=1 \
           --max-request-bytes=33554432 \
           --quota-backend-bytes=17179869184 \
           --trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem \
           --cert-file=/apps/etcd/ssl/etcd_server.pem \
           --key-file=/apps/etcd/ssl/etcd_server-key.pem \
           --peer-cert-file=/apps/etcd/ssl/etcd_member_etcd.pem \
           --peer-key-file=/apps/etcd/ssl/etcd_member_etcd-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem"

# Create the etcd systemd unit
vi /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
User=etcd
Group=etcd
EnvironmentFile=-/apps/etcd/conf/etcd
ExecStart=/apps/etcd/bin/etcd $ETCD_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

# Give the etcd user ownership of the etcd directory
chown -R etcd.etcd /apps/etcd
# Start etcd
systemctl start etcd
# Enable on boot
systemctl enable etcd

# Environment variables in /etc/profile
export ETCDCTL_API=3
export ENDPOINTS=https://192.168.30.31:2379
# Apply
source /etc/profile
vim ~/.bashrc
alias etcdctl='/apps/etcd/bin/etcdctl --endpoints=${ENDPOINTS} --cacert=/apps/etcd/ssl/etcd-ca.pem'
source ~/.bashrc
# Check cluster health
etcdctl endpoint health
[root@etcd ~]# etcdctl endpoint health
https://192.168.30.31:2379 is healthy: successfully committed proposal: took = 16.707114ms
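
Beyond the health endpoint, a short write/read round trip confirms the TLS client path end to end (the key name here is arbitrary):

etcdctl put /sanity-check ok
# OK
etcdctl get /sanity-check
# /sanity-check
# ok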

Deploy kube-apiserver

# On win on Ubuntu
# Create the Kubernetes CA CSR config
mkdir -p /apps/work/k8s/cfssl/k8s
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the Kubernetes CA certificate and private key
mkdir -p /apps/work/k8s/cfssl/pki/k8s
cfssl gencert -initca /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s-ca

# Create the Kubernetes API Server certificate config
export K8S_APISERVER_VIP=" \
    \"192.168.30.32\" \
" && \
export K8S_APISERVER_SERVICE_CLUSTER_IP="10.66.0.1" && \
export K8S_APISERVER_HOSTNAME="api.k8s.cluster.local" && \
export K8S_CLUSTER_DOMAIN_SHORTNAME="cluster" && \
export K8S_CLUSTER_DOMAIN_FULLNAME="cluster.local" && \
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    ${K8S_APISERVER_VIP},
    "${K8S_APISERVER_SERVICE_CLUSTER_IP}",
    "${K8S_APISERVER_HOSTNAME}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_SHORTNAME}",
    "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_FULLNAME}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the Kubernetes API Server certificate and private key
cfssl gencert \
  -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  -config=/apps/work/k8s/cfssl/ca-config.json \
  -profile=kubernetes \
  /apps/work/k8s/cfssl/k8s/k8s_apiserver.json | \
  cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_server

# Create the Kubernetes webhook (aggregator) certificate config
cat << EOF | tee /apps/work/k8s/cfssl/k8s/aggregator.json
{
  "CN": "aggregator",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

##### Generate the Kubernetes webhook certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/aggregator.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/aggregator

# Create the directories on the remote server
mkdir -p /apps/kubernetes/{bin,conf,config,kubelet-plugins,log,ssl}
# Distribute the certificates to 192.168.30.32
scp -r /apps/work/k8s/cfssl/pki/k8s 192.168.30.32:/apps/kubernetes/ssl/k8s
# Copy the etcd_client certificates
scp -r /apps/work/k8s/cfssl/pki/etcd/etcd_client* 192.168.30.32:/apps/kubernetes/ssl/etcd
scp -r /apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem 192.168.30.32:/apps/kubernetes/ssl/etcd
# Distribute the Kubernetes binaries to the remote server (copy all of them)
scp -r /mnt/e/work/k8s/bin/* 192.168.30.32:/apps/kubernetes/bin

# On the remote server 192.168.30.32
# Create the k8s user
useradd k8s -s /sbin/nologin -M
# Generate encryption-config.yaml
cd /apps/kubernetes/config
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
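
Once kube-apiserver itself is configured and running, a minimal liveness check is the /healthz endpoint on the secure port; a sketch assuming the default port 6443 and that anonymous access to /healthz is permitted (it usually is via the default system:public-info-viewer binding, but this depends on the apiserver's flags):

curl --cacert /apps/kubernetes/ssl/k8s/k8s-ca.pem https://192.168.30.32:6443/healthz
# Expected output: ok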

Deploy kube-scheduler

# Generate the kube-scheduler client certificate for kube-apiserver access -- on win on Ubuntu
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_scheduler.json
{
  "CN": "system:kube-scheduler",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

## Generate the Kubernetes Scheduler certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/k8s_scheduler.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_scheduler

# Create kube_scheduler.kubeconfig
kubectl config set-cluster kubernetes \
    --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kube_scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler.pem \
    --embed-certs=true \
    --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler-key.pem \
    --kubeconfig=kube_scheduler.kubeconfig
kubectl config set-context kubernetes \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube_scheduler.kubeconfig
kubectl config use-context kubernetes --kubeconfig=kube_scheduler.kubeconfig

# Copy kube_scheduler.kubeconfig to the remote server
scp kube_scheduler.kubeconfig 192.168.30.32:/apps/kubernetes/config

# On the remote server
cd /apps/kubernetes/conf
# Create the kube-scheduler startup options
vi kube-scheduler
KUBE_SCHEDULER_OPTS=" \
                   --logtostderr=false \
                   --address=0.0.0.0 \
                   --leader-elect=true \
                   --kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authentication-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authorization-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --alsologtostderr=true \
                   --kube-api-qps=100 \
                   --kube-api-burst=100 \
                   --log-dir=/apps/kubernetes/log \
                   --v=2"

# Create the kube-scheduler systemd unit
vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-scheduler
ExecStart=/apps/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
User=k8s

[Install]
WantedBy=multi-user.target

# Give the k8s user ownership of the new files
chown -R k8s.k8s /apps/kubernetes
# Start kube-scheduler
systemctl start kube-scheduler
# Enable on boot
systemctl enable kube-scheduler
# Verify
kubectl get cs
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}

Deploy kube-controller-manager

# Generate the kube-controller-manager client certificate for kube-apiserver access -- on win on Ubuntu
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json
{
  "CN": "system:kube-controller-manager",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

## Generate the Kubernetes Controller Manager certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager

# Create kube_controller_manager.kubeconfig
kubectl config set-cluster kubernetes \
   --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
   --embed-certs=true \
   --server=${KUBE_APISERVER} \
   --kubeconfig=kube_controller_manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
   --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager.pem \
   --embed-certs=true \
   --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager-key.pem \
   --kubeconfig=kube_controller_manager.kubeconfig
kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=system:kube-controller-manager \
   --kubeconfig=kube_controller_manager.kubeconfig
kubectl config use-context kubernetes --kubeconfig=kube_controller_manager.kubeconfig

# Copy kube_controller_manager.kubeconfig to the remote server
scp kube_controller_manager.kubeconfig 192.168.30.32:/apps/kubernetes/config

# On the remote server
cd /apps/kubernetes/conf
# Create the kube-controller-manager startup options
vi kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
 --leader-elect=true \
 --address=0.0.0.0 \
 --service-cluster-ip-range=10.66.0.0/16 \
 --cluster-cidr=10.67.0.0/16 \
 --node-cidr-mask-size=24 \
 --cluster-name=kubernetes \
 --allocate-node-cidrs=true \
 --kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authentication-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authorization-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --use-service-account-credentials=true \
 --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --node-monitor-grace-period=40s \
 --node-monitor-period=5s \
 --pod-eviction-timeout=5m0s \
 --terminated-pod-gc-threshold=50 \
 --alsologtostderr=true \
 --cluster-signing-cert-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --cluster-signing-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
 --deployment-controller-sync-period=10s \
 --experimental-cluster-signing-duration=86700h0m0s \
 --enable-garbage-collector=true \
 --root-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --service-account-private-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
 --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
 --controllers=*,bootstrapsigner,tokencleaner \
 --horizontal-pod-autoscaler-use-rest-clients=true \
 --horizontal-pod-autoscaler-sync-period=10s \
 --flex-volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume \
 --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager.pem \
 --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager-key.pem \
 --kube-api-qps=100 \
 --kube-api-burst=100 \
 --log-dir=/apps/kubernetes/log \
 --v=2"

# Create the kube-controller-manager systemd unit
vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-controller-manager
ExecStart=/apps/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
User=k8s

[Install]
WantedBy=multi-user.target

# Give the k8s user ownership of the new files
chown -R k8s.k8s /apps/kubernetes
# Start kube-controller-manager
systemctl start kube-controller-manager
# Enable on boot
systemctl enable kube-controller-manager
# Verify
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}

# Authorize kube-controller-manager, kubelet, and kube-scheduler to access kube-apiserver
# Grant access to the Kubernetes API
kubectl create clusterrolebinding controller-node-clusterrolebing --clusterrole=system:kube-controller-manager --user=system:kube-controller-manager
kubectl create clusterrolebinding scheduler-node-clusterrolebing --clusterrole=system:kube-scheduler --user=system:kube-scheduler
kubectl create clusterrolebinding controller-manager:system:auth-delegator --user system:kube-controller-manager --clusterrole system:auth-delegator
# Grant the kubernetes certificate access to the kubelet API
kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
kubectl create clusterrolebinding kubelet-node-clusterbinding --clusterrole=system:node --group=system:nodes
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
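
With --leader-elect=true, the active controller-manager records a leader lease as an annotation on an Endpoints object in kube-system; inspecting it confirms the component is actually leading (annotation name per the upstream leader-election convention):

kubectl -n kube-system get endpoints kube-controller-manager \
  -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}'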

Deploy Docker

# On the worker node
# Use the Aliyun mirror
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge]
name=Docker CE Edge - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-debuginfo]
name=Docker CE Edge - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-source]
name=Docker CE Edge - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

# Install Docker dependencies
yum install -y python-pip python-devel yum-utils device-mapper-persistent-data lvm2
# Install Docker
yum install -y docker-ce
# Edit the Docker startup options
vi /lib/systemd/system/docker.service
# Change the ExecStart= line to:
ExecStart=/usr/bin/dockerd -H fd:// --graph /apps/docker -H unix:///var/run/docker.sock --max-concurrent-downloads=20 --log-opt max-size=200M --log-opt max-file=10 --default-ulimit nofile=1024000 --default-ulimit nproc=1024000
# Reload the unit definitions
systemctl daemon-reload
# Restart Docker
systemctl restart docker
# Enable on boot
systemctl enable docker
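
A glance at docker info confirms the daemon came back with the relocated graph directory:

docker info | grep 'Docker Root Dir'
# Docker Root Dir: /apps/docker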

Install kubelet Dependencies

# On the worker node: install the CNI plugins
mkdir -p /apps/cni/bin
cd /apps/cni/bin
wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz
rm -f cni-plugins-linux-amd64-v0.8.2.tgz
# Create the /etc/cni/net.d directory
mkdir -p /etc/cni/net.d
vi /etc/cni/net.d/10-kuberouter.conflist
{
   "cniVersion":"0.3.0",
   "name":"mynet",
   "plugins":[
      {
         "name":"kubernetes",
         "type":"bridge",
         "bridge":"kube-bridge",
         "isDefaultGateway":true,
         "ipam":{
            "type":"host-local"
         }
      },
      {
         "type":"portmap",
         "capabilities":{
            "snat":true,
            "portMappings":true
         }
      }
   ]
}

# Install lxcfs
yum install -y git automake libtool fuse-devel
git clone git://github.com/lxc/lxcfs
cd lxcfs/
./bootstrap.sh
./configure
make
make install
# Create the mount directory
mkdir -p /var/lib/lxcfs/
# Create the lxcfs systemd unit
vi /usr/lib/systemd/system/lxcfs.service
[Unit]
Description=FUSE filesystem for LXC
ConditionVirtualization=!container
Before=lxc.service
Documentation=man:lxcfs(1)

[Service]
ExecStart=/usr/local/bin/lxcfs /var/lib/lxcfs/
KillMode=process
Restart=on-failure
ExecStopPost=-/bin/fusermount -u /var/lib/lxcfs
Delegate=yes

[Install]
WantedBy=multi-user.target

# Start lxcfs
systemctl start lxcfs
# Enable on boot
systemctl enable lxcfs
# Install the kubelet dependencies
yum install -y epel-release
yum install -y yum-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel
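
lxcfs serves container-aware views of /proc from its mount point; if the service started cleanly, the directory should be populated (a sanity check only, nothing else depends on it yet):

ls /var/lib/lxcfs/
# cgroup  proc
cat /var/lib/lxcfs/proc/uptime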

Deploy kubelet

# On win on Ubuntu
# Generate a bootstrap token
echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"
9dad00.2ac445bf1cc5e9c2

vi bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-<token id>"
  name: bootstrap-token-9dad00
  namespace: kube-system
# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated by 'kubelet'."
  # Token ID and secret. Required.
  token-id: 9dad00
  token-secret: 2ac445bf1cc5e9c2
  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress

### Create the resource
kubectl create -f bootstrap.secret.yaml

### Create bootstrap.clusterrole.yaml
vi bootstrap.clusterrole.yaml
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]

kubectl create -f bootstrap.clusterrole.yaml

### Create apiserver-to-kubelet.yaml
vi apiserver-to-kubelet.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kubernetes-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kubernetes
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubernetes-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes

kubectl create -f apiserver-to-kubelet.yaml

### List the created token
kubeadm token list

# Allow users in the system:bootstrappers group to create CSRs
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Auto-approve the first TLS-bootstrapping CSR from users in the system:bootstrappers group
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
# Auto-approve CSRs from the system:nodes group renewing the kubelet's client certificate for apiserver communication
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
# Auto-approve CSRs from the system:nodes group renewing the kubelet's serving certificate for the port 10250 API
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes

# Create bootstrap.kubeconfig
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set the client credentials
kubectl config set-credentials system:bootstrap:9dad00 \
  --token=9dad00.2ac445bf1cc5e9c2 \
  --kubeconfig=bootstrap.kubeconfig
# Set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:bootstrap:9dad00 \
  --kubeconfig=bootstrap.kubeconfig
# Switch to the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Distribute bootstrap.kubeconfig to the remote node
scp bootstrap.kubeconfig 192.168.30.32:/apps/kubernetes/conf

# Create the kubelet startup options -- on the remote node
cd /apps/kubernetes/conf
vi kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
              --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
              --address=192.168.30.32 \
              --node-ip=192.168.30.32 \
              --hostname-override=master \
              --cluster-dns=10.66.0.2 \
              --cluster-domain=cluster.local \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --cgroup-driver=cgroupfs \
              --healthz-port=10248 \
              --healthz-bind-address=192.168.30.32 \
              --cert-dir=/apps/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/k8s-node=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path=/apps/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir=/apps/work/kubernetes/kubelet \
              --log-dir=/apps/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --image-pull-progress-deadline=30s \
              --v=2 \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --allowed-unsafe-sysctls 'kernel.msg*,kernel.shm*,kernel.sem,fs.mqueue.*,net.*' \
              --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"

# Create the kubelet systemd unit
vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

# Kubernetes working directories
mkdir -p /apps/work/kubernetes/{manifests,kubelet}
# Start kubelet
systemctl start kubelet
# Enable on boot
systemctl enable kubelet
# Check whether the certificate was issued
cd /apps/kubernetes/ssl/
[root@master ssl]# ll
total 12
drwxr-xr-x 2 k8s k8s   75 Jul  4 15:06 etcd
drwxr-xr-x 2 k8s k8s  310 Sep  3 11:18 k8s
-rw------- 1 k8s k8s 1277 Sep  3 10:25 kubelet-client-2019-09-03-10-25-10.pem
lrwxrwxrwx 1 k8s k8s   59 Sep  3 10:25 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-09-03-10-25-10.pem
-rw-r--r-- 1 k8s k8s 2153 Sep  3 10:25 kubelet.crt
-rw------- 1 k8s k8s 1675 Sep  3 10:25 kubelet.key
# Check that the node registered
kubectl get node
[root@master ~]# kubectl get node
NAME     STATUS   ROLES      AGE   VERSION
master   Ready    k8s-node   27h   v1.15.3
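
If the auto-approve bindings are missing or misnamed, the node will hang in NotReady with its CSR stuck in Pending; approving it by hand uses the standard commands (csr-xxxxx below is a placeholder for the real name):

kubectl get csr
# NAME        AGE   REQUESTOR                 CONDITION
# csr-xxxxx   1m    system:bootstrap:9dad00   Pending
kubectl certificate approve csr-xxxxx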

Deploy kube-router

# On win on Ubuntu
# Create the kube-router client certificate for kube-apiserver access
cat << EOF | tee /apps/work/k8s/cfssl/k8s/kube-router.json
{
  "CN": "kube-router",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

## Generate the kube-router certificate and private key
cfssl gencert \
        -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
        -config=/apps/work/k8s/cfssl/ca-config.json \
        -profile=kubernetes \
        /apps/work/k8s/cfssl/k8s/kube-router.json | \
        cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/kube-router

# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubeconfig.conf
# Set the client credentials
kubectl config set-credentials kube-router \
  --client-certificate=/apps/work/k8s/cfssl/pki/k8s/kube-router.pem \
  --client-key=/apps/work/k8s/cfssl/pki/k8s/kube-router-key.pem \
  --embed-certs=true \
  --kubeconfig=kubeconfig.conf
# Set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-router \
  --kubeconfig=kubeconfig.conf
# Switch to the default context
kubectl config use-context default --kubeconfig=kubeconfig.conf
# Create the kube-router configmap
kubectl create configmap "kube-proxy" --from-file=kubeconfig.conf

# Create kubeadm-kuberouter-all-features-hostport.yaml
vi kubeadm-kuberouter-all-features-hostport.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
       "cniVersion":"0.3.0",
       "name":"mynet",
       "plugins":[
          {
             "name":"kubernetes",
             "type":"bridge",
             "bridge":"kube-bridge",
             "isDefaultGateway":true,
             "ipam":{
                "type":"host-local"
             }
          },
          {
             "type":"portmap",
             "capabilities":{
                "snat":true,
                "portMappings":true
             }
          }
       ]
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
    tier: node
  name: kube-router
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-router
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      serviceAccount: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - --run-router=true
        - --run-firewall=true
        - --run-service-proxy=true
        - --advertise-cluster-ip=true
        - --advertise-loadbalancer-ip=true
        - --advertise-pod-cidr=true
        - --advertise-external-ip=true
        - --cluster-asn=64512
        - --metrics-path=/metrics
        - --metrics-port=20241
        - --enable-cni=true
        - --enable-ibgp=true
        - --enable-overlay=true
        - --nodeport-bindon-all-ip=true
        - --nodes-full-mesh=true
        - --enable-pod-egress=true
        - --cluster-cidr=10.67.0.0/16
        - --v=2
        - --kubeconfig=/var/lib/kube-router/kubeconfig
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kubeconfig
          mountPath: /var/lib/kube-router
          readOnly: true
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kube-router-cfg
          mountPath: /etc/kube-router
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/ingress
        operator: Equal
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
      - name: kubeconfig
        configMap:
          name: kube-proxy
          items:
          - key: kubeconfig.conf
            path: kubeconfig
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-router
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
  namespace: kube-system
rules:
  - apiGroups:
    - ""
    resources:
      - namespaces
      - pods
      - services
      - nodes
      - endpoints
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - "networking.k8s.io"
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-router
subjects:
- kind: ServiceAccount
  name: kube-router
  namespace: kube-system

# Deploy kube-router
kubectl apply -f kubeadm-kuberouter-all-features-hostport.yaml
# Check that the pod was created successfully
[root@master ~]# kubectl get pod -A | grep kube-router
kube-system            kube-router-5tmgw                           1/1     Running   0          21h
# On 192.168.30.32, the init container has written the CNI config
cat /etc/cni/net.d/10-kuberouter.conflist
[root@master ssl]# cat /etc/cni/net.d/10-kuberouter.conflist
{"cniVersion":"0.3.0","name":"mynet","plugins":[{"bridge":"kube-bridge","ipam":{"subnet":"10.67.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"},{"capabilities":{"portMappings":true,"snat":true},"type":"portmap"}]}
# The node has been allocated its pod address range
ip a | grep kube
[root@master ssl]# ip a | grep kube
4: kube-bridge:  mtu 1500 qdisc noqueue state UP group default qlen 1000
    inet 10.67.0.1/24 brd 10.67.0.255 scope global kube-bridge
6: kube-dummy-if:  mtu 1500 qdisc noqueue state UNKNOWN group default
    inet 10.66.0.1/32 brd 10.66.0.1 scope link kube-dummy-if
    inet 10.66.0.2/32 brd 10.66.0.2 scope link kube-dummy-if
    inet 10.66.91.125/32 brd 10.66.91.125 scope link kube-dummy-if
    inet 10.66.86.10/32 brd 10.66.86.10 scope link kube-dummy-if
    inet 10.66.52.216/32 brd 10.66.52.216 scope link kube-dummy-if
# kube-router is working
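
If BGP peering or CNI setup misbehaves, the DaemonSet logs are the first place to look:

kubectl -n kube-system logs ds/kube-router
# or a specific pod, e.g.:
kubectl -n kube-system logs kube-router-5tmgw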

Deploy CoreDNS

# On win on Ubuntu
vi coredns.yaml
# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns
        imagePullPolicy: Always
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.66.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

# Deploy the CoreDNS service
kubectl apply -f coredns.yaml
# Verify
[root@master ~]# kubectl get all -A | grep coredns
kube-system            pod/coredns-597b77445b-fhxvr                    1/1     Running   0          27h
kube-system            deployment.apps/coredns                     1/1     1            1           27h
kube-system            replicaset.apps/coredns-597b77445b                    1         1         1       27h

dig @10.66.0.2 www.baidu.com
[root@master ssl]# dig @10.66.0.2 www.baidu.com

; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 www.baidu.com
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40347
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 13, ADDITIONAL: 27

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.baidu.com.                 IN      A

;; ANSWER SECTION:
www.baidu.com.          30      IN      CNAME   www.a.shifen.com.
www.a.shifen.com.       30      IN      A       14.215.177.38
www.a.shifen.com.       30      IN      A       14.215.177.39

;; AUTHORITY SECTION:
com.                    30      IN      NS      h.gtld-servers.net.
com.                    30      IN      NS      m.gtld-servers.net.
com.                    30      IN      NS      g.gtld-servers.net.
com.                    30      IN      NS      d.gtld-servers.net.
com.                    30      IN      NS      a.gtld-servers.net.
com.                    30      IN      NS      j.gtld-servers.net.
com.                    30      IN      NS      c.gtld-servers.net.
com.                    30      IN      NS      l.gtld-servers.net.
com.                    30      IN      NS      b.gtld-servers.net.
com.                    30      IN      NS      f.gtld-servers.net.
com.                    30      IN      NS      k.gtld-servers.net.
com.                    30      IN      NS      i.gtld-servers.net.
com.                    30      IN      NS      e.gtld-servers.net.

;; ADDITIONAL SECTION:
e.gtld-servers.net.     30      IN      AAAA    2001:502:1ca1::30
a.gtld-servers.net.     30      IN      A       192.5.6.30
i.gtld-servers.net.     30      IN      AAAA    2001:503:39c1::30
c.gtld-servers.net.     30      IN      A       192.26.92.30
g.gtld-servers.net.     30      IN      AAAA    2001:503:eea3::30
m.gtld-servers.net.     30      IN      A       192.55.83.30
d.gtld-servers.net.     30      IN      A       192.31.80.30
a.gtld-servers.net.     30      IN      AAAA    2001:503:a83e::2:30
b.gtld-servers.net.     30      IN      A       192.33.14.30
b.gtld-servers.net.     30      IN      AAAA    2001:503:231d::2:30
i.gtld-servers.net.     30      IN      A       192.43.172.30
d.gtld-servers.net.     30      IN      AAAA    2001:500:856e::30
l.gtld-servers.net.     30      IN      A       192.41.162.30
h.gtld-servers.net.     30      IN      AAAA    2001:502:8cc::30
e.gtld-servers.net.     30      IN      A       192.12.94.30
l.gtld-servers.net.     30      IN      AAAA    2001:500:d937::30
k.gtld-servers.net.     30      IN      AAAA    2001:503:d2d::30
j.gtld-servers.net.     30      IN      AAAA    2001:502:7094::30
m.gtld-servers.net.     30      IN      AAAA    2001:501:b1f9::30
f.gtld-servers.net.     30      IN      A       192.35.51.30
g.gtld-servers.net.     30      IN      A       192.42.93.30
h.gtld-servers.net.     30      IN      A       192.54.112.30
j.gtld-servers.net.     30      IN      A       192.48.79.30
k.gtld-servers.net.     30      IN      A       192.52.178.30
c.gtld-servers.net.     30      IN      AAAA    2001:503:83eb::30
f.gtld-servers.net.     30      IN      AAAA    2001:503:d414::30

;; Query time: 6 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Wed Sep 04 14:17:05 CST 2019
;; MSG SIZE  rcvd: 897

dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local
[root@master ssl]# dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local

; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 kube-dns.kube-system.svc.cluster.local
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40471
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;kube-dns.kube-system.svc.cluster.local.        IN A

;; ANSWER SECTION:
kube-dns.kube-system.svc.cluster.local. 5 IN A  10.66.0.2

;; Query time: 1 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Wed Sep 04 14:19:13 CST 2019
;; MSG SIZE  rcvd: 121

Both external and cluster-internal names resolve correctly.
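
dig from the host only proves the service IP answers; resolution from inside a pod also exercises the kubelet's --cluster-dns wiring. A throwaway busybox pod works for this (busybox:1.28 is used because nslookup in newer busybox images is known to be unreliable):

kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
# Server:    10.66.0.2
# Address 1: 10.66.0.2 kube-dns.kube-system.svc.cluster.local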

Deploy Traefik (HTTPS)

# On win on Ubuntu
# Request a certificate -- use letsencrypt to obtain a free multi-domain certificate
# Rename the certificate files to tls.crt and tls.key
# Create the secret
kubectl -n kube-system create secret generic tls-cert --from-file=tls.key --from-file=tls.crt
# Create the Traefik configuration
vi traefik.toml
defaultEntryPoints = ["http","https"]
[entryPoints]
  [entryPoints.http]
  address = ":80"
    [entryPoints.http.redirect]
    entryPoint = "https"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]
      [[entryPoints.https.tls.certificates]]
      certFile = "/certs/tls.crt"
      keyFile = "/certs/tls.key"

# Generate the configmap
kubectl create configmap traefik-conf --from-file=traefik.toml -n kube-system

# Create the Traefik RBAC rules
vi traefik-rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
    - extensions
    resources:
    - ingresses/status
    verbs:
    - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: kube-system

# traefik-deployment-https
vi traefik-deployment-https.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: kube-system
  labels:
    k8s-app: traefik
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik
  template:
    metadata:
      labels:
        k8s-app: traefik
        name: traefik
    spec:
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      volumes:
      - name: ssl
        secret:
          secretName: tls-cert
      - name: config
        configMap:
          name: traefik-conf
          defaultMode: 0644
          items:
          - key: traefik.toml
            path: traefik.toml
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - image: traefik
        name: traefik
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /certs
          name: "ssl"
        - mountPath: /etc/traefik.toml
          subPath: traefik.toml
          name: "config"
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        args:
        - --api
        - --web
        - --api.dashboard
        - --logLevel=INFO
        - --web.metrics
        - --metrics.prometheus
        - --web.metrics.prometheus
        - --kubernetes
        - --traefiklog
        - --traefiklog.format=json
        - --accesslog
        - --accesslog.format=json
        - --accessLog.fields.headers.defaultMode=redact
        - --insecureskipverify=true
        - --configFile=/etc/traefik.toml
#      nodeSelector:
#        ingress: "yes"
#      tolerations:
#      - effect: NoSchedule
#        key: node-role.kubernetes.io/ingress
#        operator: Equal
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: traefik
  name: traefik
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik
  clusterIP: None
  ports:
    - protocol: TCP
      port: 80
      name: http
    - protocol: TCP
      port: 443
      name: https
    - protocol: TCP
      port: 8080
      name: admin
  type: ClusterIP

# Or as a DaemonSet: traefik-daemonset-https
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: kube-system
  labels:
    k8s-app: traefik
spec:
  selector:
    matchLabels:
      k8s-app: traefik
  template:
    metadata:
      labels:
        k8s-app: traefik
        name: traefik
    spec:
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      volumes:
      - name: ssl
        secret:
          secretName: tls-cert
      - name: config
        configMap:
          name: traefik-conf
          defaultMode: 0644
          items:
          - key: traefik.toml
            path: traefik.toml
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - image: traefik
        name: traefik
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /certs
          name: "ssl"
        - mountPath: /etc/traefik.toml
          subPath: traefik.toml
          name: "config"
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --web
        - --api.dashboard
        - --logLevel=INFO
        - --web.metrics
        - --metrics.prometheus
        - --web.metrics.prometheus
        - --kubernetes
        - --traefiklog
        - --traefiklog.format=json
        - --accesslog
        - --accesslog.format=json
        - --accessLog.fields.headers.defaultMode=redact
        - --insecureskipverify=true
        - --configFile=/etc/traefik.toml
      nodeSelector:
        ingress: "yes"
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/ingress
        operator: Equal
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: traefik
  name: traefik
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik
  clusterIP: None
  ports:
    - protocol: TCP
      port: 80
      name: http
    - protocol: TCP
      port: 443
      name: https
    - protocol: TCP
      port: 8080
      name: admin
  type: ClusterIP

# Create the traefik-dashboard ingress
vi traefik-dashboard.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
spec:
  rules:
  - host: trae.xxx.com
    http:
      paths:
        - backend:
            serviceName: traefik
            servicePort: 8080
  tls:
   - secretName: tls-cert

# Create the resources
kubectl apply -f traefik-deployment-https.yaml
kubectl apply -f traefik-rbac.yaml
kubectl apply -f traefik-dashboard.yaml
# Add a hosts entry for trae.xxx.com and open it in a browser; if the dashboard loads, Traefik is working.
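
Any other Service can then be published through Traefik with a standard Ingress; a sketch with placeholder host and service names (Traefik already terminates TLS with the certificate from traefik.toml, so no per-Ingress secret is required):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-app
  namespace: default
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: app.xxx.com
    http:
      paths:
      - backend:
          serviceName: demo-app
          servicePort: 80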

Deploy kubernetes-dashboard

# On win on Ubuntu
# Create the kubernetes-dashboard certificate config
cat << EOF | tee /apps/work/k8s/cfssl/k8s/dashboard.json
{
  "CN": "dashboard",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

##### Generate the kubernetes-dashboard certificate; an externally issued certificate also works
cfssl gencert \
        -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
        -config=/apps/work/k8s/cfssl/ca-config.json \
        -profile=kubernetes \
        /apps/work/k8s/cfssl/k8s/dashboard.json | \
        cfssljson -bare ./dashboard

# base64-encode the certificate and key, and record the output
cat dashboard.pem | base64 | tr -d '\n'
cat dashboard-key.pem | base64 | tr -d '\n'

# kubernetes-dashboard manifest
vi kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
data:
  dashboard.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFlFV0MxbGlqcnFzNW5vcHBxTXF0YzZSY0pnSWFJSGhGemZZUWhRQm5pK0Vjam8vCkRTUkYvY3BUOFlkTTg2MVpEV1lSN1FEelFLNmJUTmRLWXJJYmpVWHJpRFVFU01EUW13Y1VteTMzWjFpeXR6K0wKUUVmTVFvWVNReGVIY2RqUHp3bUhFS0todk9vNmxQTHNFWkMwQ3ZCamw2VHlERjhuSDEzby9kRlRVbGJhWUlGaQpPeGVIWkxMMTZKbmNLK3RVaW9ncjdLekFKMUkxTjdwOVQ1blZ5YU9PbWNCVEFnU3RJM0ZwSzdMZG1zaVU0ZEZ0CkpSSFZ0eTh6Y3dCSU9wWnhqV29mM2ROVkRrVUFsYjVtV2psU0RaQ2lhYmFYQi91NmJ0R0k3RlY2cENaUzdDVG4KeWlpUFlFSXRPSGRCT0VycGpKZWQ0bHQ5K2MvNDE3UTRIaiswdndJREFRQUJBb0lCQVFDK1daSWdjQTZRRnhScQpzVlNST1BNQjlFdXlJNlQrN0NZL2xXQUZGM2tUdHlKRVlTVEJpck0yVFprbjBFbjNGSndlVU1CNEZwRmJScTJBCm1vSWpxeHJveG5taGRjOWlPd3NTVHZtcU1kd2ZLNXBiQ0pBeDdNRE5ZS0FiTDRNbjAxazlaaVpaZnhTNG1WcksKa1hHNTRDZlYzeWR0VU5qRDJiVkFBdWQ2TVJQSDV5QWJTVktsMG9ONkRCaFV4MlYyWEo0WnRUVHE0b3R6VGYxZwp3SjNJeVFjSXl3czE2V3dkeHpuYStqVmpOYU5OQ3ZCT1BMbm9TeXZBQXZGRG9UYmUrMG1tcnZLVmlSeDBDT1FzCkUwNjFtNHY2eUExL3locndkT1BDYXN6SkpjWlYzOThJTzFKb2QxUHk3OU9aT1FpY1FEOGhwQmxqb0FSQ2JlY3QKRFFPcG5CR0JBb0dCQVBhYlJSSGpPTkxIQ25JZWlFQU1EYXNwQXo2RGxRNkQvdWNNdzROdkVPRVNVa3dvQ0p4cApwK1hJeVVzT1B1d2swTzVCcHJRcHZjdGYyWXlLZTFtR25iVUpmUVNWNGpLdWpqb0M0OWhOWk9lSE8zd0xMcnNXCkl1SU1Qeko0TjhxSzl0dUpDQ3BVYUZFVzRiN1R2OGsyK1pJWHJwN3hzNklDd01EUnpTaW9wY0hCQW9HQkFNMEgKQVl1bmdzY3hTM2JnZ05idU5sQ3lIOHBLZFVPbi95cU9IQUdYcG9vZmJUbXJiUUlWN0ZOVSszUTlYc2ErVVE0QwpUbVdFbzhabVhrL3lIV2FDVWxpRkN0ckRhTzNUZVhvb2pia1JyaDcxakFXN0pjVDRVZ1ZwcG1RakFVUW8vOWtVCmxHMUNpOTFZZy94dlV5dHlYM1BnZHJ6SnU2aWNsM1pVZ1h4dzNoWi9Bb0dBZENmY2w3bFVLWXZSTXNHSTRjb0wKb2lRMlAvclFlYjdZa05IbFFZSk9EQVdLT0E3ZlIzVkl2U1lmRWpoS2tRWWlWeWNiTTE4NTQ1SnBNUmFGVlR6ZwpDY2JIV1NLVUlkVXdic2l2czFGNUJza2V6cVdoeEVOLytNTlYvUnE5QkswQjY1UVhBWUV5aFlkbW0zQzN0RG90CndZOWdFOE83SGNONE1ScGhMUmFLeE1FQ2dZRUFoS2E5eHorUUM1VEhRSmlzZzJNSVhWbUIyLzRrdEt0akdvTnIKZDFSSStpQ3ZLSnJUSW9CUXNQSFE1em8xc2R5ODBKV0paNEZUL1MrS1lhdENmbXBmSU1xalpUcjlEcksrYTkwRgpKUEpkZDhaaTIrcGoyM2JXaW8zNmk5dGlIRmx5ZjE4alVUVzNESFVTb0NiZTVzTlBJc2ZkeXZPeXFMcjMvQ1ZjCnlaOU1jYjBDZ1lBMVp2RVM3bU42Nm10T2JpSlR3a3hhaTVvS2tHbDdHTDJkZXJFUmxsc1YrNWRCSVY4dG5DTnAKT2tjMFlMbHV2TEg4cG4zd2VCNzg5dUFCQjNXYmNKcHg0L2NIRm9oZDNhdlR0RThRVjJod0tNS2RKQVBvTHNoMgprK2lEUWd1dmFxSzNmL1RYUW43bWU3dWFqSDk3SXZldXJtWWsvVmRJY0dicnd1SVRzd0FEYWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  dashboard.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5ekNDQXQrZ0F3SUJBZ0lVUWRIVXdKS1JYc1ZRb2VYS1JDTjd0eVcwWU04d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05ERXhNVE13TUZvWERUSTVNRGN3TVRFeE1UTXcKTUZvd2JURUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUkl3CkVBWURWUVFERXdsa1lYTm9ZbTloY21Rd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFERmdSWUxXV0tPdXF6bWVpbW1veXExenBGd21BaG9nZUVYTjloQ0ZBR2VMNFJ5T2o4TkpFWDl5bFB4aDB6egpyVmtOWmhIdEFQTkFycHRNMTBwaXNodU5SZXVJTlFSSXdOQ2JCeFNiTGZkbldMSzNQNHRBUjh5Q2hoSkRGNGR4CjJNL1BDWWNRb3FHODZqcVU4dXdSa0xRSzhHT1hwUElNWHljZlhlajkwVk5TVnRwZ2dXSTdGNGRrc3ZYb21kd3IKNjFTS2lDdnNyTUFuVWpVM3VuMVBtZFhKbzQ2WndGTUNCSzBqY1drcnN0MmF5SlRoMFcwbEVkVzNMekp6QUVnNgpsbkdOYWgvZDAxVU9SUUNWdm1aYU9WSU5rS0pwdHBjSCs3cHUwWWpzVlhxa0psTHNKT2ZLS0k5Z1FpMDRkMEU0ClN1bU1sNTNpVzMzNXovalh0RGdlUDdTL0FnTUJBQUdqZ1kwd2dZb3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEcKQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjBHQTFVZApEZ1FXQkJURTl6cWx4dkErRXMrbE8zWlFEMlhubGFHRFpqQWZCZ05WSFNNRUdEQVdnQlJ4NEtjQVJjYWtSL2J4Cm13b1RCZURzK3hBb2FUQUxCZ05WSFJFRUJEQUNnZ0F3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJnWHZwTEMKQjIybXlQaURlZnhsWGNZRzAvY0R2RXlYcTlENWtKTnBxKzFZQ0EvMlp2RDIyN1Q5VjY3aHVyTlA3T2FvSG95Tgo0MHpkR3lZTGRNV3pyZTQwVksxdC84N3pDTENzamt1ZXRCRWEwNVRqUTJhbDRhSzJ6TXl5MkJLWEpYbjlvdkhzCjJwNndvL001eklEOXl2OEhyRkZqWHM3NitTUTFzNXpOdUxuaDBET0Z1SktiZUZxSUJyNmZRbXlsb0l1VURtZjYKcGtQYkJyRnJpNHFGS0lDcVZKRCt3Z01zRFBiclVMZXF5NWlBVjNqRzJKMFgxOE4zdklCeUFwdWhZbjNudlV0TwpLREVIWkFJcFpjRWdqQ2ZLVDNyaERLL3JLN0VFZkxLcGlCdGJya3pFbjVWV3FQUFJEK3ZPU2VySldETDl1K0xyCmhEazlvZ084cmNqQzZGdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==---apiVersion: v1kind: Secretmetadata:  labels:    k8s-app: kubernetes-dashboard  name: dashboard-tls-cert  namespace: kubernetes-dashboardtype: Opaquedata:  tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUdYekNDQlVlZ0F3SUJBZ0lTQkdVcDlSaVAvK2lNMDVYM0FuY0FUeTg1TUEwR0NTcUdTSWIzRFFFQkN3VUEKTUVveEN6QUpCZ05WQkFZVEFsVlRNUll3RkFZRFZRUUtFdzFNWlhRbmN5QkZibU55ZVhCME1TTXdJUVlEVlFRRApFeHBNWlhRbmN5QkZibU55ZVhCMElFRjFkR2h3Y21sMGVTQllNekFlRncweE9UQTNNRGt3T1RJNU1ESmFGdzB4Ck9URXdNRGN3T1RJNU1ESmFNQll4RkRBU0JnTlZCQU1UQzIxa1pHZGhiV1V1WTI5dE1JSUNJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQWc4QU1JSUNDZ0tDQWdFQW9mSVdOdTE4YUp1T3Jzd0JjZE9lODN0dWpXZ2dpUXl0VVYxQwpqNVhYbzNjQTM1L2ZxQXNGVHpJRGNwUmxhTGJ6SHd1d1psOWNSKzJuRENaUzI4VlhZaXcrSkQvQXpna3FzTHFJCjZ3YlFhcHNCa1lYUzRuT1UrZzhSMVgwcm52ckpickE1eHFJSWJKM002ajVLTXZ4RktvMEV3YXNBY2NiYlVGOW4KMHQ2RzNreG4zWW1Sek5HeHh2bXZ4V2prNWNkSWMza0MyT1VuRktGOG5XemJab2JiNk9PUnZSaElEWW5YdjkxdgoyMUYwQnZ0Q21GY0FEaDRqZXUrLzNKVDVLcEJkdkFHOHI3aU1wbkhKaFU1alhqTXlPRytMbkcvcnJuRzJGaXpHCmx1UHQwKzRlK0ZRSXFZY1BUM1cyTUF2ZDlzQTNEMThsUW82M00vZlMyYjNIYVNidFY0b1pmNS9zTzJNeEVPVnoKVEd1M0NxYk40TkcrZE8ycXoxYWxMQmlGZlVjNEdmUVpYRmlLaDFzazl3Qm5zeWhqYUZmdUx6bHRxMDg3STJLYQorVlRaUzFQSlJFbGduM3UwY1FmaENjelF5ZTJ3Vjl6RE9lVmUxeTBjLzZ0RWJhNllCeGR2ZGcwOFpKL0QwYTBLCnJvWlVJMW5Rc2RKeE8rQ3N1OURLYjROZzJCYnZkWVpHVWJrSCtSUDU0UUdrS1VnYnVxNVIwbXI0U1I2VUwrRE4KZjNxem81a3ZiMXVRWXFpaDZYUFVDVUVPOTNOU1Y2MTNUSUVOTUpyYjVhbGRLUkhPZlpWL201QThlUy9ibFFYcgpOV3FCRy9OL2RtckZjMmcyNGJEY3d5OXIzL3FkNy9MTWxmMVRVdzJGczR3M2x2VHJFanlwWEZhQ3BRRGxkc0xJCkYwcWVKVnNDQXdFQUFhT0NBbkV3Z2dKdE1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVHUUNXOGNFbgpaNWhVWjBDa004QW03Wjh7NGJNd0h4WURWUjBqQkJnd0ZvQVVxRXBxWXdSOTNicm0wVG0zcGtWbDcvT283S0V3CmJ3WUlLd1lCQlFVSEFRRUVZekJoTUM0R0NDc0dBUVVGQnpBQmhpSm9kSFJ3T2k4dmIyTnpjQzVwYm5RdGVETXUKYkdWMGMyVnVZM0o1Y0hRdWIzSm5NQzhHQ0NzR0FRVUZCekFDaGlOb2RIUndPaTh3WTJWeWRDNXBiblF0ZURNdQpiR1YwYzJWdVkzSjVjSFF1YjNKbkx6QWxCZ05WSFJFRUhqQWNnZzBxTG0xa1pHZGhiV1V1WTI5dGdndHRaR1JuCllXMWxMbU52YlRCTUJnTlZIU0FFUlRCRE1BZ0dCbWVCREFFQ0FUQTNCZ3NyQmdFRUFZTGZFd0VCQVRBb01DWUcKQ0NzR0FRVUZCd0lCRmhwb2RIUndPaTh3WTNCekxteGxkSE5sYm1OeWVYQjBMbTl5WnpDQ0FRWUdDaXNHQVFRQgoxbmtDQkFJRWdmY0VnZlFBOGdCM0FPSnBTNjRtNk9sQUNlaUdHN1k3ZzlRKzUvNTBpUHVranlpVEFaM2Q4ZHYrCkFBQUJhOVpIamZBQUFBUURBRWd3UmdJaEFKNXBWaDFDSEpmcTFhd2NOYmxEU2FwL1prQmVBeXU5ajcrTVhISnMKTEI3TUFpRUFwM2xLVVNCZXpiQWpodkZWSTBGR3ZFWmtzU2lYKyt3SitiZ3VLOXlaS3JBQWR3QXBQRkdXVk1nNQpaYnFxVVB4WUI5UzNiNzlZZWlseTNLVEREUFRsUlVmMGVBQUFBV3ZXUjQzd0FBQUVBd0JJTUVZQ0lRRDI1L1NHClcrWHRDa2VzaHViekZtUnRnaDUrWXMxaXpnSG5CSmtOS1Z0cE9nSWhBT1lteWJCWjV3RjZBeE5UT29WdnkyYVMKNktEdURyWmRzSVYrN251WkhFSDdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNjRHFwTzF3OWdNbzJGaW1GTgpwSUlxT3d1N2hsUWVURU44enY1UmFiYWtGelpvZlhURXpRcGNtSlNWRUhET25MVGpjaWpITWxtbGdIbndTM2w0CjAyWFB0akIzUWJUNFRWUHlqUGpBZ1ZvL1ZmclNJT2N5S1pKRDNJMWxLNXV1anRCdGF3Rnh4cjBYeGd1Q2k5TlUKdlQ2R0RxYnlaVVdiL1I0bXVVYzFwRzMySVJiS3BxQnZveitsaGRMNHdOb1M5YXdiUlg3LzBmUytEZUZiZ09vbgpzYnBDYTFQeFdqWHYwNloxNkF0LzBRTlVZLzExdEw4bTRDK3Q2OW5kOUt6eUdRZmdOank2NmM1RmhIODVBQkNFClJ6L3NoVkdyb1lTQkh4M1Q0c0NKZnh5dW5oK0tVZ0dvRFk5VUc5RzI2T200eHgvWFQ5OTZONTNxUytPS21iY0wKajVJMgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRWtqQ0NBM3FnQXdJQkFnSVFDZ0ZCUWdBQUFWT0ZjMm9MaGV5bkNEQU5CZ2txaGtpRzl3MEJBUXNGQURBLwpNU1F3SWdZRFZRUUtFeHRFYVdkcGRHRnNJRk5wWjI1aGRIVnlaU0JVY25WemRDQkRieTR4RnpBVkJnTlZCQU1UCkRrUlRWQ0JTYjI5MElFTkJJRmd6TUI0WERURTJNRE14TnpFMk5EQTBObG9YRFRJeE1ETXhOekUyTkRBME5sb3cKU2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGakFVQmdOVkJBb1REVXhsZENkeklFVnVZM0o1Y0hReEl6QWhCZ05WQkFNVApHa3hsZENkeklFVnVZM0o1Y0hRZ1FYVjBhRzl5YVhSNUlGZ3pNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFuTk1NOEZybExrZTNjbDAzZzdOb1l6RHExelV
tR1NYaHZiNDE4WENTTDdlNFMwRUYKcTZtZU5RaFk3TEVxeEdpSEM2UGpkZVRtODZkaWNicDVnV0FmMTVHYW4vUFFlR2R4eUdrT2xaSFAvdWFaNldBOApTTXgreWsxM0VpU2RSeHRhNjduc0hqY0FISnlzZTZjRjZzNUs2NzFCNVRhWXVjdjliVHlXYU44aktrS1FESVowClo4aC9wWnE0VW1FVUV6OWw2WUtIeTl2NkRsYjJob256aFQrWGhxK3czQnJ2YXcyVkZuM0VLNkJsc3BrRU5uV0EKYTZ4Szh5dVFTWGd2b3BaUEtpQWxLUVRHZE1EUU1jMlBNVGlWRnJxb003aEQ4YkVmd3pCL29ua3hFejB0TnZqagovUEl6YXJrNU1jV3Z4STBOSFdRV002cjZoQ20yMUF2QTJIM0Rrd0lEQVFBQm80SUJmVENDQVhrd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFPQmdOVkhROEJBZjhFQkFNQ0FZWXdmd1lJS3dZQkJRVUhBUUVFY3pCeE1ESUcKQ0NzR0FRVUZCekFCaGlab2RIUndPaTh3YVhOeVp5NTBjblZ6ZEdsa0xtOWpjM0F1YVdSbGJuUnlkWE4wTG1OdgpiVEE3QmdnckJnRUZCUWN3QW9ZdmFIUjBjRG92TDJGd2NITXVhV1JsYm5SeWRYTjBMbU52YlM5eWIyOTBjeTlrCmMzUnliMjkwWTJGNE15NXdOMk13SHdZRFZSMGpCQmd3Rm9BVXhLZXhwSHNzY2ZyYjRVdVFkZi9FRldDRmlSQXcKVkFZRFZSMGdCRTB3U3pBSUJnWm5nUXdCQWdFd1B3WUxLd1lCQkFHQzN4TUJBUUV3TURBdUJnZ3JCZ0VGQlFjQwpBUllpYUhSMGNEb3ZMMk53Y3k1eWIyOTBMWGd4TG14bGRITmxibU55ZVhCMExtOXlaekE4QmdOVkhSOEVOVEF6Ck1ER2dMNkF0aGl0b2RIUndPaTh3WTNKc0xtbGtaVzUwY25WemRDNWpiMjB2UkZOVVVrOVBWRU5CV0RORFVrd3UKWTNKc01CMEdBMVVkRGdRV0JCU29TbXBqQkgzZHV1YlJPYmVtUldYdjg2anNvVEFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQTNUUFhFZk5qV0RqZEdCWDdDVlcrZGxhNWNFaWxhVWNuZThJa0NKTHhXaDlLRWlrM0pIUlJIR0pvCnVNMlZjR2ZsOTZTOFRpaFJ6WnZvcm9lZDZ0aTZXcUVCbXR6dzNXb2RhdGcrVnlPZXBoNEVZcHIvMXdYS3R4OC8Kd0FwSXZKU3d0bVZpNE1GVTVhTXFyU0RFNmVhNzNNajJ0Y015bzVqTWQ2am1lV1VISzhzby9qb1dVb0hPVWd3dQpYNFBvMVFZeiszZHN6a0RxTXA0ZmtseEJ3WFJzVzEwS1h7UE1UWitzT1BBdmV5eGluZG1qa1c4bEd5K1FzUmxHClBmWitHNlo2aDdtamVtMFkraVdsa1ljVjRQSVdMMWl3Qmk4c2FDYkdTNWpOMnA4TStYK1E3VU5LRWtST2IzTjYKS09xa3FtNTdUSDJIM2VESkFrU25oNi9ETkZ1MFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBb2ZJV051MThhSnVPcnN3QmNkT2U4M3R1aldnZ2lReXRVVjFDajVYWG8zY0EzNS9mCnFBc0ZUeklEY3BSbGFMYnpId3V3Wmw5Y1IrMm5EQ1pTMjhWWFlpdytKRC9BemdrcXNMcUk2d2JRYXBzQmtZWFMKNG5PVStnOFIxWDBybnZySmJyQTV4cUlJYkozTTZqNUtNdnhGS28wRXdhc0FjY2JiVUY5bjB0Nkcza3huM1ltUgp6Tkd4eHVtdnhXams1Y2RJYzNrQzJPVW5GS0Y4bld6YlpvYmI2T09SdlJoSURZblh3OTF2MjFGMEJ2dENtRmNBCkRoNGpldSsvM0pUNUtwQmR2QUc4cjdpTXBuSEpoVTVqWGpNeU9HK0xuRy9ycm5HMkZpekdsdVB0MCs0ZStGUUkKcVljUFQzVzJNQXZkOXNBM0QxOGxRbzYzTS9mUzJiM0hhU2J0VjRvWmY1L3NPMk14RU9WelRHdTNDcWJONE5HKwpkTzJxejFhbExCaUZmVWM0R2ZRWlhGaUtoMXNrOXdCbnN5aGphRmZ1THpsdHEwODdJMkthK1ZUWlMxUEpSRWxnCm4zdTBjUWZoQ2N6UXllMndWOXpET2VWZTF5MGMvNnRFYmE2WUJ4ZHZkZzA4WkovRDBhMEtyb1pVSTFuUXNkSngKTytDc3U5REtiNE5nMkJidmRZWkdVYmtIK1JQNTRRR2tLVWdidXE1UjBtcjRTUjZVTCtETmYzcXpvNWt2YjF1UQpZcWloNlhQVUNVRU85M05TVjYxM1RJRU5NSnJiNWFsZEtSSE9mWlYvbTVBOGVTL2JsUVhyTldxQkcvTi9kbXJGCmMyZzI0YkRjd3k5cjMvcWQ3L0xNbGYxVFV3MkZzNHczbHZUckVqeXBYRmFDcFFEbGRzTElGMHFlSlZzQ0F3RUEKQVFLQ0FnQXY5Zk13UnpzTisrdlF4cWd5M3JwM1gzbkpOU3BWakVTVUVTdVNQSTFGWXd3R0xtSGRjWTRiK3pMYwpMeWl0VDJsSEszNE5nM1pmOHZrQzl5S1k1YVBRZGt2ZERtaDZYR3FoTmswd1ZhOUpzeWhPd2JSSHpuVXpiVjBaCnZkMDZVd2x1MTQvMHpLMzBCUFBYOTZTZjN1aFpCclIrNnJiUisxT2VSUE1KbDArWDdFYmliRWlhd1F1R1hsVHAKQVB5eE5FaTNzZ0h2M0VhcnJIdXNYNzNHYW5BY1U3RW9zRlUrZFRGSktEcGxXSVVsUUNwajFYZzF0aVZKMWxFYQo4Wit0UkY0T1BQRjFsUkZLaGU1cHBXSjJWbkVzRjVUZ09xRXc0NHBLbk80Zlo5ZGFhVzRRbTBxSmNtOU5XQTRoCndwSDA3czRmcGt6eG5qU1JsbmFDZDlyandGeVBsSkJzUXNhVlFFNzlpQzJZMTRnTk9KQ0xyMXRKSEQ2ODN3bW4KS3ZNOHZpOTdHTmIybXZHeWNtZnloNVpzTFBpTWNqOFFER3VWZU53dlNESXpybnhqVkZlc0liTWt5UlZRem9IVApTTHRQbXdVR3lwRHVrMDhaZytsT0lYOC85K3lqMER3MDRqenllTVptYlFVdkd2N2lNWjFUaHdaRHF1YkJXV3J4CmtYTmJwTG9BMGxrcHh5bjdGam9Ya20zM2ZKQURjd2xWSS82WFNrSm1FaFVlZmZnaFFSMGNyVGphQVd1Qkx2Qk0KT0s5aEEzT3RTN2F0S2FDb1lvSmRrYkpHQTdWdytNNzA4NEJOTGhxM1Fyckg4S3M3Z05pdC9NN3lxSnU1alBaZgo2SE1seHNyWU9
NVUhuVlk4VDkwN0Q3cS9ORUNnRThzODhnZzAyQ3JNWTFqanE4UnBpUUtDQVFFQTE2UHJaMUEwClNISS83akdmS3BETkJzQ0xrVUFxRERKSzQ0dFdJYmJBUXFhRTN1eDh4bkFlU2NjSHozbS9ScEpPSGtteHZTZlgKbTJ1Wk8veGtNTWhYK2lwOHdFOHZibzR1enVNYitTSXE3bWpialJkK1JJczJ5NHJsZVQ2NGVjRWc4R2pZckExZgpiSEI0MmhQclVTcXpxUVIwOTZocm1Lb1diU0RDZDZwOUVNeWVzT3IwTjdtQmJYVVZPazJxZGtYRlZWbHBlUDdpClFxWGdRUUI0bHgzLzJJdlpBMlhJUXlQdGJ0RWVRbmgyQ3FNM2NDMzR0VEVjZ244K0VwNG9SWmkwTTBHaUY3bXgKOTEvZHY2THZlNTR5K1pON1lXd1NFQ09ubzd5bDlvTlBZVnVGMGRiMjh0elppMThCeHJTQ2JESE1XbExvUzhWNgpXTEo0OGlSODJDYkc1d0tDQVFFQXdFRjM4KzYyeDhDU2x0blZZNlJaN0J0NEdiNEJqVWhWYXZ0NFkxUGFlbXFNCjFidFVnR2JyUnBoNHFUSEFTckUwUUZLeVZKYnlCUkJyRHIxWHU4WWRSVXQzZC92VzlIR1dPd1BKdTN2M3pLbHMKQ2xsZnpFY3J5L1l2aHAzSzlEcGR6OE1icHdueW5xcGV6b0xMNlJpL3JnK0hyTzBueXd1RSt0T2xYVFo2eUtadApHWVdTSVBWaG00NUJkc2ZxUzhnYjVvbjA0bHh4bnhxVnJvN0c0TUR6cmVEYlFhaGdyS3VuRWxwajZ4eW1PVWpBCkdCZDR3QUVrUExxNUUrRWcreDY4TkRLVTYwK29ybFhLWVhDQm5HSFZOQ3BVcmswVXkrcHFZZmFEN3VuR2VzaHMKSEwra3lXbXl5a3ErTmNKbnRXMFNSNy9sU1IvZUFhVEZyVzZVaXV0RGJRS0NBUUVBemhRYU9PNmVPSW51N016QgpScVdCT3EyeDg4cjFKQmpBRnZzbkFpc3JTOGJsZmtGVTdXREdvVTB5K3FWb0ZhSm1RMjI4RFlCUS9YZnp4aTdxCjlPL1JuQU1VbTVoUlJQOWVYbHNPZGFXZ2o1em9ETXRoNFZHRnVUbHhHZERGN1oyU3hBMysyMVlnVm5xYUZCY3IKTUxOMVpOWWNqajJITGl1R0tSNUFtcW4wd2FRN0YrcENJQ3NKTkxqSzQ2QXJnc0lrMXU4TzdCSHgyeTI0eFlZVQp1SjV6emRmQU9nNEFONkhURzY5L2twaWFmb29DeGhNNDlyZ0xmZTdxUEZLbk8vTzJhckdUbmNiWi9BWEMzb3h4Ci81dHRMYlF6R2lSMGtyWHdWSHRKdys4elltQmIzL0RtcWF4RHZueTZMdEo5UGJiTmk1aGw1VnZCRTVqa0dzeWgKL3RQNEN3S0NBUUJ2R1dZb0lKcWZkRGxCMHovdEJOeXlCRzJ5OG9vVEN1blJtT0JKQmZ3TEllZWcyMUJKb3kveQo2OGxPZk9HU1NEVFp0dkEyMGNPcUNZTFVVYmFSWERzdUFCNVp4NzdBSTZPZEZ1Tk01S2FlTG9td3NWVWF4MFlYCjUzd3ZYcUFaNG1DejN4dnJ1MlBwTEtyOHk3anFTdEw1MHgra1hxZlFQaWZxaXNQVXlkYktmT0l2RFhFVWVyaWQKRytmWXJFNUkzS3JDM3BZVStUWmJ1eEVrZm4yUEEvSE5XVk5hN2VKdjVnSDJLU1gwaCtuRzBMT3hPRjhmRlluTApUbHdGa09OdU9xU254Vk1wYUM4aUQ1R1VIVi9JN3dBMTFRQjZlVEM3Wmd0ejhQRHM3MHN6U1A2dzNrNXIxaGpyCnJhV2RpMnBDL1hUQzRiR3VRQ3dhNXcwVTNBSWJCVGxCQW9JQkFEc1RONGhvclVHNWw3MXhLZk5ibVBTbDZ6RlIKYTJ4d2U2VVZPOVZzMFpHeEdLWWJSN1VuVDBDL1FqUiswS2JsbE9leDdFY3cyMklCcmFFVzBGbXpuVnoyUW9FNwpMUE5COXhyTTFEeE56UjZEbFBUeERMcEFGWVlUcm40SWY1cjFVdVdpc2lMdmd6T2xGTlVITnN5UFJIZWNGblhUCnNhTk9JWkgrQTJ5KzF3QWdpSFZIS2JPRGRHeVFQVlQ0TXFFWkJaY2pQcmRBekNKcnloSHlYdHBqRjFSdlFEYTMKTVM3U3JVTGM4djJGQWJ1VG1QZ2R1ZHBKd1Q4dENCa2VRKzZ4YmJWN3YrZzBEMG5EWFNIZFVwNXFyUzcrTnhtVwp4NWV4UHo1VENhYXcxSnkzWjRmT1MzMTV6eHJGdmRHTmhWRXhMMzRlUVlzOHRYN0N0VWxuWkNray9zYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0=---apiVersion: v1kind: Secretmetadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard-csrf  namespace: kubernetes-dashboardtype: Opaquedata:  csrf: ""---apiVersion: v1kind: Secretmetadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard-key-holder  namespace: kubernetes-dashboardtype: Opaque---kind: ConfigMapapiVersion: v1metadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard-settings  namespace: kubernetes-dashboard---kind: RoleapiVersion: rbac.authorization.k8s.io/v1metadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard  namespace: kubernetes-dashboardrules:  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.  - apiGroups: [""]    resources: ["secrets"]    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]    verbs: ["get", "update", "delete"]    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.  
- apiGroups: [""]    resources: ["configmaps"]    resourceNames: ["kubernetes-dashboard-settings"]    verbs: ["get", "update"]    # Allow Dashboard to get metrics.  - apiGroups: [""]    resources: ["services"]    resourceNames: ["heapster", "dashboard-metrics-scraper"]    verbs: ["proxy"]  - apiGroups: [""]    resources: ["services/proxy"]    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]    verbs: ["get"]---kind: ClusterRoleapiVersion: rbac.authorization.k8s.io/v1metadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboardrules:  # Allow Metrics Scraper to get metrics from the Metrics server  - apiGroups: ["metrics.k8s.io"]    resources: ["pods", "nodes"]    verbs: ["get", "list", "watch"]---apiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard  namespace: kubernetes-dashboardroleRef:  apiGroup: rbac.authorization.k8s.io  kind: Role  name: kubernetes-dashboardsubjects:  - kind: ServiceAccount    name: kubernetes-dashboard    namespace: kubernetes-dashboard---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  name: kubernetes-dashboard  namespace: kubernetes-dashboardroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: kubernetes-dashboardsubjects:  - kind: ServiceAccount    name: kubernetes-dashboard    namespace: kubernetes-dashboard---kind: DeploymentapiVersion: apps/v1metadata:  labels:    k8s-app: kubernetes-dashboard  name: kubernetes-dashboard  namespace: kubernetes-dashboardspec:  replicas: 1  revisionHistoryLimit: 10  selector:    matchLabels:      k8s-app: kubernetes-dashboard  template:    metadata:      labels:        k8s-app: kubernetes-dashboard    spec:      containers:        - name: kubernetes-dashboard          image: kubernetesui/dashboard:v2.0.0-beta4          imagePullPolicy: Always          ports:            - containerPort: 8443              protocol: TCP          args:            - --auto-generate-certificates            - --namespace=kubernetes-dashboard            - --token-ttl=43200            # Uncomment the following line to manually specify Kubernetes API server Host            # If not specified, Dashboard will attempt to auto discover the API server and connect            # to it. Uncomment only if the default does not work.            
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
    traefik.ingress.kubernetes.io/redirect-entry-point: https
spec:
  rules:
  - host: csdd.xxxx.com
    http:
      paths:
        - backend:
            serviceName: kubernetes-dashboard
            servicePort: 443
  tls:
  - secretName: dashboard-tls-cert
# Create a token for logging in to kubernetes-dashboard
# Generate the token ServiceAccount
kubectl create sa dashboard-admin -n kube-system
# Grant the token access rights
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Get the token secret name
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
# Get the token value that dashboard.kubeconfig will use
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=dashboard.kubeconfig
# Set client authentication parameters, using the token created above
kubectl config set-credentials dashboard_user \
  --token=${DASHBOARD_LOGIN_TOKEN} \
  --kubeconfig=dashboard.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=dashboard_user \
  --kubeconfig=dashboard.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
# Add a hosts entry for the Ingress hostname, then log in at:
https://csdd.xxxx.com/#/overview?namespace=default
# kubernetes-dashboard relies on the Metrics API to display CPU and memory usage, so metrics-server must be deployed
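Before importing dashboard.kubeconfig on the login page, the embedded token and CA can be sanity-checked from the command line (a minimal check, not part of the original procedure; it assumes kubectl and the dashboard.kubeconfig file generated above are present on the current machine):
# Any RBAC or CA problem in the kubeconfig shows up here
kubectl --kubeconfig=dashboard.kubeconfig get pods -n kubernetes-dashboard
# Show the cluster, user and context that were embedded
kubectl --kubeconfig=dashboard.kubeconfig config view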

Deploy metrics-server

# Performed on win on Ubuntu
# Create the metrics-server certificate
cat << EOF | tee /apps/work/k8s/cfssl/k8s/metrics-server.json
{
  "CN": "metrics-server",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
### Generate the certificate
cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes /apps/work/k8s/cfssl/k8s/metrics-server.json | cfssljson -bare ./metrics-server
# Create metrics-server-secrets.yaml
# Base64-encode the certificate and key
cat metrics-server.pem | base64 | tr -d '\n'
cat metrics-server-key.pem | base64 | tr -d '\n'
vi metrics-server-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-certs
  namespace: kube-system
type: Opaque
data:
  metrics-server.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVYkloOTQ3Z3NET2gxdVplVnBoMU9GVmhYeHA0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05UQXpOVGd3TUZvWERUSTVNRGN3TWpBek5UZ3cKTUZvd2NqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUmN3CkZRWURWUVFERXc1dFpYUnlhV056TFhObGNuWmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTUlvdHhsakpLcCtyb1hGcGJJWXRuNW1mVXVyREM1bUlkb2Z0RTNSVGhNU1pPSCt0aVVoMDdQRQpnb0xPOG1lSkxaS21ob1BUYzhJWTJYVjdiZzJWWFpRVUd6MFJuMExmNWdWam5UN29yMFFnZzErYnJnZU5wWUtpCjVoNm5ISVE3ZGlKYk10cUFndm16RGR6bWdoUXd2SHBKTzh4bEQwKzRwT0VHT2VtQkNPU3BsaFhrenR3UWQ3ZHYKY2x1QUljQUdiUGF6dzI4VkJJU2F4bCtrZnZwNzIyeEkvVy9DL3pRS1JnN053UG9IaVpFWm9QcGxPY001cGpvUwpJeEdnWVZEYjB6OGlqZWR3RjZmcE9RZkFOcitvQnVONnZnMXAzd2Jud2tKTWtEQUV2MzBXZG1BUzB5STJicC9RCkJZYjU2VWxGTXI4anNoWHJ5dlVsZ3F3S0hscFh0WkVDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCVEp6cVJBMWdIN***d3B3TG01ZEtWUHFvdG43VEFmQmdOVkhTTUVHREFXZ0JSeDRLY0FSY2FrClIvYnhtd29UQmVEcyt4QW9hVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcTgyS0JRMWJBM1pjeG9OZGFJVVUKNjVvSkZhM3paM2k5SDk1QWZvU0dXQWQvbFU3L3B2Qkt2K0EwbS9qQ0tMS1FpQ0xXRGtHYzFUNS8xTzhPYTg0YgpDV3I5M3pOWldRb2N6UWJEaHFCRnZNUVp0azRYVVkrMjR3Yjd3cmJCRHc2QVY2R0l2bVMrYm91eFRVd29sbmRMCk5FS2EvcHNvQUtaRUFJZkJUaCtMNVpMQ09GOXFUWEMyOGtnN1czak4vMzBiYlk5UE5ObVpLcGNaNEpEVjA5aGYKU3RaTjZuOVFXK3ZDcFFoZXVISWVORlR2RnQ5bGtSMVBFYUtHUjFiWEdyeUNHOHNTeXVDc0xER1lnVlhmYVZtYgp3dTlnSG1JS2E2aDZWVmVIWitMbVFmZmxqcEdPRStKV1l1TWRPamtHYUYxdEtLUWZFelZGN3BxT0VTQXkrV3hpCnVBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  metrics-server-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd2lpM0dXTWtxbjZ1aGNXbHNoaTJmbVo5UzZzTUxtWWgyaCswVGRGT0V4Sms0ZjYyCkpTSFRzOFNDZ3M3eVo0a3RrcWFHZzlOendoalpkWHR1RFpWZGxCUWJQUkdmUXQvbUJXT2RQdWl2UkNDRFg1dXUKQjQybGdxTG1IcWNjaER0Mklsc3kyb0NDK2JNTjNPYUNGREM4ZWtrN3pDVVBUN2lrNFFZNTZZRUk1S21XRmVUTwozQkIzdDI5eVc0QWh4QVpzOXJQRGJ4VUVoSnJHWDZSKytudmJiRWo5YjhML05BcEdEczNBK2dlSmtSbWcrbVU1Cnd6bW1PaElqRWFCaFVOdlRQeUtONTNBWHArazVCOEEydjZnRzQzcStEV25mQnVmQ1FreVFNQVMvZlJaMllCTFQKSWpadW45QUZodm5wU1VVeXZ5T3lGZXZLOVNXQ3JBb2VXbGUxa1FJREFRQUJBb0lCQUQzaEtoSGVSalpNYjZUVQp6QzFKc1FmeDlDYmttZHBEMUxBbkFKajRoekNPNFRZaHJyMkEzVzdpeDFHWFVTeHNUT2o3UjgzRjI1UFZ4YUNxCnVQVjlVRGk4ZTczbjJ1RSthSm41R0ltUE1TUytWQUJwcG5wank0Y3FFYnRkT1RwdmxRUDRHdW9Hb1RlaHVGNVoKM01WQWp5Rk9LOCt4VkFMdGJ5Y0VpL3ArbEc0RGkvOThIcUlDQngwSlhCUnJoV05lWUdZL0c3eGNWT2pCNUl5SQpPNVpoZ1I0Sk9yODloNVZ1RHdIY3E2UVlLQ2sxTktQZzc0Y3BOY2J5ZVZEM0FhYVRHd25QU3BMd0hGaElzTGpNCkllaEJqZzkrZDdyRU8xMHU0azhKWW5qYUdNMzRMM0RlTVVrck95NUMzRjY3RFNwaTJVZUhjOVh7YzViVUFwb0gKTE1zRUxuMENnWUVBenV3STRSN0FrYmZ2VnVFR0hOa0FFNjgzdTJoWk1iUFdwSHVVTGQ5UzJnUzYzODVWUFdJSApiQXp6WGZuTjh2U2dHNVVQdzUxa1VLZVppY0pORTVBWWtOVDRwMWxoTFdKSkwxSWRSdEV3VU5oblVLNlczRWlMCmJLeDhhalk3dkZDV0ZKUmRTUHJYLzViTWU4TVBCWWNTT0FkZEErZFhlaGd0K2x0WEU3LzE1cGNDZ1lFQThEVzcKOEIyZGNYLzZqMVF4UkhEc3ZGUDYxaWxTREJhMEtKUGlobnpSZWVwVHRFc0hvRitBODk2SXRDZFFMSTJZcEZyRApBU1dSSU1VQVVzVE1uMStvZFprOGovd21QRkxzUmRpSVJWZC80ZHdCTmlDNHJxdnkwQTFxUVNJUXF1MC9CcFV2ClRpMjhZeURRdHh0Wmg1d0NDQUx2a0Rqb2N4cXJzbHEwRDViTnNoY0NnWUFFNjB1U3ZuNXBlR3Z5THN0TWFFUSsKTTQ0cG9HTnVjUTRMUHZ6WTBsUlA1b2ppeFM3MWxKMEVqcEpDaTZ1L3VIdXQ3QzlCd1Q0ZlJMeTdyNmpSYkQySgpRK2JkWTV5UnphSmJ3Nkg2aXdLUkNYUDdVUXM1Rldockh3YWVOOGZYeERxdEpwSEpLRjEyTUFtUWI2U3R4dlpCCjZycmxXdHlUaEh2alZnU043YVJVNVFLQmdHbVFJN3lkTnpERy9sVDR1Z0lLNG03Tk5VSGl2TlRsTVYxWHlFZzAKR0ZiTW5PWnh5ck02NVUvRzd5ckUwQjRVU0EyS2VZSktnU0gya1hMT1crSjZSbTBQMzZhak9DWndocmNYTnFQSwpsVCtyMExoNTNzK2NiMFB4Y1UyWWE5ekNFRjJUT0V2U0c2VXdxYWllazFUZVFhSkZzQVFnamo3dmJKOGY3MXVlCmVWMFhBb0dCQUlQWDN1OTJtU3o5clpqOTN3NFNKbUk2K0tkY3BtTGg0TE5IcUdBOVJSNi80T0NWWDBHNHVtZ1YKMkxUOU1MY05CdUVFZE5nTHRHeGIzdTI5cUlLZHZpcDhBZzlTbHQvMlBIcnlsLzIzWWU0bURTWDVUOXNrVXhaRgpjVGxvN3QxSnRZN3NaRU00Vng4cWxQbDcwWXMvSWRuWmhWaFU2d1F2ZGp0TGk1UlU4L2ttCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
# resource-reader.yaml
vi resource-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# metrics-server-service.yaml
vi metrics-server-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
# metrics-apiservice.yaml
vi metrics-apiservice.yaml
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
# auth-reader.yaml
vi auth-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# auth-delegator.yaml
vi auth-delegator.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# aggregated-metrics-reader.yaml
vi aggregated-metrics-reader.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
# metrics-server-deployment.yaml
vi metrics-server-deployment.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      tolerations:
        - effect: NoSchedule
          key: node.kubernetes.io/unschedulable
          operator: Exists
        - key: NoSchedule
          operator: Exists
          effect: NoSchedule
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      - name: metrics-server-certs
        secret:
          secretName: metrics-server-certs
      containers:
      - name: metrics-server
        image: juestnow/metrics-server-amd64:v0.3.3
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --tls-cert-file=/certs/metrics-server.pem
        - --tls-private-key-file=/certs/metrics-server-key.pem
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --kubelet-insecure-tls
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        - name: metrics-server-certs
          mountPath: /certs
# Create the metrics-server service
kubectl apply -f .
# Verify metrics-server
kubectl top node
[root@master ~]# kubectl top node
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master   177m         7%     2057Mi          35%
[root@master ~]# kubectl top pods -A
NAMESPACE              NAME                                        CPU(cores)   MEMORY(bytes)
clusterstorage         nfs-client-provisioner-5f6bc44cd7-fjr7f     3m           15Mi
kube-system            coredns-597b77445b-fhxvr                    4m           23Mi
kube-system            kube-router-5tmgw                           9m           16Mi
kube-system            metrics-server-66d78c47-zn679               1m           14Mi
kube-system            traefik-578574dfdb-dzl22                    6m           41Mi
kubernetes-dashboard   dashboard-metrics-scraper-fb986f88d-rc6zs   1m           25Mi
kubernetes-dashboard   kubernetes-dashboard-668c4f84bc-w6vw6       2m           40Mi
# CPU and memory values are returned correctly
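If kubectl top returns errors instead, the aggregated Metrics API can be queried directly to narrow down whether the APIService registration or the metrics-server pod is at fault (a small troubleshooting sketch, not part of the original procedure; it assumes the APIService created above has been applied):
# The APIService should report Available=True once metrics-server is up
kubectl get apiservice v1beta1.metrics.k8s.io
# Raw node metrics straight from the aggregation layer
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
# Raw pod metrics for a single namespace
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/kube-system/pods"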

