(9) kubelet deployment

Create the required directories

# on all 3 masters and 2 nodes
mkdir /k8s/kubelet

Create kubelet-bootstrap.kubeconfig

Create the tokens

cd /opt/k8s-playbook/ssl

kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:master-01 \
    --kubeconfig ~/.kube/config > master-01-token

kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:master-02 \
    --kubeconfig ~/.kube/config > master-02-token

kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:master-03 \
    --kubeconfig ~/.kube/config > master-03-token
    
kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:node-01 \
    --kubeconfig ~/.kube/config > node-01-token
    
kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:node-02 \
    --kubeconfig ~/.kube/config > node-02-token

Generate the BOOTSTRAP_TOKEN for all 5 hosts that will run kubelet ahead of time and save each one to a file (a loop sketch follows below).
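
The five commands above can be collapsed into a loop; a minimal sketch using the same host names and output files:

for host in master-01 master-02 master-03 node-01 node-02; do
    kubeadm token create \
        --description kubelet-bootstrap-token \
        --groups system:bootstrappers:${host} \
        --kubeconfig ~/.kube/config > ${host}-token
done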

Set the cluster parameters

kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/k8s/cert/ca.pem \
    --embed-certs=true \
    --server=https://192.168.104.61:8443 \
    --kubeconfig=kubelet-bootstrap.kubeconfig

Set the client authentication parameters

kubectl config set-credentials kubelet-bootstrap \
    --token=<the current host's token> \
    --kubeconfig=kubelet-bootstrap.kubeconfig

Set the context parameters

kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap.kubeconfig

Set the default context

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
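
Before distributing, the generated file can be inspected with a standard kubectl command:

kubectl config view --kubeconfig=kubelet-bootstrap.kubeconfig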

You can generate kubelet-bootstrap.kubeconfig once with the commands above, then copy it to each node host and change only the token value; the server address does not need to change because it points at the VIP.
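
Alternatively, a per-host kubeconfig can be generated in one pass from the token files created earlier (a sketch only; each resulting kubelet-bootstrap-<host>.kubeconfig would then be copied to that host as /etc/k8s/kubelet-bootstrap.kubeconfig instead of editing the token on every machine):

for host in master-01 master-02 master-03 node-01 node-02; do
    kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/k8s/cert/ca.pem \
        --embed-certs=true \
        --server=https://192.168.104.61:8443 \
        --kubeconfig=kubelet-bootstrap-${host}.kubeconfig
    kubectl config set-credentials kubelet-bootstrap \
        --token=$(cat ${host}-token) \
        --kubeconfig=kubelet-bootstrap-${host}.kubeconfig
    kubectl config set-context default \
        --cluster=kubernetes \
        --user=kubelet-bootstrap \
        --kubeconfig=kubelet-bootstrap-${host}.kubeconfig
    kubectl config use-context default --kubeconfig=kubelet-bootstrap-${host}.kubeconfig
done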

Distribute

cp kubelet-bootstrap.kubeconfig /etc/k8s/

scp kubelet-bootstrap.kubeconfig root@master-02:/etc/k8s/
scp kubelet-bootstrap.kubeconfig root@master-03:/etc/k8s/
scp kubelet-bootstrap.kubeconfig root@node-01:/etc/k8s/
scp kubelet-bootstrap.kubeconfig root@node-02:/etc/k8s/

View each node's token

kubeadm token list --kubeconfig ~/.kube/config
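
kubeadm stores each token as a Secret named bootstrap-token-<id> in kube-system, so they can also be cross-checked there:

kubectl get secret -n kube-system | grep bootstrap-token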

Configure kubelet-config.yaml

vi kubelet-config.yaml
#######################################
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: "192.168.104.61"
staticPodPath: ""
syncFrequency: 1m
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodURL: ""
port: 10250
readOnlyPort: 0
rotateCertificates: true
serverTLSBootstrap: true
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/k8s/cert/ca.pem"
authorization:
  mode: Webhook
registryPullQPS: 0
registryBurst: 20
eventRecordQPS: 0
eventBurst: 20
enableDebuggingHandlers: true
enableContentionProfiling: true
healthzPort: 10248
healthzBindAddress: "192.168.104.61"
clusterDomain: "zhfi.k8s."
clusterDNS:
  - "10.254.0.2"
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 1m
imageMinimumGCAge: 2m
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
volumeStatsAggPeriod: 1m
kubeletCgroups: ""
systemCgroups: ""
cgroupRoot: ""
cgroupsPerQOS: true
cgroupDriver: cgroupfs
runtimeRequestTimeout: 10m
hairpinMode: promiscuous-bridge
maxPods: 220
podCIDR: "172.18.0.0/16"
podPidsLimit: -1
resolvConf: /etc/resolv.conf
maxOpenFiles: 1000000
kubeAPIQPS: 1000
kubeAPIBurst: 2000
serializeImagePulls: false
evictionHard:
  memory.available:  "100Mi"
  nodefs.available:  "10%"
  nodefs.inodesFree: "5%"
  imagefs.available: "15%"
evictionSoft: {}
enableControllerAttachDetach: true
failSwapOn: true
containerLogMaxSize: 20Mi
containerLogMaxFiles: 10
systemReserved: {}
kubeReserved: {}
systemReservedCgroup: ""
kubeReservedCgroup: ""
enforceNodeAllocatable: ["pods"]
#######################################

Distribute

cp kubelet-config.yaml /etc/k8s/

scp kubelet-config.yaml root@master-02:/etc/k8s/
scp kubelet-config.yaml root@master-03:/etc/k8s/
scp kubelet-config.yaml root@node-01:/etc/k8s/
scp kubelet-config.yaml root@node-02:/etc/k8s/
  • address: change this to the host's own IP on each host
  • healthzBindAddress: change this to the host's own IP on each host (a scripted sketch follows below)
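
A sed-based sketch can patch both address fields (address and healthzBindAddress) per host before copying; the IPs for the other four hosts below are placeholders for illustration only, substitute your real addresses:

declare -A host_ip=( [master-02]=192.168.104.62 [master-03]=192.168.104.63 [node-01]=192.168.104.64 [node-02]=192.168.104.65 )
for host in "${!host_ip[@]}"; do
    sed "s/192.168.104.61/${host_ip[$host]}/g" kubelet-config.yaml > kubelet-config-${host}.yaml
    scp kubelet-config-${host}.yaml root@${host}:/etc/k8s/kubelet-config.yaml
done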

Create kubelet.service

vi /etc/systemd/system/kubelet.service
#######################################
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
WorkingDirectory=/k8s/kubelet
ExecStart=/opt/k8s/bin/kubelet \
  --bootstrap-kubeconfig=/etc/k8s/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/k8s/cert \
  --network-plugin=cni \
  --cni-bin-dir=/opt/cni/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \
  --root-dir=/k8s/kubelet \
  --kubeconfig=/etc/k8s/kubelet.kubeconfig \
  --config=/etc/k8s/kubelet-config.yaml \
  --hostname-override=master-01 \
  --image-pull-progress-deadline=15m \
  --pod-infra-container-image=rancher/pause:3.1 \
  --volume-plugin-dir=/k8s/kubelet/kubelet-plugins/volume/exec/ \
  --logtostderr=true \
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
#######################################
  • --hostname-override: change this to the host's own hostname (see the sketch below)
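
The same kind of sed sketch can generate a per-host unit file from the one above (host names taken from this guide):

for host in master-02 master-03 node-01 node-02; do
    sed "s/--hostname-override=master-01/--hostname-override=${host}/" /etc/systemd/system/kubelet.service > kubelet-${host}.service
    scp kubelet-${host}.service root@${host}:/etc/systemd/system/kubelet.service
done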

Copy the binary and start the service

cd /opt/k8s-playbook/
cp kubernetes/server/bin/kubelet /opt/k8s/bin/

This assumes the relevant components have already been unpacked into /opt/k8s-playbook/.

scp /opt/k8s/bin/kubelet root@master-02:/opt/k8s/bin/
scp /opt/k8s/bin/kubelet root@master-03:/opt/k8s/bin/
scp /opt/k8s/bin/kubelet root@node-01:/opt/k8s/bin/
scp /opt/k8s/bin/kubelet root@node-02:/opt/k8s/bin/

# Run on every kubelet host (3 masters and 2 nodes)
chmod +x /opt/k8s/bin/kubelet

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
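
If kubelet does not come up cleanly, the usual systemd commands show its state and logs:

systemctl status kubelet
journalctl -u kubelet -f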

Authorization

Grant kube-apiserver permission to access the kubelet API

kubectl create clusterrolebinding \
    kube-apiserver:kubelet-apis \
    --clusterrole=system:kubelet-api-admin \
    --user kubernetes-master
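
To confirm the binding took effect, kubectl can impersonate the kubernetes-master user (nodes/proxy is one of the resources covered by system:kubelet-api-admin):

kubectl auth can-i get nodes/proxy --as kubernetes-master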

Bootstrap Token Auth and granting permissions

kubectl create clusterrolebinding \
    kubelet-bootstrap \
    --clusterrole=system:node-bootstrapper \
    --group=system:bootstrappers

Automatically approve CSR requests and issue kubelet client certificates

vi csr-crb.yaml
##########################################
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-client-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-server-cert-renewal
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: approve-node-server-renewal-csr
  apiGroup: rbac.authorization.k8s.io
##########################################

Apply the configuration

kubectl apply -f csr-crb.yaml
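
Optionally confirm the objects defined in csr-crb.yaml now exist:

kubectl get clusterrole approve-node-server-renewal-csr
kubectl get clusterrolebinding auto-approve-csrs-for-group node-client-cert-renewal node-server-cert-renewal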

Check kubelet status

kubectl get csr
kubectl get no

Manually approve the server cert CSRs

kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
kubectl get csr
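
After approval, each host should hold its bootstrapped certificates under the --cert-dir configured above (the file names are generated by kubelet itself):

ls -l /etc/k8s/cert/kubelet-*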
