Create directories
Create the following paths according to your own environment; they hold the k8s binaries and the image files used during the deployment.

mkdir -p /approot1/k8s/{bin,images,pkg,tmp/{ssl,service}}
Disable the firewall

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl disable firewalld"; \
ssh $i "systemctl stop firewalld"; \
done
Disable SELinux
Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "setenforce 0"; \
done

Permanently:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sed -i '/SELINUX/s/enforcing/disabled/g' /etc/selinux/config"; \
done
Disable swap
Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "swapoff -a"; \
done

Permanently:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"; \
done
Enable kernel modules
Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "modprobe ip_vs"; \
ssh $i "modprobe ip_vs_rr"; \
ssh $i "modprobe ip_vs_wrr"; \
ssh $i "modprobe ip_vs_sh"; \
ssh $i "modprobe nf_conntrack"; \
ssh $i "modprobe nf_conntrack_ipv4"; \
ssh $i "modprobe br_netfilter"; \
ssh $i "modprobe overlay"; \
done
Permanently:

vim /approot1/k8s/tmp/service/k8s-modules.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
nf_conntrack_ipv4
br_netfilter
overlay
Distribute to all nodes

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/k8s-modules.conf $i:/etc/modules-load.d/; \
done
Enable the systemd service that auto-loads modules

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl enable systemd-modules-load"; \
ssh $i "systemctl restart systemd-modules-load"; \
ssh $i "systemctl is-active systemd-modules-load"; \
done
active means the module auto-load service started successfully.
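As an optional sanity check (a small sketch of my own, assuming the same two node ips used above), confirm the modules are actually loaded on every node:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "lsmod | grep -E 'ip_vs|nf_conntrack|br_netfilter|overlay'"; \
done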
Configure kernel parameters
The following parameters apply to 3.x and 4.x series kernels.

vim /approot1/k8s/tmp/service/kubernetes.conf
Before editing, run :set paste inside vim so that pasted content is not mangled (extra comments, broken indentation, etc.).

# Enable packet forwarding (needed for vxlan)
net.ipv4.ip_forward=1
# Let iptables see bridged traffic
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-arptables=1
# Disable tcp_tw_recycle; it conflicts with NAT and breaks connectivity
net.ipv4.tcp_tw_recycle=0
# Do not reuse TIME-WAIT sockets for new TCP connections
net.ipv4.tcp_tw_reuse=0
# Upper limit of the socket listen() backlog
net.core.somaxconn=32768
# Maximum number of tracked connections, default nf_conntrack_buckets * 4
net.netfilter.nf_conntrack_max=1000000
# Avoid using swap; only allow it when the system is about to OOM
vm.swappiness=0
# Maximum number of memory map areas a process may have
vm.max_map_count=655360
# Maximum number of file handles the kernel can allocate
fs.file-max=6553600
# TCP keepalive settings
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
Distribute to all nodes
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/kubernetes.conf $i:/etc/sysctl.d/; \
done
Load the kernel parameters
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl -p /etc/sysctl.d/kubernetes.conf"; \
done
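To confirm the parameters took effect, an optional check (assuming the file was distributed as above) is to read a couple of keys back; both should return 1:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables"; \
done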
Flush iptables rules
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat"; \
ssh $i "iptables -P FORWARD ACCEPT"; \
done
Configure the PATH variable
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "echo 'PATH=$PATH:/approot1/k8s/bin' >> $HOME/.bashrc"; \
done
source $HOME/.bashrc
Download the binaries
This only needs to be done on one of the nodes.

Downloads from github can be slow; you can instead upload the files from a local machine to /approot1/k8s/pkg/.

wget -O /approot1/k8s/pkg/kubernetes.tar.gz \
https://dl.k8s.io/v1.23.3/kubernetes-server-linux-amd64.tar.gz

wget -O /approot1/k8s/pkg/etcd.tar.gz \
https://github.com/etcd-io/etcd/ ... -linux-amd64.tar.gz
Extract the archives and remove unneeded files

cd /approot1/k8s/pkg/
for i in $(ls *.tar.gz);do tar xvf $i && rm -f $i;done
mv kubernetes/server/bin/ kubernetes/
rm -rf kubernetes/{addons,kubernetes-src.tar.gz,LICENSES,server}
rm -f kubernetes/bin/*_tag kubernetes/bin/*.tar
rm -rf etcd-v3.5.1-linux-amd64/Documentation etcd-v3.5.1-linux-amd64/*.md
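As a quick check that the extracted binaries are usable (an optional step, assuming the paths used above), print their versions:

/approot1/k8s/pkg/kubernetes/bin/kube-apiserver --version
/approot1/k8s/pkg/etcd-v3.5.1-linux-amd64/etcd --version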
Deploy the master node
Create the CA root certificate
wget -O /approot1/k8s/bin/cfssl https://github.com/cloudflare/cf ... l_1.6.1_linux_amd64
wget -O /approot1/k8s/bin/cfssljson https://github.com/cloudflare/cf ... n_1.6.1_linux_amd64
chmod +x /approot1/k8s/bin/*
vim /approot1/k8s/tmp/ssl/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
vim /approot1/k8s/tmp/ssl/ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
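This produces ca.pem, ca-key.pem and ca.csr in the current directory. An optional check (assuming openssl is installed) is to inspect the subject and validity of the new root certificate:

openssl x509 -in /approot1/k8s/tmp/ssl/ca.pem -noout -subject -dates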
Deploy the etcd component
Create the etcd certificate
vim /approot1/k8s/tmp/ssl/etcd-csr.json
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

Mind the json format.

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
Manage etcd with systemd
vim /approot1/k8s/tmp/service/kube-etcd.service.192.168.91.19
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

etcd parameters:

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/approot1/k8s/data/etcd
ExecStart=/approot1/k8s/bin/etcd \
  --name=etcd-192.168.91.19 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.91.19:2380 \
  --listen-peer-urls=https://192.168.91.19:2380 \
  --listen-client-urls=https://192.168.91.19:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.91.19:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=etcd-192.168.91.19=https://192.168.91.19:2380 \
  --initial-cluster-state=new \
  --data-dir=/approot1/k8s/data/etcd \
  --wal-dir= \
  --snapshot-count=50000 \
  --auto-compaction-retention=1 \
  --auto-compaction-mode=periodic \
  --max-request-bytes=10485760 \
  --quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
Distribute certificates and create the required paths
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

Also make sure the directories match your own layout; if they differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -m 700 -p /approot1/k8s/data/etcd"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,etcd*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-etcd.service.$i $i:/etc/systemd/system/kube-etcd.service; \
scp /approot1/k8s/pkg/etcd-v3.5.1-linux-amd64/etcd* $i:/approot1/k8s/bin/; \
done
Start the etcd service
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-etcd"; \
ssh $i "systemctl restart kube-etcd --no-block"; \
ssh $i "systemctl is-active kube-etcd"; \
done
activating means etcd is still starting; wait a moment and run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-etcd";done again.

active means etcd started successfully. With a multi-node etcd cluster it is normal for one member not to return active yet; use the check below to verify the cluster.

For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://${i}:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
endpoint health"; \
done
https://192.168.91.19:2379 is healthy: successfully committed proposal: took = 7.135668ms

Output like the above containing successfully means the node is healthy.
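Beyond the health check, an optional way to see the cluster membership (same certificates, assuming the single-node layout above) is:

ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://192.168.91.19:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
member list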
Deploy the apiserver component
Create the apiserver certificate
vim /approot1/k8s/tmp/ssl/kubernetes-csr.json
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

Mind the json format.

10.88.0.1 is the k8s service ip; make sure it does not overlap with any existing network, to avoid conflicts.

{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19",
    "10.88.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
Create the metrics-server certificate
vim /approot1/k8s/tmp/ssl/metrics-server-csr.json
{
  "CN": "aggregator",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes metrics-server-csr.json | cfssljson -bare metrics-server
Manage apiserver with systemd
vim /approot1/k8s/tmp/service/kube-apiserver.service.192.168.91.19
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

The --service-cluster-ip-range network must be the same subnet as the 10.88.0.1 address in kubernetes-csr.json.

--etcd-servers must list all etcd nodes if etcd is multi-node.

apiserver parameters:

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/approot1/k8s/bin/kube-apiserver \
  --allow-privileged=true \
  --anonymous-auth=false \
  --api-audiences=api,istio-ca \
  --authorization-mode=Node,RBAC \
  --bind-address=192.168.91.19 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --endpoint-reconciler-type=lease \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.91.19:2379 \
  --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
  --secure-port=6443 \
  --service-account-issuer=https://kubernetes.default.svc \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --requestheader-allowed-names= \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-username-headers=X-Remote-User \
  --proxy-client-cert-file=/etc/kubernetes/ssl/metrics-server.pem \
  --proxy-client-key-file=/etc/kubernetes/ssl/metrics-server-key.pem \
  --enable-aggregator-routing=true \
  --v=2
Restart=always
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute certificates and create the required paths
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

Also make sure the directories match your own layout; if they differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kubernetes*.pem,metrics-server*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-apiserver.service.$i $i:/etc/systemd/system/kube-apiserver.service; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-apiserver $i:/approot1/k8s/bin/; \
done
Start the apiserver service
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-apiserver"; \
ssh $i "systemctl restart kube-apiserver --no-block"; \
ssh $i "systemctl is-active kube-apiserver"; \
done
activating means apiserver is still starting; wait a moment and run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-apiserver";done again.

active means apiserver started successfully.

curl -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api
Output like the following means the apiserver is running normally:

{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.91.19:6443"
    }
  ]
}
List all k8s kinds (object types)

curl -s -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api/v1/ | grep kind | sort -u
  "kind": "APIResourceList",
  "kind": "Binding",
  "kind": "ComponentStatus",
  "kind": "ConfigMap",
  "kind": "Endpoints",
  "kind": "Event",
  "kind": "Eviction",
  "kind": "LimitRange",
  "kind": "Namespace",
  "kind": "Node",
  "kind": "NodeProxyOptions",
  "kind": "PersistentVolume",
  "kind": "PersistentVolumeClaim",
  "kind": "Pod",
  "kind": "PodAttachOptions",
  "kind": "PodExecOptions",
  "kind": "PodPortForwardOptions",
  "kind": "PodProxyOptions",
  "kind": "PodTemplate",
  "kind": "ReplicationController",
  "kind": "ResourceQuota",
  "kind": "Scale",
  "kind": "Secret",
  "kind": "Service",
  "kind": "ServiceAccount",
  "kind": "ServiceProxyOptions",
  "kind": "TokenRequest",
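The health endpoint can be queried the same way (an optional check of my own; on this apiserver version /healthz should simply return ok):

curl -s -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/healthz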
Configure kubectl
Create the admin certificate
vim /approot1/k8s/tmp/ssl/admin-csr.json
{
  "CN": "admin",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
Create the kubeconfig
Set cluster parameters

--server is the apiserver address; change it to your own ip and the port set by --secure-port in the service file. Be sure to include the https:// prefix, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubectl.kubeconfig
Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig
Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig
Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
Distribute the kubeconfig to all master nodes
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p $HOME/.kube"; \
scp /approot1/k8s/pkg/kubernetes/bin/kubectl $i:/approot1/k8s/bin/; \
ssh $i "echo 'source <(kubectl completion bash)' >> $HOME/.bashrc"; \
scp /approot1/k8s/tmp/ssl/kubectl.kubeconfig $i:$HOME/.kube/config; \
done
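With the kubeconfig in place, a quick test that kubectl can reach the apiserver (an optional check of my own; controller-manager and scheduler are deployed later, so they will still show as unhealthy at this point):

kubectl cluster-info
kubectl get componentstatuses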
Deploy the controller-manager component
Create the controller-manager certificate
vim /approot1/k8s/tmp/ssl/kube-controller-manager-csr.json
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

Mind the json format.

{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Create the kubeconfig
Set cluster parameters

--server is the apiserver address; change it to your own ip and the port set by --secure-port in the service file. Be sure to include the https:// prefix, otherwise the component will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Manage controller-manager with systemd
vim /approot1/k8s/tmp/service/kube-controller-manager.service
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

The --service-cluster-ip-range network must be the same subnet as the 10.88.0.1 address in kubernetes-csr.json.

--cluster-cidr is the network the pods run in; it must differ from --service-cluster-ip-range and from any existing network, to avoid conflicts.

controller-manager parameters:

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-controller-manager \
  --bind-address=0.0.0.0 \
  --allocate-node-cidrs=true \
  --cluster-cidr=172.20.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --node-cidr-mask-size=24 \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --use-service-account-credentials=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute certificates and create the required paths
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

Also make sure the directories match your own layout; if they differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/kube-controller-manager.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-controller-manager.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-controller-manager $i:/approot1/k8s/bin/; \
done
Start the controller-manager service
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-controller-manager"; \
ssh $i "systemctl restart kube-controller-manager --no-block"; \
ssh $i "systemctl is-active kube-controller-manager"; \
done
activating means controller-manager is still starting; wait a moment and run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-controller-manager";done again.

active means controller-manager started successfully.
Deploy the scheduler component
Create the scheduler certificate
vim /approot1/k8s/tmp/ssl/kube-scheduler-csr.json
Replace 192.168.91.19 here with your own ip; do not copy and paste blindly.

Mind the json format.

{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Create the kubeconfig
Set cluster parameters

--server is the apiserver address; change it to your own ip and the port set by --secure-port in the service file. Be sure to include the https:// prefix, otherwise the component will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-scheduler.kubeconfig
Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Manage scheduler with systemd
vim /approot1/k8s/tmp/service/kube-scheduler.service
scheduler parameters:

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-scheduler \
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --bind-address=0.0.0.0 \
  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute certificates and create the required paths
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

Also make sure the directories match your own layout; if they differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kube-scheduler.kubeconfig} $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-scheduler.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-scheduler $i:/approot1/k8s/bin/; \
done
Start the scheduler service
For multiple nodes, append the extra ips after 192.168.91.19, separated by spaces; replace 192.168.91.19 with your own ip, and do not copy and paste blindly.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-scheduler"; \
ssh $i "systemctl restart kube-scheduler --no-block"; \
ssh $i "systemctl is-active kube-scheduler"; \
done
activating means the scheduler is still starting; wait a moment and run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-scheduler";done again.

active means the scheduler started successfully.
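With all three control-plane components running, an optional overall check (using the kubectl configured earlier) is:

kubectl get componentstatuses

scheduler, controller-manager and etcd-0 should all report Healthy.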
Deploy the work node
Deploy the containerd component
Download the binaries
When downloading containerd from github, pick the file whose name starts with cri-containerd-cni. That package bundles containerd together with the crictl management tool and the cni network plugins, and the systemd service file, config.toml, crictl.yaml and cni config files are already prepared, so only small changes are needed.

Although cri-containerd-cni also ships runc, that copy lacks dependencies, so download runc again from the runc github releases.

wget -O /approot1/k8s/pkg/containerd.tar.gz \
https://github.com/containerd/co ... -linux-amd64.tar.gz
wget -O /approot1/k8s/pkg/runc https://github.com/opencontainer ... d/v1.0.3/runc.amd64
mkdir /approot1/k8s/pkg/containerd
cd /approot1/k8s/pkg/
for i in $(ls *containerd*.tar.gz);do tar xvf $i -C /approot1/k8s/pkg/containerd && rm -f $i;done
chmod +x /approot1/k8s/pkg/runc
mv /approot1/k8s/pkg/containerd/usr/local/bin/{containerd,containerd-shim*,crictl,ctr} /approot1/k8s/pkg/containerd/
mv /approot1/k8s/pkg/containerd/opt/cni/bin/{bridge,flannel,host-local,loopback,portmap} /approot1/k8s/pkg/containerd/
rm -rf /approot1/k8s/pkg/containerd/{etc,opt,usr}
Manage containerd with systemd
vim /approot1/k8s/tmp/service/containerd.service

Mind the binary paths.

If the runc binary is not under /usr/bin/, the unit needs the Environment parameter to add the runc path to PATH, otherwise starting pods fails with exec: "runc": executable file not found in $PATH: unknown

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
Environment="PATH=$PATH:/approot1/k8s/bin"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/approot1/k8s/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target
Configure the containerd config file
vim /approot1/k8s/tmp/service/config.toml

root is the container storage path; change it to a path with enough disk space.

bin_dir is the path holding the containerd binaries and the cni plugins.

sandbox_image is the name and tag of the pause image.

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/approot1/data/containerd"
state = "/run/containerd"
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "k8s.gcr.io/pause:3.6"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/approot1/k8s/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = "/etc/cni/net.d/cni-default.conf"
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
          endpoint = ["https://quay.mirrors.ustc.edu.cn"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
Configure the crictl management tool
vim /approot1/k8s/tmp/service/crictl.yaml

runtime-endpoint: unix:///run/containerd/containerd.sock
Configure the cni network plugin
vim /approot1/k8s/tmp/service/cni-default.conf

The subnet parameter must match the controller-manager --cluster-cidr parameter.

{
  "name": "mynet",
  "cniVersion": "0.3.1",
  "type": "bridge",
  "bridge": "mynet0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "subnet": "172.20.0.0/16"
  }
}
Distribute the config files and create the required paths
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /etc/containerd"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/cni/net.d"; \
scp /approot1/k8s/tmp/service/containerd.service $i:/etc/systemd/system/; \
scp /approot1/k8s/tmp/service/config.toml $i:/etc/containerd/; \
scp /approot1/k8s/tmp/service/cni-default.conf $i:/etc/cni/net.d/; \
scp /approot1/k8s/tmp/service/crictl.yaml $i:/etc/; \
scp /approot1/k8s/pkg/containerd/* $i:/approot1/k8s/bin/; \
scp /approot1/k8s/pkg/runc $i:/approot1/k8s/bin/; \
done
Start the containerd service
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable containerd"; \
ssh $i "systemctl restart containerd --no-block"; \
ssh $i "systemctl is-active containerd"; \
done
activating means containerd is still starting; wait a moment and run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active containerd";done again.

active means containerd started successfully.
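An optional check that containerd is reachable through the socket configured in crictl.yaml (using the full binary path, since non-interactive ssh may not pick up the PATH added to .bashrc):

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "/approot1/k8s/bin/crictl info > /dev/null && echo $i crictl OK"; \
done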
Import the pause image
ctr has one quirk when importing images: for an imported image to be usable by k8s, the -n k8s.io parameter must be added, and the command has to be in the form ctr -n k8s.io image import <xxx.tar>. Writing ctr image import <xxx.tar> -n k8s.io instead fails with ctr: flag provided but not defined: -n, because -n is a global flag and has to come before the subcommand. It takes a bit of getting used to.

If the image is imported without -n k8s.io, kubelet will try to pull the pause image again when it starts a pod, and if the configured registry has no image with that tag the pod will fail.

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/pause-v3.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/pause-v3.6.tar && rm -f /tmp/pause-v3.6.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep pause"; \
done
Deploy the kubelet component
Create the kubelet certificate
vim /approot1/k8s/tmp/ssl/kubelet-csr.json.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one JSON file per node, change the IP inside each JSON file to that worker node's IP, and do not repeat IPs (a small templating sketch follows the JSON below).
{
  "CN": "system:node:192.168.91.19",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:nodes",
      "OU": "System"
    }
  ]
}
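For more than a couple of nodes, editing each CSR file by hand is error-prone. A small templating sketch that generates the per-node files from the 192.168.91.19 copy above; the template file name and the NODE_IP placeholder are made up for this example and are not part of the original steps:

cd /approot1/k8s/tmp/ssl/
cp kubelet-csr.json.192.168.91.19 kubelet-csr.json.template
sed -i 's/192.168.91.19/NODE_IP/g' kubelet-csr.json.template
for i in 192.168.91.19 192.168.91.20;do \
sed "s/NODE_IP/$i/g" kubelet-csr.json.template > kubelet-csr.json.$i; \
done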
for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kubelet-csr.json.$i | cfssljson -bare kubelet.$i; \
done
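Optionally, the generated certificates can be inspected to confirm the CN and the SANs came out as expected; a read-only sketch using openssl (the -ext option needs OpenSSL 1.1.1 or newer, drop it on older systems):

cd /approot1/k8s/tmp/ssl/
for i in 192.168.91.19 192.168.91.20;do \
openssl x509 -in kubelet.$i.pem -noout -subject -ext subjectAltName; \
done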
Create the kubelet kubeconfig
Set the cluster parameters

--server is the apiserver access address; change it to your own IP and the port set by the --secure-port parameter in the apiserver service file. Be sure to include the https:// scheme, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.
for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the client authentication parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:node:$i \
--client-certificate=kubelet.$i.pem \
--client-key=kubelet.$i-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the context parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=system:node:$i \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the default context

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kubelet.kubeconfig.$i; \
done
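Before distributing the kubeconfigs, a quick read-only check confirms each one has the expected cluster, user and current context:

cd /approot1/k8s/tmp/ssl/
for i in 192.168.91.19 192.168.91.20;do \
/approot1/k8s/pkg/kubernetes/bin/kubectl config view --kubeconfig=kubelet.kubeconfig.$i --minify; \
done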
Create the kubelet configuration file
vim /approot1/k8s/tmp/service/config.yaml
Mind the IP in the clusterDNS parameter: it must be in the same range as the apiserver --service-cluster-ip-range parameter but different from the kubernetes service IP. By convention the kubernetes service takes the first IP of the range and clusterDNS takes the second.
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.88.0.2
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 3
containerLogMaxSize: 10Mi
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 300Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 40s
hairpinMode: hairpin-veth
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 40s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
kubeAPIBurst: 100
kubeAPIQPS: 50
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
# disable readOnlyPort
readOnlyPort: 0
resolvConf: /etc/resolv.conf
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
tlsCertFile: /etc/kubernetes/ssl/kubelet.pem
tlsPrivateKeyFile: /etc/kubernetes/ssl/kubelet-key.pem
Configure kubelet to be managed by systemd
vim /approot1/k8s/tmp/service/kubelet.service.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one service file per node, change the IP inside each service file to that worker node's IP, and do not repeat IPs.

The --container-runtime parameter defaults to docker; when using anything other than docker it must be set to remote, and --container-runtime-endpoint must point at the runtime's sock file.

kubelet parameter reference
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=/approot1/k8s/data/kubelet
ExecStart=/approot1/k8s/bin/kubelet \
  --config=/approot1/k8s/data/kubelet/config.yaml \
  --cni-bin-dir=/approot1/k8s/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --hostname-override=192.168.91.19 \
  --image-pull-progress-deadline=5m \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --network-plugin=cni \
  --pod-infra-container-image=k8s.gcr.io/pause:3.6 \
  --root-dir=/approot1/k8s/data/kubelet \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths
For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces; remember to change 192.168.91.19 to your own IP and do not blindly copy.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kubelet"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/ssl/kubelet.$i.pem $i:/etc/kubernetes/ssl/kubelet.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.$i-key.pem $i:/etc/kubernetes/ssl/kubelet-key.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.kubeconfig.$i $i:/etc/kubernetes/kubelet.kubeconfig; \
scp /approot1/k8s/tmp/service/kubelet.service.$i $i:/etc/systemd/system/kubelet.service; \
scp /approot1/k8s/tmp/service/config.yaml $i:/approot1/k8s/data/kubelet/; \
scp /approot1/k8s/pkg/kubernetes/bin/kubelet $i:/approot1/k8s/bin/; \
done
Start the kubelet service
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kubelet"; \
ssh $i "systemctl restart kubelet --no-block"; \
ssh $i "systemctl is-active kubelet"; \
done
A return value of activating means kubelet is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kubelet";done again.

A return value of active means kubelet started successfully.
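Besides systemctl is-active, the kubelet healthz endpoint (healthzPort 10248 in config.yaml above) gives a quick liveness check; a read-only sketch, assuming curl is installed on the nodes:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "curl -s http://127.0.0.1:10248/healthz; echo"; \
done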
Check whether the nodes are Ready
kubectl get node
Expect output similar to the following; a STATUS of Ready means the node is healthy.

NAME            STATUS   ROLES    AGE   VERSION
192.168.91.19   Ready    <none>   20m   v1.23.3
192.168.91.20   Ready    <none>   20m   v1.23.3
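The ROLES column shows <none> because nothing in this setup applies a role label; if you want the listing to read nicer, a label can be added by hand (purely cosmetic, not required by anything above):

for i in 192.168.91.19 192.168.91.20;do \
kubectl label node $i node-role.kubernetes.io/worker=; \
done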
Deploy the kube-proxy component
Create the kube-proxy certificate
vim /approot1/k8s/tmp/ssl/kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-proxy",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/; \
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Create the kube-proxy kubeconfig
Set the cluster parameters

--server is the apiserver access address; change it to your own IP and the port set by the --secure-port parameter in the apiserver service file. Be sure to include the https:// scheme, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-proxy.kubeconfig
Set the client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kube-proxy.kubeconfig
Create the kube-proxy configuration file
vim /approot1/k8s/tmp/service/kube-proxy-config.yaml.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one config file per node, change the IP inside each file to that worker node's IP, and do not repeat IPs.

The clusterCIDR parameter must match the controller-manager --cluster-cidr parameter.

hostnameOverride must match the kubelet --hostname-override parameter, otherwise you will get node not found errors.
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
clusterCIDR: "172.20.0.0/16"
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "192.168.91.19"
metricsBindAddress: 0.0.0.0:10249
mode: "ipvs"
Configure kube-proxy to be managed by systemd
vim /approot1/k8s/tmp/service/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
# kube-proxy uses --cluster-cidr to tell cluster-internal traffic from external traffic
## when --cluster-cidr or --masquerade-all is specified,
## kube-proxy SNATs requests that access a Service IP
WorkingDirectory=/approot1/k8s/data/kube-proxy
ExecStart=/approot1/k8s/bin/kube-proxy \
  --config=/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths
For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces; remember to change 192.168.91.19 to your own IP and do not blindly copy.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kube-proxy"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/kube-proxy.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-proxy.service $i:/etc/systemd/system/; \
scp /approot1/k8s/tmp/service/kube-proxy-config.yaml.$i $i:/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-proxy $i:/approot1/k8s/bin/; \
done
Start the kube-proxy service
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-proxy"; \
ssh $i "systemctl restart kube-proxy --no-block"; \
ssh $i "systemctl is-active kube-proxy"; \
done
A return value of activating means kube-proxy is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kube-proxy";done again.

A return value of active means kube-proxy started successfully.
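Since mode is set to ipvs in kube-proxy-config.yaml, it is worth confirming that IPVS virtual servers are actually being programmed; a small read-only sketch, assuming the ipvsadm tool is installed on the nodes (it is not installed by any step above):

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ipvsadm -Ln | head -n 20"; \
done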
Deploy the flannel component
flannel GitHub

Configure the flannel yaml file
vim /approot1/k8s/tmp/service/flannel.yaml
The Network parameter inside net-conf.json must match the controller-manager --cluster-cidr parameter.
5 x: p/ D/ |" S/ q& h---
1 g1 |5 }& [- \apiVersion: policy/v1beta1
/ h+ [4 A/ v. t6 }* [( @kind: PodSecurityPolicy
0 ~* e1 }/ c4 h: [metadata:
: ?4 S- d* a5 Q k2 G* B1 Y8 m! a name: psp.flannel.unprivileged
& Y8 @; P3 Q8 D6 Z annotations:" e( B1 }9 P7 s
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default/ A3 r7 u" r' [) P. E
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
4 \* T0 y: J" P' V- {1 H2 `3 X, t apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default+ s# [9 A$ ~) V d1 ?% z' B E
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default7 n& K" [" J( ]* M8 Z2 k5 J
spec:" I0 o! u0 Q9 }1 L3 y
privileged: false$ O4 O8 o- V9 N! @; W; e
volumes:9 J4 j: p o9 y7 O( q' o
- configMap
& X5 s; J# G8 j" j; v - secret
5 l6 ~7 _: @2 P6 s - emptyDir
. z/ o- a. H# H$ d$ A - hostPath
0 C7 t' F9 }) Z- h allowedHostPaths:
! X& P9 s' {7 v0 V) y) y) I - pathPrefix: "/etc/cni/net.d"
. E0 Q* ~! A3 S' `# z7 z - pathPrefix: "/etc/kube-flannel"# M: E: T0 q; v- P- C0 k
- pathPrefix: "/run/flannel": E$ ?' M1 E8 G$ d2 P& b/ r* L
readOnlyRootFilesystem: false2 N+ J' i: W, b* h
# Users and groups2 T8 G. O& H4 q3 y9 `' o5 t, S
runAsUser:0 d- D$ Q% b( {: V
rule: RunAsAny
4 N/ l( z+ R" C" R supplementalGroups:
7 G6 n8 q2 ?- o) ~, y/ { rule: RunAsAny7 U/ S/ g/ r3 ^
fsGroup:6 e6 w0 B! x/ g/ R- w5 l+ l
rule: RunAsAny
1 u$ W: ?, w" a7 Q6 l9 o' N. U # Privilege Escalation
8 ?7 i5 |+ [1 c+ s* L! h# {3 F2 [ a) N Q allowPrivilegeEscalation: false
( } r/ [+ X6 S7 C8 [ defaultAllowPrivilegeEscalation: false
, g- Y- y( Q; c8 ?1 j # Capabilities* {9 e: q/ J8 n0 B
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
1 C# Q3 c" W' @& w: m; F defaultAddCapabilities: []
% T K# s* ?" r- {! U- f requiredDropCapabilities: []
7 q: k& }5 N4 m # Host namespaces1 ^% ~1 y' q5 U% [. B3 ^# l* n- S/ [
hostPID: false
: l2 ]; h7 t0 v! s hostIPC: false/ H9 i3 w1 g, B) i6 S y
hostNetwork: true
; ?' w( u% u4 p# d hostPorts: p% A" ~! M- {' q) r
- min: 0; K5 a2 Q6 ~. }: ?3 L
max: 655359 h% ?5 h5 v: J+ b* y, Q" R" F
# SELinux
* ?- F8 o- d. Z0 Y/ L6 U! ^ seLinux:
6 p% [$ E0 R- `% f# c/ N# k6 q # SELinux is unused in CaaSP
; Y" C* Y5 k3 ?- p1 R rule: 'RunAsAny': a2 B) s( q& _5 _ R
---9 t, {6 b* i/ I
kind: ClusterRole+ Z3 i* m ?; a: |$ @4 U
apiVersion: rbac.authorization.k8s.io/v1; h) o5 {8 q4 l0 {( K, `
metadata:
0 x6 l' q. E N) k) a name: flannel
. E* F }- e- {* r& {rules:
& r m/ o" P' ?8 @& x- apiGroups: ['policy']$ o/ h! J% t) n( K4 V6 a& p
resources: ['podsecuritypolicies']! T! d) Y$ M, U% V
verbs: ['use']
7 Q) \7 I2 G2 k resourceNames: ['psp.flannel.unprivileged']1 E; o7 N4 w* }" ?1 U* [5 ~
- apiGroups:/ M) ?$ O" n4 G7 w9 d
- ""/ W$ O2 L: y. k+ x q
resources:% p. G" W5 o6 d1 I9 _
- pods
& q1 W( U P, G verbs:# r. t. ^3 J6 u- J z# O+ w
- get s3 B3 s6 D, X& S, P% f
- apiGroups:
' c! n! C' D1 h! o1 | - "" Z# @2 Y2 O- _; T/ |
resources:
, P9 M( r8 C% C9 Z5 p- v' [, y - nodes
; E- B+ J! g5 f- m verbs:9 _% f8 ^3 u; a5 ^" g m. E% E, g$ n
- list" S6 B. J; y; F( `8 }
- watch" I' H# h* E0 i
- apiGroups:; t' E( y5 r5 c0 D3 o4 ]( a( g
- ""
! T- ~% F/ h: L) z resources:) M0 Q' b, s# e& z
- nodes/status4 J" |7 A5 ]3 W( {1 E( E& ? b
verbs:' |" [& O$ w# f; C( Q( t" x
- patch
/ e9 I, O$ F% l$ z---+ L; a4 u5 }2 w, e
kind: ClusterRoleBinding
7 H7 e9 k' q% e( q/ ^apiVersion: rbac.authorization.k8s.io/v1
$ ?/ Q% A: t! Y2 }5 n2 W: xmetadata:
+ }7 s+ N/ Z8 _. Z* p; e name: flannel
* O) f6 D+ S6 A" e7 x9 z. eroleRef:
% z6 s# ]: n& @( w4 I* h9 }+ n* a apiGroup: rbac.authorization.k8s.io$ v$ ^; g% F. i) ]
kind: ClusterRole2 k. B2 r2 j) t b+ m* W& s
name: flannel
& L; e, n; Z7 osubjects:. @! R! e, e" |0 r' g5 X6 c
- kind: ServiceAccount& `4 k+ a/ ] ?1 `" f+ k: W9 ^
name: flannel) \; _: y) w$ U8 ?& v
namespace: kube-system: X/ C4 G. f. J
---
$ }% d- o( a* v# x! G8 Q7 zapiVersion: v13 V: @+ U, H* H( K- Q$ K
kind: ServiceAccount
3 i; h7 k: N# V3 d6 ]metadata:
& _; P/ a9 A1 }! S name: flannel% v- c2 I: P$ f- g5 F5 W0 d
namespace: kube-system4 P- W, t7 }1 ?) L" x1 G
---* _# N& p, H# r, R' m/ ~4 N
kind: ConfigMap# ?0 e! _) m, U$ n4 b: f7 V" g
apiVersion: v1
' R# S7 O6 v; z. a& K. Lmetadata:
s8 C. \6 s x* c# C name: kube-flannel-cfg
6 _$ K) B$ }* v ] r; o namespace: kube-system: U- d$ {0 c, a0 f/ R$ y ^
labels:& I. x2 k, x7 ~, t/ r3 M7 l
tier: node
/ Y' i r) A' ~+ Q app: flannel
* Z7 ~. |, r6 o( A1 r( ]" [& x7 [data:% T, n- S2 {2 f+ x7 Q
cni-conf.json: |
1 c6 P r' E- Y* r {4 W- [8 ?3 U% q, H T8 d: `
"name": "cbr0",
5 Y9 }" F& |# A% ` "cniVersion": "0.3.1"," \2 U- o: ~9 V: v% S& ^5 Y# {7 d
"plugins": [, w, S+ v. G: L1 s [. f
{
# v8 _+ f/ u) `" q6 [( O1 G "type": "flannel",
. S2 j( W; G" F' E "delegate": {
( A4 `: R) G: }1 E "hairpinMode": true,, \+ \& ]) I* v8 w4 ~
"isDefaultGateway": true
$ U8 v: Q" H3 C( R2 R( d6 t9 ~9 M5 p }
* I! W' S& T3 r },5 O5 N+ ]! U0 X
{
5 z" N' `/ R' ` "type": "portmap",2 a( ~- f, c: `3 }5 W f ^
"capabilities": {# ^3 ~4 @5 J; e# c# ]% S
"portMappings": true
! t4 v. x& I' V7 f3 v }4 i9 ~; g1 y/ f& X( ^- G
}
7 h& h! r9 E; G ]' N, E- A, ?# w4 H
}" Y) V7 u) j% o0 g
net-conf.json: |+ Q$ `: @' K. N+ e$ D* ?# c# n
{6 J* m) U( h; i
"Network": "172.20.0.0/16",, h% h4 { ~$ k" l1 U8 M
"Backend": {
; m2 g& d' j3 R8 y7 q/ d- \ K4 q "Type": "vxlan"9 A/ H7 c5 B9 `1 B: y
}
( k+ }5 X. Z g$ M }8 ]- @# s) H+ { b- {0 H9 p
---9 R; {7 U9 J8 ]+ z- N5 B
apiVersion: apps/v1
$ p! ]8 ^6 H% C6 k: e4 \' Akind: DaemonSet
# S. z8 b3 D: {metadata:$ l$ `- E- T- l" ^/ Q: m
name: kube-flannel-ds7 ?7 U2 ]% Z1 q; Y M9 |" ~' W
namespace: kube-system) x. R, ^3 G3 L
labels:/ v. m! S5 B) @" |
tier: node
. r/ A% S" {% b) O7 J app: flannel+ U. h. P2 C# Z
spec:
* W# D( G% A; A% y* z5 \ selector:
+ |$ Q! R* o' `7 T* Y matchLabels:- c7 S! f- e% Z
app: flannel
, E9 u2 \+ P9 f$ E* o template:
% G4 }" |! H( n9 X+ ^ metadata:6 U% y% h- x8 f) \4 i) g, o% @
labels:
" j3 o2 f. d/ C1 v tier: node8 ^2 r3 }# B# [& Z
app: flannel
% M7 P% h* ~# O' G" l( x( G: I4 a spec:. ~0 F. d: g; D0 t* G% _: [
affinity:0 e) ?! u& J5 {. n$ K( F! F" P
nodeAffinity:
3 m2 p8 T6 |" `" U3 L9 U requiredDuringSchedulingIgnoredDuringExecution:
4 z, x5 b$ m& o1 `% |3 Z- s nodeSelectorTerms:7 z) e2 s- ^: B2 z* {7 O
- matchExpressions:1 \$ d2 `" ^2 y U1 Z0 \, n/ S
- key: kubernetes.io/os
& ], g0 Y! K4 W% z! b" `7 C& _+ @" @ operator: In
# d- Q- v8 P N! Y0 b5 { values:
4 `) v/ @, k% ]6 F* |6 Q - linux0 h7 z+ O" _) b5 ^/ C. L" g+ D. V
hostNetwork: true
, I& X9 |8 f" U5 U4 M9 s priorityClassName: system-node-critical
4 e$ x; T9 L/ B tolerations:
9 n7 N4 X, B9 W0 v6 F3 J" E - operator: Exists, G# b9 E* N/ |
effect: NoSchedule
) l1 M0 C& x1 @0 O2 u8 f! N N/ y serviceAccountName: flannel
+ V2 o/ ?" l* F* T+ R initContainers:
0 f8 |6 Y( l* O/ M" [; s - name: install-cni
* {' g. V8 \* ^# _" ?+ [7 f image: quay.io/coreos/flannel:v0.15.1" x8 M3 _$ c% Q* f+ q. [. s
command:
$ t J# T" S8 }- c, E+ D- O - cp# P: s- u3 N1 d% [
args:
) B) @7 Z% E; d8 ?+ Q - -f7 s9 `9 e" P/ B0 c
- /etc/kube-flannel/cni-conf.json
, c W8 C0 R' C2 @ ? - /etc/cni/net.d/10-flannel.conflist
6 I* \8 F. {5 V) l* ~ volumeMounts:
6 ]% A- F# s, w' M - name: cni; k- T! L% \: X- L) `7 \) x+ e/ X9 Q
mountPath: /etc/cni/net.d
2 ]: q/ [# v3 }7 ?" P - name: flannel-cfg
8 k0 J' `/ H5 A0 R; w2 G) ? mountPath: /etc/kube-flannel/* F* b8 ^, d/ G) p E
containers:: B9 ]; A+ h% v1 }$ H8 j2 i
- name: kube-flannel, ~4 v- }7 H, H' B( g
image: quay.io/coreos/flannel:v0.15.1
/ {) {; M. ]5 P/ }) F command:
" P, e* C7 s2 D' A2 ? n - /opt/bin/flanneld
U& l$ B% u1 _, _% w- A: Z' P args:
" n$ D4 x2 I1 m/ M) x - --ip-masq
+ |6 h }1 h( p( a - --kube-subnet-mgr0 y' Y3 Q8 K7 `0 M9 G
resources:
, l( l# ~5 [% H+ d8 l requests:
. P3 N3 s, E4 g% g: g' i( f cpu: "100m"3 J0 H$ N ]. t' ]- r/ A
memory: "50Mi"1 g; j1 l7 |. z/ h# I6 R' P) l
limits:+ Z8 E$ c% I1 n+ R( L+ p* A6 o
cpu: "100m"9 }+ ~: w' C9 k6 I# d0 `' x D
memory: "50Mi"
/ G- D1 i+ J6 P/ }' t& f R) L securityContext:
9 ]: K/ k7 T0 m4 y# @ privileged: false3 r2 c/ Q! j( j; d& t
capabilities:* x* X" [( V# I9 E
add: ["NET_ADMIN", "NET_RAW"]
. i) S V9 O( ?- B4 x* x4 W/ K& Z env:
" x u( J0 ` I- R - name: POD_NAME# e M/ k: [& x. B0 b
valueFrom:
# h2 B9 w' ?) H* W1 S fieldRef:
$ l0 N' ]/ I9 e6 l: h: z0 o) c! K+ J fieldPath: metadata.name
. f$ `, @% J# P+ t' h* t - name: POD_NAMESPACE
9 V$ r- P) P$ X* B valueFrom:
. T, a( V6 W. f, P9 r( n. h fieldRef:
( I$ V. z& K) }5 h fieldPath: metadata.namespace
5 `" d- N+ n3 D6 p# e volumeMounts:
; T. F% F2 n" j" [ - name: run5 y% _- S2 q6 n. B0 Q9 C
mountPath: /run/flannel
% O2 o; b9 K/ {9 R0 D - name: flannel-cfg
0 K# U7 r! ~: w1 i' C* P+ ?) q* X1 V mountPath: /etc/kube-flannel/
: h) I/ U* G9 O4 ^' l+ {+ I: I volumes:
* ~" y! Q* H0 t/ F* [2 \/ h - name: run
' X1 m, t, S; z7 p8 Z+ n2 Q hostPath:
\. |* C8 l6 A. \8 F) g- m path: /run/flannel# K+ {6 U0 W8 F: ~
- name: cni9 A: U: e6 M' D% K9 o: [; ~! l2 h
hostPath:+ R$ l! K* D9 D: `( N/ r# y
path: /etc/cni/net.d8 p( t9 R" _3 w" `, q2 j7 O$ l1 }
- name: flannel-cfg1 ?# K8 S" ?/ `4 O
configMap:
8 v( n! y3 l! s" `2 \3 } name: kube-flannel-cfg
Configure the flannel CNI network config file
vim /approot1/k8s/tmp/service/10-flannel.conflist
{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
Import the flannel image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/flannel-v0.15.1.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/flannel-v0.15.1.tar && rm -f /tmp/flannel-v0.15.1.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep flannel"; \
done
Distribute the flannel CNI network config file
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "rm -f /etc/cni/net.d/10-default.conf"; \
scp /approot1/k8s/tmp/service/10-flannel.conflist $i:/etc/cni/net.d/; \
done
After the flannel CNI config is distributed, the nodes will briefly show NotReady; wait until all nodes are back to Ready before running the flannel component.

Run the flannel component in k8s
kubectl apply -f /approot1/k8s/tmp/service/flannel.yaml
Check whether the flannel pods are running
kubectl get pod -n kube-system | grep flannel
Expect output similar to the following.

flannel runs as a DaemonSet, i.e. its pods live and die with the nodes: there is one flannel pod per node in the cluster, and when a node is deleted its flannel pod is deleted with it.

kube-flannel-ds-86rrv                      1/1     Running   0          8m54s
kube-flannel-ds-bkgzx                      1/1     Running   0          8m53s
On SUSE 12 the pods may show Init:CreateContainerError; run kubectl describe pod -n kube-system <flannel_pod_name> to see the reason. If the error is Error: failed to create containerd container: get apparmor_parser version: exec: "apparmor_parser": executable file not found in $PATH, use which apparmor_parser to find where apparmor_parser lives, create a symlink to it in the directory containing the kubelet binary, and restart the pod. Note that this symlink has to be created on every node that runs flannel.
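Once the flannel pods are Running, the vxlan interface and the per-node subnet lease can also be checked directly on the nodes; a read-only sketch:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "cat /run/flannel/subnet.env"; \
ssh $i "ip -d link show flannel.1"; \
done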
Deploy the coredns component
Configure the coredns yaml file
vim /approot1/k8s/tmp/service/coredns.yaml
The clusterIP parameter must match the clusterDNS parameter in the kubelet configuration file.
apiVersion: v15 I0 z+ A# e0 {6 ^3 e. b/ v
kind: ServiceAccount
6 w4 ]+ m n% `% I! D" K8 Kmetadata:
& w) [" }* o* `" l" t1 Z4 b name: coredns
! W/ w8 u K' e. E namespace: kube-system0 M0 U4 `7 y. ?# g
labels:
$ o9 F& H) k3 o5 y2 D, t2 U kubernetes.io/cluster-service: "true"
8 o0 w# ^4 d; b addonmanager.kubernetes.io/mode: Reconcile
0 W+ v* v4 v1 d! e; R* m---3 h0 Z9 L% v5 f2 D9 S! T
apiVersion: rbac.authorization.k8s.io/v1$ {8 G' [- Q- q7 V
kind: ClusterRole
1 H* S7 a S7 A7 `metadata:
' ^" d! k; h* b labels:
1 |5 C7 f+ N/ z kubernetes.io/bootstrapping: rbac-defaults
: X. l" y3 q$ S& v. H" r7 e! [ addonmanager.kubernetes.io/mode: Reconcile H+ g, O4 A- Y; `
name: system:coredns! R( b2 B. {- `
rules:7 u. v# T8 _* F% f* u" R
- apiGroups:
' @7 W; [2 O2 w1 t - ""( P# k) _2 P8 V
resources:
8 w: f7 M9 { q" Q) U9 Q - endpoints
) D9 `: e1 K, v/ W8 x# O$ s - services. F! e7 r& Q2 P
- pods
# I; f2 i0 x- v5 z5 S - namespaces
8 L: k" J7 o4 o8 B' O' g verbs:9 M( h% D- G% [( ~
- list
/ z& {; K* r- y - watch* ^' ]' r7 K8 U' ^- [% Z
- apiGroups:
+ K; `$ A% E3 U- A4 d; { - ""
' ~4 z7 Q( ~ T" w6 J resources:
& r. R- x i% r: l* \: x - nodes
X9 O: X, H1 {+ u) n- s/ P verbs:) [( [0 B8 c$ H+ i
- get
6 ]4 N. H$ j7 I- t9 w$ C" x s- apiGroups:
' y" j) D7 Q6 q/ Z3 c% X - discovery.k8s.io; I* J2 p) }! {, k
resources:
+ m; A; i' a/ `$ J {& ? - endpointslices
) `5 s9 @ _" X) U3 C verbs:; o9 \6 l# c9 z
- list
% c7 b7 n. r! | P; Z8 T - watch
2 W; ?8 a( g2 Y$ g---, M* y8 H( K, j+ X
apiVersion: rbac.authorization.k8s.io/v1. I) l' C& d; v h& ]) ^; L% A
kind: ClusterRoleBinding
+ ?# i- T, V9 imetadata:; R k% A7 T9 f4 v# D/ V
annotations:
7 ^0 ^& B U3 s* y; Q rbac.authorization.kubernetes.io/autoupdate: "true"
+ t% V. J' T; \, o labels:
# B, q) g% }# M3 y* s kubernetes.io/bootstrapping: rbac-defaults' A! _+ n$ z5 Z) j
addonmanager.kubernetes.io/mode: EnsureExists2 b, b0 D- i6 G1 y" G# @
name: system:coredns
" {1 k7 w, V7 U) F* d5 B4 E/ f2 MroleRef:1 x+ B& g1 N. J$ [/ \( H% ?
apiGroup: rbac.authorization.k8s.io) E' e1 j" F) F" @' }
kind: ClusterRole
* C( P4 G7 Q: h, v- t name: system:coredns
4 ?7 b( S; p# Osubjects:+ @9 i: I1 I, Y; x6 h2 }. p, u
- kind: ServiceAccount
8 F8 f; f, _0 k: \: j$ v! _ name: coredns
: r8 ]# v7 O6 T7 k: Q. U namespace: kube-system
5 i: d. j3 U1 }" `, |---
, E7 W" s" M ^" V4 C3 BapiVersion: v1( C5 N1 Q( g0 i4 ^
kind: ConfigMap
/ v0 d( Z- r+ zmetadata:" U" F8 `/ \" N" v* a, ]0 @
name: coredns
% E. M1 E0 b- X M/ z G namespace: kube-system
( ^6 s0 C. B7 F( Z: t labels:' W. J3 G/ G# D+ }( s: f
addonmanager.kubernetes.io/mode: EnsureExists
7 _4 B5 E* ?) o4 V, V( ~# f: B; rdata:
6 `1 n: `1 p6 b0 y6 ]% `; E Corefile: |0 K; {/ b9 [+ ^# ^! D! R
.:53 {
9 J% z) p9 z9 A0 N9 V' U# i9 K errors
7 d4 I3 O, T( Z# u" K- U& I5 q health {
9 f2 V! a: N2 W9 `1 n+ {( d lameduck 5s6 j1 n7 S/ f3 r$ R1 x
}$ X0 K9 e$ X) ^' o. I& F
ready. y# Y! ~& y5 l
kubernetes cluster.local in-addr.arpa ip6.arpa {
$ w3 f* c9 ^2 T* x$ Z3 {$ R- U( Y pods insecure: s y4 {" Y0 h6 t# d& b% E' ~1 f
fallthrough in-addr.arpa ip6.arpa
5 |, L4 e! z! m9 [6 Y+ r! N ttl 30
9 m, z$ T) S8 o# ?- |3 K }+ {, k; g# i1 [. J9 _9 T
prometheus :9153% [0 n" T2 _! C1 q, {
forward . /etc/resolv.conf {0 c* T/ f5 E( M3 M3 y
max_concurrent 1000; Y; ?6 d7 g0 i& h, ]9 f) w6 [. D
}
6 Z7 V- O X, G/ o+ { cache 30
; N& y2 p! [6 \3 r! B7 R. I reload
: ]* Z V: L2 x9 N3 s8 O loadbalance2 V' k; C8 Z/ r0 D3 l5 X
}
8 Z7 n! G; `; A5 h; }8 R& w9 t---
# o g# c. |( _6 L3 LapiVersion: apps/v1
. g7 g9 t5 W& m' u# c9 akind: Deployment) c3 r+ }7 M X2 B+ D/ H( `
metadata:
7 T, Y1 Q3 ?! k# F name: coredns
! q& F }9 Z3 S namespace: kube-system
/ H! _7 A5 ^6 e labels:- O1 D6 c( m: P* u# X
k8s-app: kube-dns" m$ N; l& V3 ?' Q2 o# e0 [
kubernetes.io/cluster-service: "true"
" J3 b% T4 [- n# T k+ V9 N addonmanager.kubernetes.io/mode: Reconcile
. C6 A8 v3 |' p' N J kubernetes.io/name: "CoreDNS"& `1 |3 M" v+ e% D6 ~& _! c' B" P
spec:" g; d6 j `/ [4 F3 M
replicas: 1
4 D, z, Z) g& C% D, k/ d+ L strategy:
$ g! R7 G g5 c$ m. p type: RollingUpdate
7 V& o& X8 J% W* t$ `9 l rollingUpdate:
`/ x; ~" q+ |- t+ | maxUnavailable: 1& i: ~ _; G0 u6 ?# B7 K8 _; l
selector:) |" S8 p4 w& q( G) h
matchLabels:/ e0 o0 S" L# y. x
k8s-app: kube-dns
# S; g4 W2 B7 p7 q/ `% q8 K+ w template:0 o+ \, D$ q- N
metadata:
# ^7 c% L& j9 n. p+ _ labels:- @8 F t) f& d; r; c7 {7 J- A5 E
k8s-app: kube-dns N W n& {8 M3 l5 T
spec:
" c6 m5 o# j# ] securityContext:
6 _/ {/ J# Z4 W" B* B seccompProfile:
! |# x6 w6 ~. V! u( F9 y( w type: RuntimeDefault
; i% _0 _) N3 f6 \1 S priorityClassName: system-cluster-critical
$ ^1 o& ^: }% u# i5 J serviceAccountName: coredns
& W4 j1 E% @' q5 g$ [ affinity:
, D1 t8 P( T5 `3 P" Q3 q$ F9 N podAntiAffinity:
* [3 i. X* N2 f: B preferredDuringSchedulingIgnoredDuringExecution:2 }" t* i# M i0 c- O
- weight: 100
$ ]9 \$ h/ P7 u0 ]& l podAffinityTerm:
' R% `: V; c/ i+ b' m8 L* M labelSelector:! e, Z/ q$ Z; O9 ]: W6 o
matchExpressions:3 p$ g, u, i4 c
- key: k8s-app
n. a g( @0 V8 z/ R8 ` operator: In% F& n: e5 `: W
values: ["kube-dns"]
% u l% q# {5 n& b& a! Y topologyKey: kubernetes.io/hostname# Q2 ?. Q) q" _: j$ }& p
tolerations:8 ]9 ^$ N) l' V
- key: "CriticalAddonsOnly"
* i& C5 V5 l) V5 |4 Z; D9 D) h' b operator: "Exists"! O0 u( L- T5 g7 ]/ I
nodeSelector:2 ]( V' ^2 y' C" q
kubernetes.io/os: linux2 m, @ ?' l0 F' ~( Y& b" j
containers:
! i; G3 L0 U H& d% ] - name: coredns( R2 k! X, _0 i6 F8 X
image: docker.io/coredns/coredns:1.8.6& z) f. ~8 D& k& P
imagePullPolicy: IfNotPresent9 j& D$ `& s; I4 E8 x6 X
resources:" ?. o6 h8 ?" R) `$ ~5 h" a$ Q
limits:2 _# \( q" n- W9 w$ B8 S: E5 C
memory: 300Mi# O* `5 C: {" Q1 \! W3 S5 A( A
requests:
: i9 `1 z$ C. c( l T- N" [ cpu: 100m* P( ? V2 x1 ~) N7 E8 C3 h
memory: 70Mi
" l' b, K; ^9 W U3 n C S args: [ "-conf", "/etc/coredns/Corefile" ]# K( W5 p9 i: z& b4 ~9 d2 z
volumeMounts:
5 X$ I- g+ s# b# b; x - name: config-volume
8 X1 v1 X' ^$ R8 c) ~0 J mountPath: /etc/coredns7 V, K/ P1 K* L1 ]
readOnly: true
. \+ }' [& r$ b+ _: M9 \ v ports:
! \( e; u! d1 P1 n - containerPort: 533 c9 ?" G6 Z+ n& n2 H
name: dns- b9 c" K% t; ~, S
protocol: UDP7 ^. e& H$ o0 e" z7 t0 R2 Y( Y
- containerPort: 53
* m' @; r/ {( m name: dns-tcp
8 a' W0 o9 o6 a& p protocol: TCP
9 x4 C# b1 I j! C6 @- P6 L0 u - containerPort: 9153
6 ?7 O3 D& c q9 X( C7 G" z name: metrics
. u g7 h* f k3 I$ k$ d" { protocol: TCP
" e* o* a1 C& I3 s* q livenessProbe:
" k' N% i8 i0 G# z" J httpGet:
4 x- u: d# [) z0 T% o; k path: /health5 A6 U$ c* s% K' n+ F% F
port: 8080" _, t( V- V& q) Q
scheme: HTTP- d- h/ U- W$ J9 @+ a! {+ W0 C
initialDelaySeconds: 60
' A- @6 m( s2 ?" ~4 `# O timeoutSeconds: 5
: G! [& f8 L% o+ T0 q! ]" C successThreshold: 1
4 s4 I$ q! C# y. w" P failureThreshold: 5- A2 {, l3 d1 S
readinessProbe:
5 r! r* Y9 o* ` httpGet:
6 o, A1 t) f G( M r" C path: /ready
1 H4 z. Y& I1 v0 H% I. q port: 81813 J$ y( h. I; I7 ]) F) r
scheme: HTTP+ r P" [2 f3 t: Y9 S/ s
securityContext:' R5 u- Z; k6 z! E% V4 n* c
allowPrivilegeEscalation: false
- v# F) Y, z0 p5 a+ |/ O9 w9 i capabilities:$ O2 r8 k3 B* T! R
add:
' D3 M" x/ S4 R4 O - NET_BIND_SERVICE4 L1 m9 b6 f G% V
drop:$ E5 l q, s. ]& E/ n
- all1 R2 e' u! V8 p. s/ O
readOnlyRootFilesystem: true, |6 c* {* ]- s7 T
dnsPolicy: Default- G0 E% z( o @# n2 M
volumes:
& D( S; i, {% o2 o0 L" U - name: config-volume& [9 H; W. O9 ]( N/ I9 Y
configMap:# K7 Z5 F7 i! r8 p9 Q; i8 p! E# }
name: coredns
: R5 T; R7 ?! U: \1 W* j: l" D items:1 H+ j" f0 n: X1 y# f2 F
- key: Corefile: a. R7 A+ _! t" q
path: Corefile( P- e( G' I1 W/ }+ X* f
---) Q) ~2 c) B J, D1 D
apiVersion: v1
4 c: Z! b/ o9 Mkind: Service
3 ?9 n% t; [2 M; ^ w1 M3 Xmetadata:
- S0 i1 j6 ^! E* J6 R3 ]( t- y' L name: kube-dns
4 m: L8 Z$ d F% j( a7 Y9 i. S3 u namespace: kube-system
2 q# U5 o5 B6 y ] annotations:5 t4 R0 _% ^8 z, j6 p1 f
prometheus.io/port: "9153"3 F- r3 ^0 a! R. d# A# q* x4 l
prometheus.io/scrape: "true"
$ A- j8 U% [. H2 D5 t labels:
) I" U6 |/ Z+ x$ X2 T j! O6 r# O k8s-app: kube-dns, g- ~1 W8 x2 ?; L6 u" \" P
kubernetes.io/cluster-service: "true"
7 a+ i y- Y2 k2 V addonmanager.kubernetes.io/mode: Reconcile
6 t1 b0 ?+ o/ e7 |+ V) X$ C kubernetes.io/name: "CoreDNS"
+ U2 O6 L J+ c8 R4 \spec:6 N; @" [, O1 L
selector:# f! H' H; ~4 s, b4 X# } T4 Q
k8s-app: kube-dns
- q& x9 V' `$ N; d9 H: L clusterIP: 10.88.0.2
: R9 k2 [' k! E& h ports:
! e, M: K6 j5 P6 Q, T9 C) h6 C - name: dns9 c0 P: e; X6 f1 t' x9 e
port: 53( o9 c, W& k) q/ E8 p7 b- b2 h
protocol: UDP
% J+ Z7 v& D- w0 K% [4 b2 v4 r - name: dns-tcp
- M4 [& Z2 c( }( L$ |% D/ V; o/ G port: 53
* f5 |7 S! t, p b, f q4 R protocol: TCP. C! {: d% K8 R) s, M
- name: metrics
& J: I& U1 `/ N- U1 }/ Y2 Y port: 9153
( n1 {# N$ o! g ` protocol: TCP
Import the coredns image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/coredns-v1.8.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/coredns-v1.8.6.tar && rm -f /tmp/coredns-v1.8.6.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep coredns"; \
done
Run the coredns component in k8s
kubectl apply -f /approot1/k8s/tmp/service/coredns.yaml
Check whether the coredns pod is running
kubectl get pod -n kube-system | grep coredns
Expect output similar to the following.

Because the replicas parameter in the coredns yaml is 1, there is only one pod here; changing it to 2 would give two pods.

coredns-5fd74ff788-cddqf                   1/1     Running   0          10s
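To confirm that cluster DNS actually resolves, a throwaway pod can run an nslookup against the kubernetes service; a sketch assuming the busybox:1.28 image can be pulled by the nodes (or has been imported with ctr like the other images):

kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default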
Deploy the metrics-server component
Configure the metrics-server yaml file
vim /approot1/k8s/tmp/service/metrics-server.yaml
apiVersion: v1
6 c1 b- ]9 M# Wkind: ServiceAccount
+ N( U. S2 M$ j$ C0 |& O; Qmetadata:
2 K( P, ]- U' \, Y! m3 X labels:
4 W3 ^8 b9 K% z Q" z k8s-app: metrics-server6 c# M6 o9 g6 j7 u5 Z
name: metrics-server5 T3 B& J* _6 s1 j t, f
namespace: kube-system
+ K" G9 f6 U4 k+ M# a; d& C' ~---
, V6 T# ?& `2 w% c& c5 y- _apiVersion: rbac.authorization.k8s.io/v1
& [: Q0 f) U2 r) x! c3 p$ h4 ]kind: ClusterRole
7 R. p0 X c* B& q: O% Dmetadata:
4 D! u) m; o! N4 b( M9 o: t5 B labels:
% g0 H* p0 [9 a2 q: ]! N k8s-app: metrics-server/ H4 a! O* X$ J+ V1 Q
rbac.authorization.k8s.io/aggregate-to-admin: "true"
! S o+ W( A3 S+ C8 y0 ^ rbac.authorization.k8s.io/aggregate-to-edit: "true"' p G; Q. _+ k) I5 @. m q
rbac.authorization.k8s.io/aggregate-to-view: "true", M! I/ e; Z. }
name: system:aggregated-metrics-reader- p8 M! D3 l% E! v" h) v6 U# i( m+ _4 i
rules:
1 _) i4 d# n. u6 ~- apiGroups:
) J7 m+ h3 Y( G& X7 T - metrics.k8s.io
. P& d# s9 ^2 M! ~ resources:
9 \. ~& V! `% A/ G2 R4 @4 z - pods
0 @% W! m6 _; k$ J- ~2 N, U - nodes
+ {1 R/ u0 n* L j% r7 M+ n" A5 z verbs:. z0 D0 v8 K% @) z3 [
- get
+ g& X$ e3 C) N/ M' k' H - list4 M- `" N: P0 T. L0 B' C/ F& A
- watch- t j; \+ W1 s; D4 i" I# i
---
& Q x `5 k* _1 K( d2 |apiVersion: rbac.authorization.k8s.io/v1
, C9 s7 H% H7 d# l; R! p9 Wkind: ClusterRole
/ _( W n; G/ P3 emetadata:
: ?: G6 o4 Z' M3 H2 j/ z. n5 D) F labels:- X& q9 v: m: `' ~6 X
k8s-app: metrics-server
" E. m4 d. \ h4 @9 a# k- h name: system:metrics-server
# L% T& m1 L" i+ zrules:
2 P' c4 V" j+ i# p1 L* t* `# g- D- apiGroups:
, M" w1 D8 [# u0 L; E" ?- n - ""$ T5 M2 f N/ r6 l+ e a
resources:
4 c$ ]- W& T1 t' @7 C. I - pods
' Z4 W0 v& V r$ z - nodes
2 I( D4 X8 B# w4 m! ^4 U - nodes/stats
9 {5 S. f; [1 g/ \" S - namespaces, T9 G! ]8 J/ M# I6 n$ O( l$ T
- configmaps
4 _) S- F% Z0 I4 O6 B verbs:
2 g4 @/ v5 s" n8 c: g - get
4 S2 _; x3 I% L5 ^6 h& s5 I) e$ } - list
) g7 x9 G$ J6 E! N8 D2 Q7 P" d( d - watch
9 H' ^- S1 H1 A# L8 ]---
i: u' D3 d% D0 m4 w5 K6 N- e( ?apiVersion: rbac.authorization.k8s.io/v1
W/ b- z7 o) jkind: RoleBinding% r q h% T) A G
metadata:. y3 i6 ~* t: D1 J
labels:
+ L& C+ _$ g( j* d( n. l% P& ^ k8s-app: metrics-server
7 [ {( h. u3 K name: metrics-server-auth-reader
! o6 L. V( c( U+ X namespace: kube-system
; o; j/ M$ C1 S& o* n, Y0 FroleRef:
/ R2 R! Y; u2 ~+ ~ apiGroup: rbac.authorization.k8s.io
& q- e, E/ f* K5 V5 [# c, B0 [ kind: Role9 D# V( ]" v: S5 \3 B
name: extension-apiserver-authentication-reader
2 N$ ^ S4 U9 q$ isubjects:+ L, V) M. @( f2 v& T
- kind: ServiceAccount
% b* \- D ^$ v, t9 w% ? v name: metrics-server' }3 y2 G5 n( b3 m/ Q
namespace: kube-system" `+ P* a6 G( k& d' v6 Y& y+ V% @! p
---' V7 f! z5 I! s$ m
apiVersion: rbac.authorization.k8s.io/v12 x( A: N, ?4 V( n
kind: ClusterRoleBinding
4 Y K) g& o( J) Zmetadata:
; a1 b$ ]4 E1 W' _8 m( b$ f labels:
1 n0 T. r+ s; d; r* I7 Q# j' q k8s-app: metrics-server
: }, \* U: y2 k5 Q) W- M& y. g name: metrics-server:system:auth-delegator' y, {# Z2 M/ M, d" g/ F) ]
roleRef:
- h% m) n: S0 c3 ]- p apiGroup: rbac.authorization.k8s.io8 E/ X" w; B4 W9 g: F3 U
kind: ClusterRole/ D, @- O7 o/ ^/ i5 x$ g
name: system:auth-delegator
$ t8 Z1 |9 [1 o' C4 o; Ysubjects:
6 d; S, X. f5 ?1 k1 G- kind: ServiceAccount( b% h- L# S0 z! {3 V/ X3 _0 l- Y
name: metrics-server
% p# _7 T. k2 r0 O D namespace: kube-system( w1 F& K' u+ D- W! N
---2 I! R9 B8 _ {- r, B3 R
apiVersion: rbac.authorization.k8s.io/v14 g5 K, K, ^9 _* r
kind: ClusterRoleBinding3 y2 w1 [: |% B9 e0 m
metadata:( b, V. T n9 l) ^! [; A3 T" o; {
labels:
- c# F( T) q2 p1 H k8s-app: metrics-server( X6 l( X. n+ i2 s1 T- w0 q
name: system:metrics-server
: B% ^( m8 \$ E) h& m4 uroleRef:6 Z* c Z# A+ _. I! W6 R& j
apiGroup: rbac.authorization.k8s.io( |5 b+ v% z2 N4 D* D( t
kind: ClusterRole: T8 B! `0 K: Y# {. F' i* A
name: system:metrics-server* j, t. H3 d2 V2 @ f: t6 ?0 F/ b! A6 r
subjects:! o/ N1 _/ S |) u$ V
- kind: ServiceAccount
: q6 y9 ^0 }8 p: h+ g$ O/ M4 A name: metrics-server
4 F @& h F* b9 g$ q3 ]2 ]4 b3 }7 k namespace: kube-system
% g* ]0 g1 M; l1 ? h---
g- c9 B5 E+ h/ c7 Z0 B+ v2 Y! iapiVersion: v1) b" S1 E4 R9 T! R5 ~
kind: Service8 L: t- Y7 h) @7 c
metadata:6 T; u% X3 n. @5 {
labels:
( Y% Q" N8 k" H; H" } k8s-app: metrics-server
/ U# }& c3 X. ]" J8 @5 R$ X7 n* t9 w name: metrics-server
) E) f# C* F. T' Q7 f: x8 |; T namespace: kube-system
m4 N4 Q0 ?; L" xspec:& G- ?; q7 E M. q
ports:
% G( n% [2 Y4 l7 f* t* C9 o e - name: https
' N A$ c. O L: T port: 4432 X# M2 p9 y5 d% m. X. }1 w
protocol: TCP
- b9 b' D; Q! N- ^ targetPort: https
! t9 B/ C# x+ g7 {. P9 H; y/ y" l selector:
/ s* p; D. a9 s3 w; ~- I k8s-app: metrics-server- K m0 R: o, {# Q
---# R7 d. W$ G/ B# H8 J
apiVersion: apps/v1
; v) J( W$ f) \kind: Deployment
2 E/ K! g/ q. N0 W+ S/ |metadata:
5 j( |6 [7 q/ u- i- Q$ b& W labels:
6 x$ c j. t( s! H; U, `9 N8 t k8s-app: metrics-server8 M3 N) e4 L9 L0 I! p5 ?
name: metrics-server+ R1 T' Z. F. m* ?; E1 {/ |
namespace: kube-system
# i. k1 \. S) @: b2 ~spec:. ]8 h% ~' h/ ~7 k* B; W# X+ D
selector:3 y5 f d; m" D4 j6 R
matchLabels:
/ k3 J0 j: o6 W- V3 ~8 D k8s-app: metrics-server
6 T6 j# K$ o+ `9 ~9 Y V strategy:
% s7 U# r# ^4 z rollingUpdate:
- O9 V4 ]- ~9 I0 e. u- Q' K# w maxUnavailable: 0
7 }$ N; j0 `. i template:
5 B. Z6 x0 @7 c# c9 ]" S q metadata:
7 S* w5 b* [# T& t$ K- T2 l labels:6 i# Y& N# x8 U: e* q; V* k
k8s-app: metrics-server' M/ t' C! v; I9 q: E% O1 x( f& T1 l
spec:4 S# }$ Z! ? ?4 s
containers:* s) ]& G& q% r, S+ t; t
- args:6 l% H4 S) ~1 n4 h9 t: z# R
- --cert-dir=/tmp5 {0 B" b* ]: G
- --secure-port=44431 O, J2 F7 z8 T; o* _2 I/ ^
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
& p; N% H! @, _# ?$ [% [2 s- e' V - --kubelet-insecure-tls
2 H3 o9 H- O5 z8 Y2 @# W* W - --kubelet-use-node-status-port& M# X; T# \, l$ K9 z
- --metric-resolution=15s
0 |# R5 M. A4 D. a image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
2 L: Z9 Z7 e5 z imagePullPolicy: IfNotPresent* E1 I) u7 k! W
livenessProbe:
- Z& Y" e7 z3 C$ K failureThreshold: 3 Z" b. S( N( v& U' z# t; d) Y: x
httpGet:9 V. g$ w+ ?- w$ J' `$ x
path: /livez0 b/ U9 A5 N W* ^, C" ]
port: https* q/ t6 `7 W* E; `& b. t; v7 r
scheme: HTTPS# R* e" q/ {9 ~+ s$ K
periodSeconds: 10 O0 E" N" g1 l- d# `
name: metrics-server2 z( T" p* A* X, j: m
ports:
/ W0 Z: N2 x& C" o - containerPort: 4443
! G1 l1 C4 l8 a name: https4 P! t$ G8 i o" i2 Q6 _
protocol: TCP8 @) p/ ?% X& F' m {* C
readinessProbe:
8 y& k' S9 j9 K5 I$ u4 G7 E failureThreshold: 3
M- y# a0 X g& @ httpGet:. f; R+ l1 Q! x' n/ ]
path: /readyz
6 X% z; N7 b" H4 v port: https- l1 V& ?, J3 l( ]3 B" R( ]
scheme: HTTPS0 r& e: J0 |% y& O2 V
initialDelaySeconds: 20
5 G. X6 r* k6 b. m periodSeconds: 10, T2 T- m1 }( N7 O
resources:
2 w2 R8 W& q! n7 \ requests:
0 s0 Y( m" t1 L( u; }. W# e) G cpu: 100m
- N2 N+ ~ j) b# c+ N; h% A memory: 200Mi
k/ [+ w& \4 o securityContext:3 T) O9 w7 h |- _5 t; f
readOnlyRootFilesystem: true. R/ t+ W9 a: S. k" O
runAsNonRoot: true
' I& E. T/ S" d! z runAsUser: 1000
$ f) r% J% i1 w& m( l volumeMounts:
' s1 [9 X" v1 [& D1 d - mountPath: /tmp& D' n# a |7 |4 E8 W& P4 Y; u
name: tmp-dir! s. ^" N* Z1 a, l+ h- T
nodeSelector:
' U O `- k1 [3 i1 a" M& q kubernetes.io/os: linux
& c* h9 ]; S6 d& | priorityClassName: system-cluster-critical; A( B6 w" O3 v5 s
serviceAccountName: metrics-server; ^: p, n# q v) Y
volumes:
& b$ @! E; z- @: g% ?( o - emptyDir: {}
) G8 x( g) M% d# c) }7 ] name: tmp-dir) Q! t* b7 ?9 u4 S8 P! S8 S
---
) U/ d7 L% f3 D' E7 CapiVersion: apiregistration.k8s.io/v1
. _- w* x* Q" F- ^) M) ?kind: APIService* P; w3 Z* c+ `
metadata:
. Y* d* F# f* F5 c labels:" z; j1 o6 [/ A* c4 b( z4 @
k8s-app: metrics-server
. {8 W* ^2 t- ?% J7 h2 Y name: v1beta1.metrics.k8s.io! m9 t1 l' O/ ], V, z2 W! R1 w6 X
spec:
3 ~# \; u; ]3 U; _2 A( q1 P) n4 h group: metrics.k8s.io
A# J6 _5 @$ z, U: _# O: j groupPriorityMinimum: 100$ {8 _% D$ k, c+ o8 B2 M
insecureSkipTLSVerify: true
, M( C9 c E" Z% ~" J4 f service:% Q: ~8 j M2 R K ^# E, h
name: metrics-server7 \+ T5 e2 C7 x' w1 ]
namespace: kube-system
" P4 D8 ?% o/ v' `- L8 O/ h: r* H version: v1beta1$ U+ B' ^6 J# U1 [
versionPriority: 100
Import the metrics-server image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/metrics-server-v0.5.2.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/metrics-server-v0.5.2.tar && rm -f /tmp/metrics-server-v0.5.2.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep metrics-server"; \
done
Run the metrics-server component in k8s
kubectl apply -f /approot1/k8s/tmp/service/metrics-server.yaml
Check whether the metrics-server pod is running
kubectl get pod -n kube-system | grep metrics-server
Expect output similar to the following.

metrics-server-6c95598969-qnc76            1/1     Running   0          71s
Verify metrics-server functionality

Check node resource usage

kubectl top node
Expect output similar to the following.
metrics-server can be slow to start, depending on the machine specs; if the output says is not yet or is not ready, wait a while and run kubectl top node again.

NAME            CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
192.168.91.19   285m         4%     2513Mi          32%
192.168.91.20   71m          3%     792Mi           21%
Check pod resource usage in a given namespace

kubectl top pod -n kube-system
Expect output similar to the following.

NAME                              CPU(cores)   MEMORY(bytes)
coredns-5fd74ff788-cddqf          11m          18Mi
kube-flannel-ds-86rrv             4m           18Mi
kube-flannel-ds-bkgzx             6m           22Mi
kube-flannel-ds-v25xc             6m           22Mi
metrics-server-6c95598969-qnc76   6m           22Mi
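kubectl top goes through the metrics.k8s.io aggregated API, so if it misbehaves it helps to check the APIService registration and query the API directly; a read-only sketch:

kubectl get apiservices v1beta1.metrics.k8s.io
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"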
Deploy the dashboard component
Configure the dashboard yaml file
vim /approot1/k8s/tmp/service/dashboard.yaml
$ i, P1 _+ _5 C& p) E---
. k; V1 Z2 u& B6 M3 h, m& K' L" EapiVersion: v1( N5 s4 Y0 K. n- g
kind: ServiceAccount2 k3 h, |3 G4 p
metadata:
* x+ S. e$ m4 {* U6 P name: admin-user
% v+ G& T; ]. |5 r; f& T namespace: kube-system
0 u" B7 m! T2 e" E1 e* p# _% ?7 f; \+ s7 C+ ~9 t1 w6 k
---& M; w9 [3 D& w
apiVersion: rbac.authorization.k8s.io/v1
7 s5 R$ f- o- |kind: ClusterRoleBinding
6 F( S, K1 {- umetadata:# [; O3 F" Z- [6 c% \
name: admin-user( C5 G! H+ @7 {! d4 d. Q9 Q/ ~& K
roleRef:2 I+ m! @$ R- q0 E& a$ G9 a6 ?
apiGroup: rbac.authorization.k8s.io
9 Q+ ^0 p* _4 O. x1 @& K+ }5 H kind: ClusterRole
2 b' S" J; E- J1 I3 ]+ g name: cluster-admin8 m1 z1 y, I8 E \
subjects:$ C8 L6 G+ C8 ~% E/ E
- kind: ServiceAccount
! t8 S/ x ^8 m8 v0 j; t name: admin-user' e$ l3 |/ W8 s9 C
namespace: kube-system8 K+ [- K1 {* J2 f( P' y1 s/ T9 u
t I1 J$ R. Y8 O" @, a' \ H
---% t/ u- h% L# ^+ Y
apiVersion: v17 x v7 @! Y: x2 X4 Y
kind: ServiceAccount
$ S; m4 U9 h2 F3 L. Smetadata:% T! z2 B7 X: W E1 U
name: dashboard-read-user
& c( o8 g9 C# L5 T namespace: kube-system5 q0 m2 ?- c7 O5 d8 U, s7 U( P
0 s' c3 k0 |5 W' ?---
. e5 J$ l6 n7 |. k) E N# F# B2 SapiVersion: rbac.authorization.k8s.io/v1
( B! q( q5 m+ L: i3 ?0 e) t% r# _! skind: ClusterRoleBinding
' t; g9 I% e5 C4 X0 |" @- j$ qmetadata:
/ G6 J% X5 G/ k" T& X name: dashboard-read-binding5 c- @8 O1 ^8 q' A8 w; V5 b$ T
roleRef:9 A$ y% I! b) ~3 ?
apiGroup: rbac.authorization.k8s.io7 l4 e$ h1 J3 y/ [
kind: ClusterRole
* t7 ~! S4 C" _2 w( R name: dashboard-read-clusterrole0 q) F* U! {7 r7 s+ u; f
subjects:: @0 V3 i n. P' ?$ O& J; O
- kind: ServiceAccount- X6 h6 ^5 u) c
name: dashboard-read-user
0 C/ J1 T! G4 i7 W3 B- s namespace: kube-system
7 J. H% k2 Q4 q
1 z9 S8 R8 |& k) Q1 G( `8 o---8 D ~4 V3 H8 }/ Q# w0 _
apiVersion: rbac.authorization.k8s.io/v1
; J$ }8 v# K( n2 K8 @* f+ a9 d! Gkind: ClusterRole& j& Y# C- x0 u( \, Z' X6 K! B* Q
metadata:( D/ x. n' m! V( M! N: |! ^9 C
name: dashboard-read-clusterrole- \6 Z' r. X- V F
rules:
9 |0 X( Q- q/ y) q! ^$ Q- apiGroups:
) t6 r' O4 f8 r; {$ E% R, i - ""
- }, t" }/ x0 b' q5 L resources:
+ m) z' d. d7 E* U: k* v - configmaps
/ L O0 A1 e" I0 o2 d* N - endpoints
% H2 J. t7 \3 ^; W. b; c: v - nodes
y5 H6 _/ q0 x- {8 r2 y - persistentvolumes
/ O+ M0 f/ ^& T8 o1 v - persistentvolumeclaims
% I! g- _2 Q5 B8 ?- h+ V4 U - persistentvolumeclaims/status! Z8 k# X9 }: s* F
- pods- q/ I1 }2 l: q8 F& o
- replicationcontrollers8 G: ^) d9 N& ~' u3 w
- replicationcontrollers/scale
( r4 c) `% ~- Y) j* M - serviceaccounts
: U) c. a* d7 z5 w# Y' F - services
; V) h6 ^& c" i! {) b5 F - services/status
: a1 s: R6 D4 h$ t2 @& _# H1 O5 }. { verbs:
6 ] M1 Y! o( ? {) c* I! v - get+ S( l { u' ?% C
- list" x& p3 R8 I4 O
- watch
/ ]( W& M( R E: G3 L. z/ N- apiGroups:1 B$ S( l+ J' S) K
- "", @) M% N$ G; L! Y; U! ^: J! W$ ^) @
resources:
! N! W: c: e, U: R - bindings
2 [5 v* O& z8 k# Y - events$ U+ o6 O2 e. }
- limitranges
' y2 P8 I4 q3 T) b" t& V - namespaces/status
3 d4 P+ Z& b% G6 }( m, _ - pods/log
/ `! w# v* F9 d T - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - controllerrevisions
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - statefulsets
  - statefulsets/scale
  - statefulsets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  - horizontalpodautoscalers/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - cronjobs/status
  - jobs
  - jobs/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - ingresses
  - ingresses/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - replicationcontrollers/scale
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  - poddisruptionbudgets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingresses/status
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  - volumeattachments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  - clusterroles
  - roles
  - rolebindings
  verbs:
  - get
  - list
  - watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque

---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kube-system

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.4.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kube-system
            - --token-ttl=1800
            - --sidecar-host=http://dashboard-metrics-scraper:8000
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.7
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
Import the dashboard images

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/dashboard-*.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-v2.4.0.tar && rm -f /tmp/dashboard-v2.4.0.tar"; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-metrics-scraper-v1.0.7.tar && rm -f /tmp/dashboard-metrics-scraper-v1.0.7.tar"; \
done
Check that the images were imported

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | egrep 'dashboard|metrics-scraper'"; \
done
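If the import succeeded, each node should list both images; the output is roughly like the following (the exact REF column depends on how the tar files were originally saved):

docker.io/kubernetesui/dashboard:v2.4.0          application/vnd.docker.distribution.manifest.v2+json ...
docker.io/kubernetesui/metrics-scraper:v1.0.7    application/vnd.docker.distribution.manifest.v2+json ...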
Run the dashboard components in k8s

kubectl apply -f /approot1/k8s/tmp/service/dashboard.yaml
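If you prefer to wait for the rollout to finish instead of polling the pod list, a check along these lines should work:

kubectl -n kube-system rollout status deployment/kubernetes-dashboard --timeout=120s
kubectl -n kube-system rollout status deployment/dashboard-metrics-scraper --timeout=120s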
Check whether the dashboard pods are running

kubectl get pod -n kube-system | grep dashboard

The expected output is similar to the following

dashboard-metrics-scraper-799d786dbf-v28pm 1/1 Running 0 2m55s
kubernetes-dashboard-9f8c8b989-rhb7z 1/1 Running 0 2m55s
Check the dashboard access port
No fixed NodePort for the dashboard is set in the Service, so you have to look it up yourself; alternatively, edit the yaml file and specify a fixed port.
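The port can be read from the kubernetes-dashboard Service, for example:

kubectl get svc -n kube-system | grep kubernetes-dashboard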
The expected output is similar to the following

In my case, NodePort 30210 is mapped to port 443 of the pod

kubernetes-dashboard NodePort 10.88.127.68 <none> 443:30210/TCP 5m30s
Use the port you obtained to access the dashboard page, for example: https://192.168.91.19:30210
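Before opening it in a browser, a quick sanity check from the command line should return an HTTP status code (-k skips verification of the self-signed certificate):

curl -k -s -o /dev/null -w '%{http_code}\n' https://192.168.91.19:30210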
Get the dashboard login token
Get the name of the token secret

kubectl get secrets -n kube-system | grep admin

The expected output is similar to the following

admin-user-token-zvrst kubernetes.io/service-account-token 3 9m2s

Get the token value

kubectl get secrets -n kube-system admin-user-token-zvrst -o jsonpath={.data.token} | base64 -d

The expected output is similar to the following

eyJhbGciOiJSUzI1NiIsImtpZCI6InA4M1lhZVgwNkJtekhUd3Vqdm9vTE1ma1JYQ1ZuZ3c3ZE1WZmJhUXR4bUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXp2cnN0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhYTE3NTg1ZC1hM2JiLTQ0YWYtOWNhZS0yNjQ5YzA0YThmZWYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.K2o9p5St9tvIbXk7mCQCwsZQV11zICwN-JXhRv1hAnc9KFcAcDOiO4NxIeicvC2H9tHQBIJsREowVwY3yGWHj_MQa57EdBNWMrN1hJ5u-XzpzJ6JbQxns8ZBrCpIR8Fxt468rpTyMyqsO2UBo-oXQ0_ZXKss6X6jjxtGLCQFkz1ZfFTQW3n49L4ENzW40sSj4dnaX-PsmosVOpsKRHa8TPndusAT-58aujcqt31Z77C4M13X_vAdjyDLK9r5ZXwV2ryOdONwJye_VtXXrExBt9FWYtLGCQjKn41pwXqEfidT8cY6xbA7XgUVTr9miAmZ-jf1UeEw-nm8FOw9Bb5v6A
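If you would rather not look up the secret name by hand, a one-liner along these lines (assuming the admin-user service account created earlier) should print the same token:

kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/admin-user-token/{print $1}') -o jsonpath='{.data.token}' | base64 -d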
This completes the binary deployment of k8s v1.23.3 on containerd.