Create directories

Create the following paths according to your own environment; they will hold the k8s binaries and the image files used later.

mkdir -p /approot1/k8s/{bin,images,pkg,tmp/{ssl,service}}
Disable the firewall

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl disable firewalld"; \
ssh $i "systemctl stop firewalld"; \
done
9 x: C" s$ q$ D) {4 A关闭selinux
8 z$ U. X, k8 d4 I. v临时关闭2 J8 C+ R' j+ B" Q$ I
, I" C" o6 g( c/ X( F
for i in 192.168.91.19 192.168.91.20;do \( z) P+ ^1 l9 }; Z
ssh $i "setenforce 0"; \8 A3 g6 l0 |5 n5 N8 h. G! m
done0 a- [$ Y* D6 n! n
永久关闭% b2 n0 U" I4 ^ l8 R
- k) `- Q8 j+ [/ \for i in 192.168.91.19 192.168.91.20;do \
" c/ N, C) m4 g3 N G2 u Yssh $i "sed -i '/SELINUX/s/enforcing/disabled/g' /etc/selinux/config"; \' P+ s `8 ]8 T1 j. J- P
done
: B8 u" _) E3 o关闭swap% p9 S% b+ E6 x! \# c- c7 H5 B: J4 H
临时关闭" T1 x7 v: R9 e$ `8 B
* W% U) N1 b" t
for i in 192.168.91.19 192.168.91.20;do \
$ T4 o+ K2 `/ R; Y- _ssh $i "swapoff -a"; \
" v( E7 C) h4 p V$ O0 X& [done1 A, N% [1 r! `/ `3 h) K+ A3 x6 {
永久关闭
f0 Z# `# J8 a5 S) s- ^9 o8 K( I& A2 q( p$ v# M( v4 T
for i in 192.168.91.19 192.168.91.20;do \, y; S# b/ P+ o6 @; k( w' D7 t
ssh $i "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"; \
3 }& t4 a3 y M ^0 S2 ]4 k4 ydone
Enable kernel modules

Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "modprobe ip_vs"; \
ssh $i "modprobe ip_vs_rr"; \
ssh $i "modprobe ip_vs_wrr"; \
ssh $i "modprobe ip_vs_sh"; \
ssh $i "modprobe nf_conntrack"; \
ssh $i "modprobe nf_conntrack_ipv4"; \
ssh $i "modprobe br_netfilter"; \
ssh $i "modprobe overlay"; \
done
Permanently:

vim /approot1/k8s/tmp/service/k8s-modules.conf

ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
nf_conntrack_ipv4
br_netfilter
overlay
Distribute to all nodes:

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/k8s-modules.conf $i:/etc/modules-load.d/; \
done
5 i3 L" G6 B# g' m) @启用systemd自动加载模块服务8 y" b) u$ p& f* a' ?
for i in 192.168.91.19 192.168.91.20;do \
0 l# Z+ u$ |: |: Zssh $i "systemctl enable systemd-modules-load"; \
( M; _/ Z. O8 H6 _0 Mssh $i "systemctl restart systemd-modules-load"; \5 L3 ~5 n; h8 Y/ ?
ssh $i "systemctl is-active systemd-modules-load"; \
1 _: l* f5 c5 hdone
$ V4 H6 i0 N3 f6 H& B返回active表示 自动加载模块服务 启动成功' `3 T1 j, P! G2 d! ]/ L
- d2 N( L' Q6 ^4 `0 h, Y$ y' C; D
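To double-check that the modules themselves are loaded, one option is to grep lsmod on each node; every module in k8s-modules.conf should show up (note that on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so that one may be absent):

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "lsmod | egrep 'ip_vs|nf_conntrack|br_netfilter|overlay'"; \
done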
Configure system parameters

The following parameters apply to 3.x and 4.x series kernels.

vim /approot1/k8s/tmp/service/kubernetes.conf

Before editing, it is recommended to run :set paste inside vim, so that pasted content does not get mangled (stray comments, broken indentation, etc.).

# Enable packet forwarding (needed for vxlan)
net.ipv4.ip_forward=1
# Have iptables process bridged traffic
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-arptables=1
# Disable tcp_tw_recycle; it conflicts with NAT and breaks connectivity
net.ipv4.tcp_tw_recycle=0
# Do not reuse TIME-WAIT sockets for new TCP connections
net.ipv4.tcp_tw_reuse=0
# Upper limit of the socket listen() backlog
net.core.somaxconn=32768
# Maximum number of tracked connections; default is nf_conntrack_buckets * 4
net.netfilter.nf_conntrack_max=1000000
# Avoid using swap; only allow it when the system is about to OOM
vm.swappiness=0
# Maximum number of memory map areas a process may have
vm.max_map_count=655360
# Maximum number of file handles the kernel can allocate
fs.file-max=6553600
# Keepalive tuning for long-lived connections
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
Distribute to all nodes:

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/kubernetes.conf $i:/etc/sysctl.d/; \
done

Load the system parameters:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl -p /etc/sysctl.d/kubernetes.conf"; \
done
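To spot-check that the values took effect, sysctl can read a couple of them back on each node:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables"; \
done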
Flush iptables rules

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat"; \
ssh $i "iptables -P FORWARD ACCEPT"; \
done
Configure the PATH variable

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "echo 'PATH=$PATH:/approot1/k8s/bin' >> $HOME/.bashrc"; \
done
source $HOME/.bashrc
Download the binaries

This only needs to be done on one node.

Downloads from GitHub can be slow; you can instead upload the files locally to /approot1/k8s/pkg/.

wget -O /approot1/k8s/pkg/kubernetes.tar.gz \
https://dl.k8s.io/v1.23.3/kubernetes-server-linux-amd64.tar.gz

wget -O /approot1/k8s/pkg/etcd.tar.gz \
https://github.com/etcd-io/etcd/ ... -linux-amd64.tar.gz
Unpack and remove unneeded files

cd /approot1/k8s/pkg/
for i in $(ls *.tar.gz);do tar xvf $i && rm -f $i;done
mv kubernetes/server/bin/ kubernetes/
rm -rf kubernetes/{addons,kubernetes-src.tar.gz,LICENSES,server}
rm -f kubernetes/bin/*_tag kubernetes/bin/*.tar
rm -rf etcd-v3.5.1-linux-amd64/Documentation etcd-v3.5.1-linux-amd64/*.md
& n8 i3 v; p0 W0 }: o部署 master 节点" I' T4 b, u1 L! p
创建 ca 根证书
h- f, W: K) Q) |6 B5 nwget -O /approot1/k8s/bin/cfssl https://github.com/cloudflare/cf ... l_1.6.1_linux_amd64, {+ d& a: K- `* ]8 B- p* Y
wget -O /approot1/k8s/bin/cfssljson https://github.com/cloudflare/cf ... n_1.6.1_linux_amd64
$ ^& F. j, V( ?chmod +x /approot1/k8s/bin/*# v3 l4 k$ A O/ _/ Y, w1 z) D: @/ u
vim /approot1/k8s/tmp/ssl/ca-config.json: T/ U2 R3 ^9 O( u; ?
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
vim /approot1/k8s/tmp/ssl/ca-csr.json

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
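This produces ca.pem, ca-key.pem, and ca.csr in the current directory. As a sanity check, the subject and validity of the new root certificate can be inspected, for example with openssl:

openssl x509 -in ca.pem -noout -subject -dates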
Deploy the etcd component

Create the etcd certificate

vim /approot1/k8s/tmp/ssl/etcd-csr.json

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

Mind the JSON format.
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
/ }* j& e! r4 h. }" G( W- w配置 etcd 为 systemctl 管理
3 Q, k2 I4 f" A/ Svim /approot1/k8s/tmp/service/kube-etcd.service.192.168.91.19
* W J% r1 m4 N, S j- E这里的192.168.91.19需要改成自己的ip,不要一股脑的复制黏贴
6 T7 h9 l0 u1 m6 J- N& [6 A8 R+ C- O' l5 b: T* | U. m
etcd 参数
F+ k7 M4 C9 }5 G( d: o$ C F1 K2 K s8 u$ q" j
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/approot1/k8s/data/etcd
ExecStart=/approot1/k8s/bin/etcd \
  --name=etcd-192.168.91.19 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.91.19:2380 \
  --listen-peer-urls=https://192.168.91.19:2380 \
  --listen-client-urls=https://192.168.91.19:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.91.19:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=etcd-192.168.91.19=https://192.168.91.19:2380 \
  --initial-cluster-state=new \
  --data-dir=/approot1/k8s/data/etcd \
  --wal-dir= \
  --snapshot-count=50000 \
  --auto-compaction-retention=1 \
  --auto-compaction-mode=periodic \
  --max-request-bytes=10485760 \
  --quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
Distribute certificates and create the relevant paths

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -m 700 -p /approot1/k8s/data/etcd"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,etcd*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-etcd.service.$i $i:/etc/systemd/system/kube-etcd.service; \
scp /approot1/k8s/pkg/etcd-v3.5.1-linux-amd64/etcd* $i:/approot1/k8s/bin/; \
done
Start the etcd service

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-etcd"; \
ssh $i "systemctl restart kube-etcd --no-block"; \
ssh $i "systemctl is-active kube-etcd"; \
done

activating means etcd is still starting; wait a moment, then run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-etcd";done again.

active means etcd started successfully. With a multi-node etcd cluster, it is normal for one node not to return active; use the check below to verify the cluster instead.

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://${i}:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
endpoint health"; \
done

https://192.168.91.19:2379 is healthy: successfully committed proposal: took = 7.135668ms

Output like the above containing successfully means the node is healthy.
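For a multi-node cluster, listing the members gives a fuller picture than per-endpoint health; a sketch reusing the same certificates (adjust the endpoint IP to one of your own nodes):

ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://192.168.91.19:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
member list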
Deploy the apiserver component

Create the apiserver certificate

vim /approot1/k8s/tmp/ssl/kubernetes-csr.json

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

Mind the JSON format.

10.88.0.1 is the k8s service IP; make sure it does not overlap with any existing network, to avoid conflicts.
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19",
    "10.88.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
Create the metrics-server certificate

vim /approot1/k8s/tmp/ssl/metrics-server-csr.json

{
  "CN": "aggregator",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes metrics-server-csr.json | cfssljson -bare metrics-server
Manage apiserver with systemctl

vim /approot1/k8s/tmp/service/kube-apiserver.service.192.168.91.19

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

The --service-cluster-ip-range network must be the same subnet as the 10.88.0.1 address in kubernetes-csr.json.

For a multi-node etcd cluster, --etcd-servers must list all etcd nodes.

apiserver parameters:
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/approot1/k8s/bin/kube-apiserver \
  --allow-privileged=true \
  --anonymous-auth=false \
  --api-audiences=api,istio-ca \
  --authorization-mode=Node,RBAC \
  --bind-address=192.168.91.19 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --endpoint-reconciler-type=lease \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.91.19:2379 \
  --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
  --secure-port=6443 \
  --service-account-issuer=https://kubernetes.default.svc \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --requestheader-allowed-names= \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-username-headers=X-Remote-User \
  --proxy-client-cert-file=/etc/kubernetes/ssl/metrics-server.pem \
  --proxy-client-key-file=/etc/kubernetes/ssl/metrics-server-key.pem \
  --enable-aggregator-routing=true \
  --v=2
Restart=always
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute certificates and create the relevant paths

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kubernetes*.pem,metrics-server*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-apiserver.service.$i $i:/etc/systemd/system/kube-apiserver.service; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-apiserver $i:/approot1/k8s/bin/; \
done
Start the apiserver service

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-apiserver"; \
ssh $i "systemctl restart kube-apiserver --no-block"; \
ssh $i "systemctl is-active kube-apiserver"; \
done

activating means apiserver is still starting; wait a moment, then run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-apiserver";done again.

active means apiserver started successfully.

curl -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api

If apiserver is running normally, it returns something like:

{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.91.19:6443"
    }
  ]
}
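The same client certificate can also query the health endpoint, which should simply print ok:

curl -s -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/healthz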
List all k8s kinds (object categories):

curl -s -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api/v1/ | grep kind | sort -u
  "kind": "APIResourceList",
  "kind": "Binding",
  "kind": "ComponentStatus",
  "kind": "ConfigMap",
  "kind": "Endpoints",
  "kind": "Event",
  "kind": "Eviction",
  "kind": "LimitRange",
  "kind": "Namespace",
  "kind": "Node",
  "kind": "NodeProxyOptions",
  "kind": "PersistentVolume",
  "kind": "PersistentVolumeClaim",
  "kind": "Pod",
  "kind": "PodAttachOptions",
  "kind": "PodExecOptions",
  "kind": "PodPortForwardOptions",
  "kind": "PodProxyOptions",
  "kind": "PodTemplate",
  "kind": "ReplicationController",
  "kind": "ResourceQuota",
  "kind": "Scale",
  "kind": "Secret",
  "kind": "Service",
  "kind": "ServiceAccount",
  "kind": "ServiceProxyOptions",
  "kind": "TokenRequest",
Configure kubectl management

Create the admin certificate

vim /approot1/k8s/tmp/ssl/admin-csr.json

{
  "CN": "admin",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
Create the kubeconfig

Set cluster parameters

--server is the apiserver access address; change it to your own IP and the port given by --secure-port in the service file. Be sure to include the https:// scheme, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubectl.kubeconfig

Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig

Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
Distribute the kubeconfig to all master nodes

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p $HOME/.kube"; \
scp /approot1/k8s/pkg/kubernetes/bin/kubectl $i:/approot1/k8s/bin/; \
ssh $i "echo 'source <(kubectl completion bash)' >> $HOME/.bashrc"; \
scp /approot1/k8s/tmp/ssl/kubectl.kubeconfig $i:$HOME/.kube/config; \
done
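With the kubeconfig in place (and /approot1/k8s/bin on PATH from earlier), a quick check that kubectl can reach the apiserver:

kubectl cluster-info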
Deploy the controller-manager component

Create the controller-manager certificate

vim /approot1/k8s/tmp/ssl/kube-controller-manager-csr.json

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

Mind the JSON format.
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Create the kubeconfig

Set cluster parameters

--server is the apiserver access address; change it to your own IP and the port given by --secure-port in the service file. Be sure to include the https:// scheme, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-controller-manager.kubeconfig

Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig

Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Manage controller-manager with systemctl

vim /approot1/k8s/tmp/service/kube-controller-manager.service

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

The --service-cluster-ip-range network must be the same subnet as the 10.88.0.1 address in kubernetes-csr.json.

--cluster-cidr is the pod network; it must not overlap with the --service-cluster-ip-range subnet or any existing network, to avoid conflicts.

controller-manager parameters:
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-controller-manager \
  --bind-address=0.0.0.0 \
  --allocate-node-cidrs=true \
  --cluster-cidr=172.20.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --node-cidr-mask-size=24 \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --use-service-account-credentials=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute certificates and create the relevant paths

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/kube-controller-manager.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-controller-manager.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-controller-manager $i:/approot1/k8s/bin/; \
done
Start the controller-manager service

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-controller-manager"; \
ssh $i "systemctl restart kube-controller-manager --no-block"; \
ssh $i "systemctl is-active kube-controller-manager"; \
done

activating means controller-manager is still starting; wait a moment, then run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-controller-manager";done again.

active means controller-manager started successfully.
Deploy the scheduler component

Create the scheduler certificate

vim /approot1/k8s/tmp/ssl/kube-scheduler-csr.json

Replace 192.168.91.19 with your own IP here; do not blindly copy-paste.

Mind the JSON format.
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Create the kubeconfig

Set cluster parameters

--server is the apiserver access address; change it to your own IP and the port given by --secure-port in the service file. Be sure to include the https:// scheme, otherwise kubectl will not be able to reach the apiserver with the generated kubeconfig.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-scheduler.kubeconfig

Set client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig

Set context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Manage scheduler with systemctl

vim /approot1/k8s/tmp/service/kube-scheduler.service

scheduler parameters:

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-scheduler \
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --bind-address=0.0.0.0 \
  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute certificates and create the relevant paths

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kube-scheduler.kubeconfig} $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-scheduler.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-scheduler $i:/approot1/k8s/bin/; \
done
Start the scheduler service

For multiple nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces. Remember to replace 192.168.91.19 with your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-scheduler"; \
ssh $i "systemctl restart kube-scheduler --no-block"; \
ssh $i "systemctl is-active kube-scheduler"; \
done

activating means scheduler is still starting; wait a moment, then run for i in 192.168.91.19;do ssh $i "systemctl is-active kube-scheduler";done again.

active means scheduler started successfully.
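With apiserver, controller-manager, and scheduler all active, a one-shot health view of the control plane is available from the master (componentstatuses is deprecated since v1.19 but still works in v1.23; every entry should report Healthy):

kubectl get componentstatuses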
Deploy the work nodes

Deploy the containerd component

Download the binaries

When downloading containerd from GitHub, pick the file whose name starts with cri-containerd-cni. That bundle contains containerd itself, the crictl management tool, and the cni network plugins; the systemd service file, config.toml, crictl.yaml, and cni config files all come pre-configured, so a few small edits make it ready to use.

cri-containerd-cni does ship a runc, but it is missing dependencies, so download a fresh one from the runc GitHub releases instead.

wget -O /approot1/k8s/pkg/containerd.tar.gz \
https://github.com/containerd/co ... -linux-amd64.tar.gz
wget -O /approot1/k8s/pkg/runc https://github.com/opencontainer ... d/v1.0.3/runc.amd64
mkdir /approot1/k8s/pkg/containerd
cd /approot1/k8s/pkg/
for i in $(ls *containerd*.tar.gz);do tar xvf $i -C /approot1/k8s/pkg/containerd && rm -f $i;done
chmod +x /approot1/k8s/pkg/runc
mv /approot1/k8s/pkg/containerd/usr/local/bin/{containerd,containerd-shim*,crictl,ctr} /approot1/k8s/pkg/containerd/
mv /approot1/k8s/pkg/containerd/opt/cni/bin/{bridge,flannel,host-local,loopback,portmap} /approot1/k8s/pkg/containerd/
rm -rf /approot1/k8s/pkg/containerd/{etc,opt,usr}
Manage containerd with systemctl

vim /approot1/k8s/tmp/service/containerd.service

Mind where the binaries are stored.

If the runc binary is not under /usr/bin/, the unit needs an Environment parameter adding the runc binary's directory to PATH; otherwise, when k8s starts a pod it will fail with: exec: "runc": executable file not found in $PATH: unknown

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
Environment="PATH=$PATH:/approot1/k8s/bin"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/approot1/k8s/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target
Configure the containerd config file

vim /approot1/k8s/tmp/service/config.toml

root is the container storage path; point it at a disk with plenty of free space.

bin_dir is the path holding the containerd service and cni plugin binaries.

sandbox_image is the pause image name and tag.
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/approot1/data/containerd"
state = "/run/containerd"
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "k8s.gcr.io/pause:3.6"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/approot1/k8s/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = "/etc/cni/net.d/cni-default.conf"
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
          endpoint = ["https://quay.mirrors.ustc.edu.cn"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
Configure the crictl management tool

vim /approot1/k8s/tmp/service/crictl.yaml

runtime-endpoint: unix:///run/containerd/containerd.sock
Configure the cni network plugin

vim /approot1/k8s/tmp/service/cni-default.conf

The subnet parameter must match controller-manager's --cluster-cidr parameter.

{
  "name": "mynet",
  "cniVersion": "0.3.1",
  "type": "bridge",
  "bridge": "mynet0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "subnet": "172.20.0.0/16"
  }
}
9 Z! V& S- d2 w# j3 y8 ?* F分发配置文件以及创建相关路径* ?3 F6 l. |5 N+ Y2 ~2 Y# G! O
for i in 192.168.91.19 192.168.91.20;do \6 ]- I- l# R( I( o5 n- }
ssh $i "mkdir -p /etc/containerd"; \6 M+ a! X' x& q7 T n& g$ ]" {
ssh $i "mkdir -p /approot1/k8s/bin"; \
! p5 R/ h x/ a, |4 U& }" ~9 ossh $i "mkdir -p /etc/cni/net.d"; \
& X" j8 O B- ~% n3 C# Dscp /approot1/k8s/tmp/service/containerd.service $i:/etc/systemd/system/; \
4 n1 A, }( m8 u g* I9 Jscp /approot1/k8s/tmp/service/config.toml $i:/etc/containerd/; \
( E) x: P6 X- c+ |% pscp /approot1/k8s/tmp/service/cni-default.conf $i:/etc/cni/net.d/; \
% }; G# D( w& ]. uscp /approot1/k8s/tmp/service/crictl.yaml $i:/etc/; \. S- H% l% e6 p) j u- T* k# P
scp /approot1/k8s/pkg/containerd/* $i:/approot1/k8s/bin/; \
! y1 \6 s, q( escp /approot1/k8s/pkg/runc $i:/approot1/k8s/bin/; \
" W. v, O! z, N( Q. m) l7 W. l) h- [done
Start the containerd service

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable containerd"; \
ssh $i "systemctl restart containerd --no-block"; \
ssh $i "systemctl is-active containerd"; \
done

activating means containerd is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active containerd";done again.

active means containerd started successfully.
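Once containerd is active, crictl (which reads the /etc/crictl.yaml distributed above) can confirm the CRI endpoint answers on every node:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "crictl version"; \
done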
' L% d7 [# ?- ^- l( V
Import the pause image
ctr has a quirk when importing images: for k8s to be able to use an imported image, you must add the -n k8s.io flag, and the command must take the form ctr -n k8s.io image import <xxx.tar>. Writing ctr image import <xxx.tar> -n k8s.io instead fails with ctr: flag provided but not defined: -n. An odd bit of behavior that takes some getting used to.

If an image is imported without -n k8s.io, kubelet will re-pull the pause container when a pod starts, and if the configured image registry has no image with that tag, it will fail.
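In other words, -n is a global ctr flag and must come before the subcommand:

ctr -n k8s.io image import /tmp/pause-v3.6.tar    # correct
ctr image import /tmp/pause-v3.6.tar -n k8s.io    # fails: ctr: flag provided but not defined: -n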
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/pause-v3.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/pause-v3.6.tar && rm -f /tmp/pause-v3.6.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep pause"; \
done
Deploy the kubelet component
Create the kubelet certificate
vim /approot1/k8s/tmp/ssl/kubelet-csr.json.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one JSON file per node (a templating sketch follows the JSON below), and change the IPs inside each file to that worker node's IP. Do not reuse the same IP twice.

{
  "CN": "system:node:192.168.91.19",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:nodes",
      "OU": "System"
    }
  ]
}
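One way to stamp out the per-node files without hand-editing each copy (a sketch; it assumes you first save the JSON above as a hypothetical kubelet-csr.json.template with the placeholder NODE_IP in place of the concrete address):

for i in 192.168.91.19 192.168.91.20;do \
sed "s/NODE_IP/$i/g" /approot1/k8s/tmp/ssl/kubelet-csr.json.template \
> /approot1/k8s/tmp/ssl/kubelet-csr.json.$i; \
done

The same pattern works for the per-node kubelet.service and kube-proxy-config.yaml files further below.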
for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kubelet-csr.json.$i | cfssljson -bare kubelet.$i; \
done
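Optionally, inspect one of the generated certificates to confirm the per-node CN and SAN values came out right (a sketch; the -ext flag needs OpenSSL 1.1.1 or newer):

openssl x509 -in /approot1/k8s/tmp/ssl/kubelet.192.168.91.19.pem -noout -subject -ext subjectAltName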
Create the kubeconfig certificate
Set the cluster parameters

--server is the access address of the apiserver. Change it to your own IP and the port set by the --secure-port parameter in the service file. Be sure to include the https:// scheme, otherwise kubectl cannot reach the apiserver with the generated credentials.
for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the client authentication parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:node:$i \
--client-certificate=kubelet.$i.pem \
--client-key=kubelet.$i-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the context parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=system:node:$i \
--kubeconfig=kubelet.kubeconfig.$i; \
done
Set the default context

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kubelet.kubeconfig.$i; \
done
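Optionally, sanity-check one of the generated kubeconfigs before distributing it (a sketch):

/approot1/k8s/pkg/kubernetes/bin/kubectl config view \
--kubeconfig=/approot1/k8s/tmp/ssl/kubelet.kubeconfig.192.168.91.19 --minify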
Configure the kubelet configuration file
vim /approot1/k8s/tmp/service/config.yaml
Mind the clusterDNS IP here: it must be in the same subnet as the apiserver --service-cluster-ip-range parameter, but must differ from the k8s service IP itself. Conventionally the k8s service takes the first IP of the range and clusterDNS takes the second.

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.88.0.2
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 3
containerLogMaxSize: 10Mi
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 300Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 40s
hairpinMode: hairpin-veth
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 40s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
kubeAPIBurst: 100
kubeAPIQPS: 50
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
# disable readOnlyPort
readOnlyPort: 0
resolvConf: /etc/resolv.conf
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
tlsCertFile: /etc/kubernetes/ssl/kubelet.pem
tlsPrivateKeyFile: /etc/kubernetes/ssl/kubelet-key.pem
Configure kubelet to be managed by systemctl
vim /approot1/k8s/tmp/service/kubelet.service.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one service file per node, and change the IP inside each file to that worker node's IP. Do not reuse the same IP twice.

The --container-runtime parameter defaults to docker. When using any runtime other than docker, set it to remote and point --container-runtime-endpoint at the runtime's sock file.

kubelet parameters

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=/approot1/k8s/data/kubelet
ExecStart=/approot1/k8s/bin/kubelet \
  --config=/approot1/k8s/data/kubelet/config.yaml \
  --cni-bin-dir=/approot1/k8s/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --hostname-override=192.168.91.19 \
  --image-pull-progress-deadline=5m \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --network-plugin=cni \
  --pod-infra-container-image=k8s.gcr.io/pause:3.6 \
  --root-dir=/approot1/k8s/data/kubelet \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths
For more nodes, just append their IPs after 192.168.91.19, separated by spaces. Remember to change 192.168.91.19 to your own IP; do not blindly copy.

Also make sure the directories match your own layout; if yours differ from mine, adjust them accordingly, or the service will fail to start.

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kubelet"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/ssl/kubelet.$i.pem $i:/etc/kubernetes/ssl/kubelet.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.$i-key.pem $i:/etc/kubernetes/ssl/kubelet-key.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.kubeconfig.$i $i:/etc/kubernetes/kubelet.kubeconfig; \
scp /approot1/k8s/tmp/service/kubelet.service.$i $i:/etc/systemd/system/kubelet.service; \
scp /approot1/k8s/tmp/service/config.yaml $i:/approot1/k8s/data/kubelet/; \
scp /approot1/k8s/pkg/kubernetes/bin/kubelet $i:/approot1/k8s/bin/; \
done
Start the kubelet service
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kubelet"; \
ssh $i "systemctl restart kubelet --no-block"; \
ssh $i "systemctl is-active kubelet"; \
done
If activating is returned, kubelet is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kubelet";done again.

If active is returned, kubelet started successfully.

Check whether the nodes are Ready
kubectl get node
Expect output similar to the following; a STATUS of Ready means the node is healthy.

NAME            STATUS   ROLES    AGE   VERSION
192.168.91.19   Ready    <none>   20m   v1.23.3
192.168.91.20   Ready    <none>   20m   v1.23.3
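The ROLES column shows <none> because no role label is set on the nodes. This is purely cosmetic, but you can add one if you like (a sketch):

kubectl label node 192.168.91.19 node-role.kubernetes.io/worker=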
Deploy the kube-proxy component
Create the kube-proxy certificate
vim /approot1/k8s/tmp/ssl/kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-proxy",
      "OU": "System"
    }
  ]
}
cd /approot1/k8s/tmp/ssl/; \
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Create the kubeconfig certificate
Set the cluster parameters

--server is the access address of the apiserver. Change it to your own IP and the port set by the --secure-port parameter in the service file. Be sure to include the https:// scheme, otherwise kubectl cannot reach the apiserver with the generated credentials.
cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-proxy.kubeconfig
Set the client authentication parameters
cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kube-proxy.kubeconfig
Configure the kube-proxy configuration file
vim /approot1/k8s/tmp/service/kube-proxy-config.yaml.192.168.91.19
Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one config file per node, and change the IP inside each file to that worker node's IP. Do not reuse the same IP twice.

The clusterCIDR parameter must match the --cluster-cidr parameter of controller-manager.

hostnameOverride must match kubelet's --hostname-override parameter, otherwise kube-proxy reports node not found errors.

kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
clusterCIDR: "172.20.0.0/16"
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "192.168.91.19"
metricsBindAddress: 0.0.0.0:10249
mode: "ipvs"
Configure kube-proxy to be managed by systemctl
vim /approot1/k8s/tmp/service/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
# kube-proxy uses --cluster-cidr to tell cluster-internal traffic from external traffic
## with --cluster-cidr or --masquerade-all set,
## kube-proxy SNATs requests that access a Service IP
WorkingDirectory=/approot1/k8s/data/kube-proxy
ExecStart=/approot1/k8s/bin/kube-proxy \
  --config=/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths
For more nodes, just append their IPs after 192.168.91.19, separated by spaces. Remember to change 192.168.91.19 to your own IP; do not blindly copy.

Also make sure the directories match your own layout; if yours differ from mine, adjust them accordingly, or the service will fail to start.

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kube-proxy"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/kube-proxy.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-proxy.service $i:/etc/systemd/system/; \
scp /approot1/k8s/tmp/service/kube-proxy-config.yaml.$i $i:/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-proxy $i:/approot1/k8s/bin/; \
done
Start the kube-proxy service
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-proxy"; \
ssh $i "systemctl restart kube-proxy --no-block"; \
ssh $i "systemctl is-active kube-proxy"; \
done
If activating is returned, kube-proxy is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kube-proxy";done again.

If active is returned, kube-proxy started successfully.

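Optionally, confirm kube-proxy really came up in ipvs mode (a sketch; ipvsadm must be installed on the node, and virtual servers only appear once Services exist):

curl -s 127.0.0.1:10249/proxyMode    # served on the metricsBindAddress port; should print ipvs
ipvsadm -Ln | head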
Deploy the flannel component
flannel github

Configure the flannel yaml file
vim /approot1/k8s/tmp/service/flannel.yaml
The Network parameter inside net-conf.json must match the --cluster-cidr parameter of controller-manager.
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "172.20.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
Configure the flannel CNI network config file
vim /approot1/k8s/tmp/service/10-flannel.conflist
{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
Import the flannel image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/flannel-v0.15.1.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/flannel-v0.15.1.tar && rm -f /tmp/flannel-v0.15.1.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep flannel"; \
done
Distribute the flannel CNI network config file
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "rm -f /etc/cni/net.d/10-default.conf"; \
scp /approot1/k8s/tmp/service/10-flannel.conflist $i:/etc/cni/net.d/; \
done
After the flannel CNI config file has been distributed, the nodes will temporarily show NotReady. Wait until all nodes are back to Ready before running the flannel component; a watch sketch follows below.

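A simple way to watch the nodes settle (Ctrl-C to exit):

kubectl get node -w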
Run the flannel component in k8s
kubectl apply -f /approot1/k8s/tmp/service/flannel.yaml
Check whether the flannel pods are running
kubectl get pod -n kube-system | grep flannel
Expect output similar to the following.

flannel runs as a DaemonSet, i.e. a pod type that lives and dies with its node: there is one flannel pod per k8s node, and when a node is deleted its flannel pod is deleted with it.

kube-flannel-ds-86rrv 1/1 Running 0 8m54s
kube-flannel-ds-bkgzx 1/1 Running 0 8m53s
On the SUSE 12 distribution you may hit Init:CreateContainerError. Run kubectl describe pod -n kube-system <flannel_pod_name> to see the cause. If the error is Error: failed to create containerd container: get apparmor_parser version: exec: "apparmor_parser": executable file not found in $PATH, locate apparmor_parser with which apparmor_parser, create a symlink to it in the directory that holds the kubelet binary, and restart the pod. Note that every node running flannel needs this symlink; a sketch follows below.

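A sketch of that symlink workaround, to run on every affected node (the target directory assumes this guide's layout, where the kubelet binary lives in /approot1/k8s/bin):

ln -s "$(which apparmor_parser)" /approot1/k8s/bin/apparmor_parser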
Deploy the coredns component
Configure the coredns yaml file
vim /approot1/k8s/tmp/service/coredns.yaml
The clusterIP parameter must match the clusterDNS parameter in the kubelet configuration file.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: docker.io/coredns/coredns:1.8.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.88.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
Import the coredns image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/coredns-v1.8.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/coredns-v1.8.6.tar && rm -f /tmp/coredns-v1.8.6.tar"; \
done
Check the image

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep coredns"; \
done
Run the coredns component in k8s
kubectl apply -f /approot1/k8s/tmp/service/coredns.yaml
Check whether the coredns pod is running
kubectl get pod -n kube-system | grep coredns
Expect output similar to the following.

Because the replicas parameter in the coredns yaml file is 1, there is only one pod here; change it to 2 and you get two pods.

coredns-5fd74ff788-cddqf 1/1 Running 0 10s
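Optionally, verify in-cluster DNS resolution end to end (a sketch; it pulls busybox:1.28 from the public registry if the image is not already on the node):

kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default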
Deploy the metrics-server component
Configure the metrics-server yaml file
vim /approot1/k8s/tmp/service/metrics-server.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-insecure-tls
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
Import the metrics-server image
for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/metrics-server-v0.5.2.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/metrics-server-v0.5.2.tar && rm -f /tmp/metrics-server-v0.5.2.tar"; \
done
Check the image
for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep metrics-server"; \
done
Run the metrics-server component in k8s
kubectl apply -f /approot1/k8s/tmp/service/metrics-server.yaml
Check whether the metrics-server pod is running
kubectl get pod -n kube-system | grep metrics-server
Expect output similar to the following.

metrics-server-6c95598969-qnc76 1/1 Running 0 71s
Verify metrics-server functionality

Check node resource usage

kubectl top node
Expect output similar to the following.

metrics-server is slow to start, and how slow depends on the machine. If the output says is not yet or is not ready, wait a while and run kubectl top node again.

NAME            CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
192.168.91.19   285m         4%     2513Mi          32%
192.168.91.20   71m          3%     792Mi           21%
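You can also query the metrics API directly through the apiserver aggregation layer (a sketch):

kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes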
Check pod resource usage in a given namespace

kubectl top pod -n kube-system
Expect output similar to the following.

NAME                              CPU(cores)   MEMORY(bytes)
coredns-5fd74ff788-cddqf          11m          18Mi
kube-flannel-ds-86rrv             4m           18Mi
kube-flannel-ds-bkgzx             6m           22Mi
kube-flannel-ds-v25xc             6m           22Mi
metrics-server-6c95598969-qnc76   6m           22Mi
- v1 Z$ t$ Y/ t7 g! ?- ^5 a部署 dashboard 组件
. ]1 w G: G- O/ N; O! V, A配置 dashboard yaml 文件* a2 p# `5 P1 ^, E, L1 J( w D/ B
vim /approot1/k8s/tmp/service/dashboard.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-read-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-read-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dashboard-read-clusterrole
subjects:
- kind: ServiceAccount
  name: dashboard-read-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: dashboard-read-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - persistentvolumes
  - persistentvolumeclaims
  - persistentvolumeclaims/status
  - pods
  - replicationcontrollers
  - replicationcontrollers/scale
  - serviceaccounts
  - services
  - services/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - bindings
  - events
  - limitranges
  - namespaces/status
  - pods/log
  - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - controllerrevisions
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - statefulsets
  - statefulsets/scale
  - statefulsets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  - horizontalpodautoscalers/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - cronjobs/status
  - jobs
  - jobs/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - ingresses
  - ingresses/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - replicationcontrollers/scale
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  - poddisruptionbudgets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingresses/status
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  - volumeattachments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  - clusterroles
  - roles
  - rolebindings
  verbs:
  - get
  - list
  - watch

---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque

---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kube-system

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
& Z4 j% }5 `9 f8 Q) o# N resources: ["secrets"]& ]% J) T7 H8 n; O" N6 Z
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
3 @0 f5 D7 b. T' g verbs: ["get", "update", "delete"]
# F9 V+ S0 m* O! P1 G # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.2 `* M, i& m5 G0 Z4 r; a9 ^
- apiGroups: [""]
) a) O0 E) j; n# T: k+ o, r; s: a resources: ["configmaps"]
' u4 C3 Z: \) K# n8 f: [# R8 k7 J1 C6 i. s resourceNames: ["kubernetes-dashboard-settings"]
5 A" w2 K2 X) F verbs: ["get", "update"]
% D8 t, ~# J' L6 K! C' @: m9 @ # Allow Dashboard to get metrics.
1 O0 _2 H/ Y0 H$ r* \ - apiGroups: [""]
/ R0 N& C* N+ V2 H5 [1 }) U1 t resources: ["services"]1 f! O0 f4 O1 y- x# Z
resourceNames: ["heapster", "dashboard-metrics-scraper"]8 o) d( e6 U) l6 a. g8 J3 M; B
verbs: ["proxy"]6 R& V5 A' i/ `3 _5 h r
- apiGroups: [""]
3 `9 T% y5 O# C; G& o3 E resources: ["services/proxy"]
- D7 d$ M. y. `$ t3 E1 z resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
# X* J* k0 G% q" p7 ? verbs: ["get"]
: P E4 q0 U, L: ]2 e6 r6 u
" u$ O$ B$ d# b6 H1 I" @---
& z4 ]% }; R, @3 v5 \6 a4 ~kind: ClusterRole
' o8 k2 u! K4 B5 g7 \apiVersion: rbac.authorization.k8s.io/v1
+ K7 h: T( U5 t( k: B: _metadata:. L; m' g. G/ V! O/ t# \; C
labels:
% V8 M2 Y8 F0 m& {1 z' B k8s-app: kubernetes-dashboard$ h V1 Z1 r( s: F. e
name: kubernetes-dashboard
: c5 x! v G$ \8 j+ c- V& ~rules:
% X4 w* i; F" C" A } # Allow Metrics Scraper to get metrics from the Metrics server" h! R+ D) J/ g9 {& m) X5 g
- apiGroups: ["metrics.k8s.io"]4 h, B' A7 p; M, W5 f& b: Z6 _
resources: ["pods", "nodes"]2 X$ m( |6 K7 E2 M* M2 p/ ?' t
verbs: ["get", "list", "watch"]# \" p' @' C* U, E4 K' d& K
2 o, s* f! U# L8 T) u---9 b" n: p2 d: V* F& u
apiVersion: rbac.authorization.k8s.io/v1
" s& C! Q: I( H; f, Fkind: RoleBinding; R) r @# k- ^& i v
metadata:
: G6 M: x g! g V9 `3 N labels:, j) x. X1 _: p4 A5 X
k8s-app: kubernetes-dashboard
# U0 p/ ], o2 a4 c1 U+ q; x$ v name: kubernetes-dashboard; Q, d2 r' \' I. C% _
namespace: kube-system' o; e' U" D! ]. S2 @, ?* g
roleRef:6 W7 k- j4 U: m0 `* m
apiGroup: rbac.authorization.k8s.io
# |: K$ n! Z9 j; E! ] g; E1 g( j kind: Role
" w% w0 d5 ^* b8 T. t0 |4 }' g name: kubernetes-dashboard9 K" M3 O2 T0 ~! A
subjects:2 U* r% R0 }2 _+ v" m1 E4 |3 t3 m
- kind: ServiceAccount* q6 N9 b6 y1 Z3 |
name: kubernetes-dashboard
7 C. f( [* z- b' N9 f namespace: kube-system8 q7 P9 r: j' f6 A. H. V* ]
' Y. P. r* R* x! C, N. N
---: z" t5 \' i+ `% g
apiVersion: rbac.authorization.k8s.io/v19 i- [8 ~4 Y: S9 `9 ~9 H
kind: ClusterRoleBinding
1 @2 a6 i. M+ t- bmetadata:
3 e/ U& v$ q% v6 E. d name: kubernetes-dashboard, F: \7 C \4 D& n4 D' {0 [' O6 E
roleRef:0 i& j* N: ?8 [8 I9 S; r
apiGroup: rbac.authorization.k8s.io
6 X9 ^2 R' }; W; F' V- S q, D kind: ClusterRole" C) w4 C2 s# V8 Q
name: kubernetes-dashboard
; U) B/ e0 g# Q N) q( psubjects:
+ ]7 e) G0 d' i. {: M+ |9 t - kind: ServiceAccount
3 `% Q$ s* Y3 V4 Y$ @" g9 U' X4 P name: kubernetes-dashboard: e4 C) i# f1 a# S0 K
namespace: kube-system
. a; N* P2 D4 g
1 b# h- z# W: \3 z7 H---" N3 [, b! d. l
kind: Deployment
" y5 u U1 I( J+ J8 a2 j: ~apiVersion: apps/v1
% |+ k, w7 t- Q/ ~metadata:
; D5 H" M! d& r/ x* s8 s [! y labels:- P. J, x/ V- q& j0 ]0 m: Q
k8s-app: kubernetes-dashboard
6 ]+ J5 D9 I/ V9 ^ name: kubernetes-dashboard
, @3 U7 S L3 q$ Q* U namespace: kube-system
2 F. v" u! H* R: P$ g1 y' Yspec: P8 j+ x( U5 x
replicas: 1" C2 O$ Z0 A: x9 c; M2 `4 M- R
revisionHistoryLimit: 102 h* X% H- U( z+ b' n
selector:
1 z# L$ S0 x: u }; q% _# Y matchLabels:
/ p. c1 k: h6 ?' D k8s-app: kubernetes-dashboard
0 Y/ \5 K* C, X template:
( v7 }; A: `2 j; ]& @0 f* c: i0 z' F metadata:
`: v; ]$ e1 U1 Y, o! D labels:% c" X6 H+ x# P
k8s-app: kubernetes-dashboard
- ?6 K) k, Z2 Z. ~ spec:: r, c2 t& G, i2 g1 ?# v2 s! F" u
containers:% T0 W& C( z, P7 H
- name: kubernetes-dashboard
4 {& H% J% n/ _- W+ e image: kubernetesui/dashboard:v2.4.0
% A2 @" ^; ~6 y3 ]# S# F imagePullPolicy: IfNotPresent# q# x* F# |0 D Q) h# D
ports:
; F* X0 b- Y4 W4 f - containerPort: 8443
; K; N. O9 Y7 t# x) b% X protocol: TCP
! u7 o5 H6 c' { args:
3 u4 F/ q- e, z$ a/ x - --auto-generate-certificates: u# p7 A: C( V4 z( ] g
- --namespace=kube-system
: j0 T- x& s$ N" }+ l - --token-ttl=1800
- J/ @1 y# q8 j A8 V. a& c - --sidecar-host=http://dashboard-metrics-scraper:80009 z- y6 x' N9 Y
# Uncomment the following line to manually specify Kubernetes API server Host
0 B9 _# I z0 v/ }, H& X. S # If not specified, Dashboard will attempt to auto discover the API server and connect
8 M$ \+ e# ?3 }5 a# ? # to it. Uncomment only if the default does not work.
) e- | u0 Z9 a9 k$ Y # - --apiserver-host=http://my-address:port
9 O! B; g+ l0 j" \; @0 G% C" Q* A volumeMounts:& ?4 Y6 j. g( ^; W$ B4 h5 }
- name: kubernetes-dashboard-certs
- Y7 ]8 ]: d' }& F9 c/ T mountPath: /certs
4 \1 b, g# E9 W# H* _! G # Create on-disk volume to store exec logs
/ \8 S8 n. q! _' t, K) D3 z/ n - mountPath: /tmp
7 y9 U8 P2 V8 l$ b# @* L9 V name: tmp-volume: P6 ?1 a2 X, M
livenessProbe:1 _7 o* v, N0 L# s0 n# ~
httpGet:
, |% g2 z0 H/ }7 ~: l( Z6 L* z scheme: HTTPS
/ w7 M) e) _& D. d path: /
( i5 ?$ k4 a. I port: 8443
7 a, E5 Y, |8 B+ y' ^# F initialDelaySeconds: 30
1 E4 s/ e" o d* G timeoutSeconds: 30' ? J1 D, R( K6 P
securityContext:0 j& f8 u, H ?' P7 W4 }
allowPrivilegeEscalation: false
7 Q$ G& h V/ F+ U. c- E* M5 S: v readOnlyRootFilesystem: true9 r5 Y6 [* }% V( F4 C
runAsUser: 1001
. h( U- _* D7 O" i; s L runAsGroup: 2001$ ?- q1 |) i, ~& f
volumes:% v' t+ h% ]) _0 d* E
- name: kubernetes-dashboard-certs
, V) L2 e7 q, ]4 X* n* U secret:
3 T7 F( S) O# i: O4 _ secretName: kubernetes-dashboard-certs$ u, y" r4 ]8 \+ h
- name: tmp-volume! D& R# x3 q6 v% X, n
emptyDir: {}
( L, J( X% s4 v serviceAccountName: kubernetes-dashboard
+ ~. K( Q" E: Q7 f- h1 a. {) B nodeSelector:6 i1 ]7 R# Q4 M# W [1 M0 ~
"kubernetes.io/os": linux- Y2 i2 ]% z; w; L8 Q, w
# Comment the following tolerations if Dashboard must not be deployed on master1 i; ?2 q8 Q! x) s# S3 w7 ^: }' t
tolerations:# Q+ T2 B1 g% R9 d. X% j
- key: node-role.kubernetes.io/master/ b& s: {; b. K9 r' b7 v) V5 s
effect: NoSchedule
$ C7 Z1 ^ I" F% B; }( D' x# X+ G" y$ M& [' Y7 ]9 p- |3 P, N) t
---
+ J5 e' a6 C" v: d* rkind: Service9 i8 j; f) I1 g- i& A
apiVersion: v1
1 a6 _% @1 R6 f2 Wmetadata:3 ?5 U3 l9 U& n0 V
labels:
* i, s \4 c# c% V- U: T k8s-app: dashboard-metrics-scraper% k; ` i2 n) i7 \" w
name: dashboard-metrics-scraper
& u" g: w+ ?2 |+ ^. ?! t namespace: kube-system
& n: P4 b: }; U& _spec:
" R/ k8 \; R9 M4 _ ports:' j; r) f8 g0 f1 n: H4 o
- port: 8000$ @& F! R, W- r* p# [0 |( Z: x# \) F
targetPort: 8000
& B, [( I P2 ^! h' _& {) _ selector:
4 u! U3 g; M+ m0 R, U k8s-app: dashboard-metrics-scraper6 B! {, R+ Y0 h4 A1 a
% d! U+ ]9 p! y5 X& T
---
& M0 L5 i4 {2 k6 `1 J2 Hkind: Deployment
- ]0 Q/ d6 ]! j& E2 capiVersion: apps/v1
+ i, C1 S/ a, c1 f: s! umetadata:
4 P! l/ M$ N" T& V5 H& I. l. N labels:+ I/ L% u* m9 [0 V# M6 y
k8s-app: dashboard-metrics-scraper
2 w6 D$ `' m i0 ~2 S name: dashboard-metrics-scraper4 Q1 O$ C( m$ ^
namespace: kube-system
8 a4 v/ ~- T3 z# z7 Q! X' X# nspec:$ l' ]5 m9 z7 V( g; _, F
replicas: 1
0 J" ?* x& |, @: T9 C+ E revisionHistoryLimit: 10' o$ E/ Y2 X- k1 V' E% s
selector:6 i9 I+ X& ]" y
matchLabels:# A( W: c* E# c6 [
k8s-app: dashboard-metrics-scraper
: _$ s! o. S: g/ o) P% n template:
4 T* E1 q. e: i' g$ R% R, B metadata:3 d% f' M- T- B, m6 _, T
labels:% _' q ?5 C o2 r5 C5 O
k8s-app: dashboard-metrics-scraper
4 a& u' z7 z0 f; O- k spec:
, _' s$ s/ d6 Y9 Q0 }( W0 I3 K- |/ H securityContext:* X4 n* J# L5 ^" K$ }
seccompProfile:
0 t2 }! i6 D- h9 n2 ]: x( ]1 S type: RuntimeDefault
; [: E: V' T' t; }: Q1 T% s containers:9 K3 {( C9 w4 O; ^. ~
- name: dashboard-metrics-scraper# d3 [5 x( D: M( J! {$ c
image: kubernetesui/metrics-scraper:v1.0.7
/ R1 t+ ]9 G8 o7 r imagePullPolicy: IfNotPresent
* p" y; h& _1 q$ ~6 z ports:* D& [( g3 ~, l1 v$ i! }
- containerPort: 8000 O( t: f2 t- R, I8 F4 g
protocol: TCP
3 t" S4 R3 }4 M# q- \6 t! `: O7 K livenessProbe:- b4 G# d7 T g% r% W
httpGet:6 T/ T" m' C8 X1 @9 G/ @) i
scheme: HTTP
* _8 h8 a# i' Z" h4 V path: /
; x7 h4 l$ e5 L- ` port: 80006 I7 H8 D1 n# u% o3 R
initialDelaySeconds: 30
/ |- d6 |9 S: ?0 z timeoutSeconds: 30
* G' a8 w5 W& S2 r$ ^! V* b { volumeMounts:
) A3 T9 @2 g K( |* t" l - mountPath: /tmp- j0 h |$ Y F7 C- t! \
name: tmp-volume
5 t9 z# p' i0 ~& R6 n* k securityContext:
5 B' {) n7 i) _4 t7 V& ?% d allowPrivilegeEscalation: false
+ o( G: z7 f( T* k9 s$ ]$ I readOnlyRootFilesystem: true
2 ^3 t/ f7 v& K6 S runAsUser: 1001. w7 E2 t3 b4 g& _$ @/ J; Z
runAsGroup: 20016 M4 r& \# n; \$ O; i6 w9 d
serviceAccountName: kubernetes-dashboard
: e5 a( C# X. a5 w nodeSelector:
( O5 X" x$ Q& x8 T4 V "kubernetes.io/os": linux1 Z ?% Z2 v' T7 G
# Comment the following tolerations if Dashboard must not be deployed on master2 j$ A" n7 w% \ l; N3 O
tolerations:
2 O5 U3 F; p! F& B7 F: ~ P - key: node-role.kubernetes.io/master
$ v) i" ^; |$ q" u( ]( G1 f effect: NoSchedule
. n8 n. b: `. a( l% } volumes:
7 h1 H1 G2 Y2 i - name: tmp-volume( ~# \/ T# Q- \8 [7 B& E% |' v
emptyDir: {}, u/ s3 T% J: \8 U7 E
Import the dashboard images

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/dashboard-*.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-v2.4.0.tar && rm -f /tmp/dashboard-v2.4.0.tar"; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-metrics-scraper-v1.0.7.tar && rm -f /tmp/dashboard-metrics-scraper-v1.0.7.tar"; \
done
Verify the images

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | egrep 'dashboard|metrics-scraper'"; \
done
Run the dashboard components in k8s
kubectl apply -f /approot1/k8s/tmp/service/dashboard.yaml
Check that the dashboard pods are running
kubectl get pod -n kube-system | grep dashboard

The expected output looks similar to this:

dashboard-metrics-scraper-799d786dbf-v28pm   1/1     Running   0          2m55s
kubernetes-dashboard-9f8c8b989-rhb7z         1/1     Running   0          2m55s
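If a pod is stuck in a state such as ImagePullBackOff or CrashLoopBackOff instead of Running, the usual first step is to inspect its events and logs. The pod name below is just the example name from the output above; substitute your own:

kubectl describe pod -n kube-system kubernetes-dashboard-9f8c8b989-rhb7z
kubectl logs -n kube-system kubernetes-dashboard-9f8c8b989-rhb7z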
Check the dashboard access port

The service definition does not pin the dashboard access port, so the randomly assigned NodePort has to be looked up; alternatively, modify the yaml file to specify a fixed port (see the sketch after this step)

kubectl get svc -n kube-system | grep dashboard

The expected output looks similar to this; in my case node port 30210 is mapped to port 443 of the pod

kubernetes-dashboard        NodePort    10.88.127.68    <none>        443:30210/TCP   5m30s

Access the dashboard page with the port you obtained, for example: https://192.168.91.19:30210
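One way to pin the port without re-editing the manifest is a JSON patch against the live Service. A sketch, assuming 30210 is free and inside the default NodePort range 30000-32767; the same effect comes from adding nodePort: 30210 under the ports entry in dashboard.yaml before applying it:

kubectl patch svc kubernetes-dashboard -n kube-system --type='json' \
  -p='[{"op":"replace","path":"/spec/ports/0/nodePort","value":30210}]'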
Get the dashboard login token
Get the name of the token secret

kubectl get secrets -n kube-system | grep admin

The expected output looks similar to this:

admin-user-token-zvrst               kubernetes.io/service-account-token   3      9m2s

Get the token itself

kubectl get secrets -n kube-system admin-user-token-zvrst -o jsonpath={.data.token}|base64 -d

The expected output looks similar to this:

eyJhbGciOiJSUzI1NiIsImtpZCI6InA4M1lhZVgwNkJtekhUd3Vqdm9vTE1ma1JYQ1ZuZ3c3ZE1WZmJhUXR4bUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXp2cnN0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhYTE3NTg1ZC1hM2JiLTQ0YWYtOWNhZS0yNjQ5YzA0YThmZWYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.K2o9p5St9tvIbXk7mCQCwsZQV11zICwN-JXhRv1hAnc9KFcAcDOiO4NxIeicvC2H9tHQBIJsREowVwY3yGWHj_MQa57EdBNWMrN1hJ5u-XzpzJ6JbQxns8ZBrCpIR8Fxt468rpTyMyqsO2UBo-oXQ0_ZXKss6X6jjxtGLCQFkz1ZfFTQW3n49L4ENzW40sSj4dnaX-PsmosVOpsKRHa8TPndusAT-58aujcqt31Z77C4M13X_vAdjyDLK9r5ZXwV2ryOdONwJye_VtXXrExBt9FWYtLGCQjKn41pwXqEfidT8cY6xbA7XgUVTr9miAmZ-jf1UeEw-nm8FOw9Bb5v6A
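Because the secret name carries a random suffix, the lookup and the decode can also be combined into a single command. A sketch, assuming the admin-user ServiceAccount created earlier and standard awk/base64 on the host:

kubectl get secrets -n kube-system \
  $(kubectl get secrets -n kube-system | awk '/^admin-user-token/{print $1}') \
  -o jsonpath='{.data.token}' | base64 -d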
This concludes the binary deployment of k8s v1.23.3 based on containerd.