Create directories

Create the following path structure according to your own environment; it holds the k8s binaries and the image files used during the deployment.

mkdir -p /approot1/k8s/{bin,images,pkg,tmp/{ssl,service}}
Disable the firewall

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl disable firewalld"; \
ssh $i "systemctl stop firewalld"; \
done
Disable SELinux

Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "setenforce 0"; \
done

Permanently:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sed -i '/SELINUX/s/enforcing/disabled/g' /etc/selinux/config"; \
done
Disable swap

Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "swapoff -a"; \
done

Permanently:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"; \
done
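To confirm swap really is off on every node, here is a quick optional check (not part of the original steps); the Swap line should report 0:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "free -h | grep -i swap"; \
done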
Enable kernel modules

Temporarily:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "modprobe ip_vs"; \
ssh $i "modprobe ip_vs_rr"; \
ssh $i "modprobe ip_vs_wrr"; \
ssh $i "modprobe ip_vs_sh"; \
ssh $i "modprobe nf_conntrack"; \
ssh $i "modprobe nf_conntrack_ipv4"; \
ssh $i "modprobe br_netfilter"; \
ssh $i "modprobe overlay"; \
done

(On kernels 4.19 and later, nf_conntrack_ipv4 has been merged into nf_conntrack, so that particular modprobe may fail; this is harmless.)
Permanently:

vim /approot1/k8s/tmp/service/k8s-modules.conf

ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
nf_conntrack_ipv4
br_netfilter
overlay

Distribute it to all nodes:

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/k8s-modules.conf $i:/etc/modules-load.d/; \
done
Enable the systemd module auto-load service:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl enable systemd-modules-load"; \
ssh $i "systemctl restart systemd-modules-load"; \
ssh $i "systemctl is-active systemd-modules-load"; \
done

"active" means the module auto-load service started successfully.
Configure kernel parameters

The following parameters are suitable for 3.x and 4.x kernels.

vim /approot1/k8s/tmp/service/kubernetes.conf

Before pasting into vim, run :set paste first, so the pasted content matches the document exactly (no stray comments or broken indentation).

# Enable packet forwarding (needed for vxlan)
net.ipv4.ip_forward=1
# Let iptables process bridged traffic
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-arptables=1
# Disable tcp_tw_recycle; it conflicts with NAT and breaks connectivity
net.ipv4.tcp_tw_recycle=0
# Do not reuse TIME-WAIT sockets for new TCP connections
net.ipv4.tcp_tw_reuse=0
# Upper limit of the socket listen() backlog
net.core.somaxconn=32768
# Maximum number of tracked connections; default is nf_conntrack_buckets * 4
net.netfilter.nf_conntrack_max=1000000
# Avoid swap; only use it when the system is about to OOM
vm.swappiness=0
# Maximum number of memory-mapped areas a process may have
vm.max_map_count=655360
# Maximum number of file handles the kernel can allocate
fs.file-max=6553600
# Keepalive tuning for long-lived connections
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
Distribute it to all nodes:

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/tmp/service/kubernetes.conf $i:/etc/sysctl.d/; \
done

Load the parameters:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl -p /etc/sysctl.d/kubernetes.conf"; \
done
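sysctl -p already echoes each setting it applies; if you want to spot-check individual values afterwards, something like this works (optional):

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables"; \
done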
Flush iptables rules

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat"; \
ssh $i "iptables -P FORWARD ACCEPT"; \
done
Configure the PATH variable

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "echo 'PATH=$PATH:/approot1/k8s/bin' >> $HOME/.bashrc"; \
done
source $HOME/.bashrc
Download the binaries

This only needs to be done on one node.

GitHub downloads can be slow; alternatively, upload the files from your local machine to /approot1/k8s/pkg/.

wget -O /approot1/k8s/pkg/kubernetes.tar.gz \
https://dl.k8s.io/v1.23.3/kubernetes-server-linux-amd64.tar.gz

wget -O /approot1/k8s/pkg/etcd.tar.gz \
https://github.com/etcd-io/etcd/ ... -linux-amd64.tar.gz
Unpack and remove the unneeded files:

cd /approot1/k8s/pkg/
for i in $(ls *.tar.gz);do tar xvf $i && rm -f $i;done
mv kubernetes/server/bin/ kubernetes/
rm -rf kubernetes/{addons,kubernetes-src.tar.gz,LICENSES,server}
rm -f kubernetes/bin/*_tag kubernetes/bin/*.tar
rm -rf etcd-v3.5.1-linux-amd64/Documentation etcd-v3.5.1-linux-amd64/*.md
Deploy the master node

Create the CA root certificate

wget -O /approot1/k8s/bin/cfssl https://github.com/cloudflare/cf ... l_1.6.1_linux_amd64
wget -O /approot1/k8s/bin/cfssljson https://github.com/cloudflare/cf ... n_1.6.1_linux_amd64
chmod +x /approot1/k8s/bin/*
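A quick optional sanity check that cfssl downloaded intact and is executable:

/approot1/k8s/bin/cfssl version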
vim /approot1/k8s/tmp/ssl/ca-config.json

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
vim /approot1/k8s/tmp/ssl/ca-csr.json

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
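This produces ca.pem, ca-key.pem, and ca.csr in the current directory. To inspect the generated root certificate, an optional check:

cfssl certinfo -cert ca.pem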
Deploy the etcd component

Create the etcd certificate

vim /approot1/k8s/tmp/ssl/etcd-csr.json

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Mind the JSON format.

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
; j# u$ `; x8 f8 I7 B3 b配置 etcd 为 systemctl 管理
" T, m# l- [3 b+ X$ }vim /approot1/k8s/tmp/service/kube-etcd.service.192.168.91.195 k4 Q- W7 R7 l X
这里的192.168.91.19需要改成自己的ip,不要一股脑的复制黏贴
4 ~: a* Z( \3 S0 h ~7 J( t \7 x1 u/ Y' T
etcd 参数
6 V% E |& _5 z Q- l- p
' C( {5 w7 B2 z8 p$ S9 X0 B[Unit], p2 O, p) b6 L- {7 {( n# |' O8 E
Description=Etcd Server! [4 _, K: A, Y
After=network.target5 v" v* m0 a: F" ?9 {" l; N
After=network-online.target2 C/ [( x( P! r X1 f: L% r
Wants=network-online.target6 p: @$ ? B: v& u% l
Documentation=https://github.com/coreos0 ^3 m. j5 d* W1 k9 {/ F
, |& n2 i& `+ j- h0 ]2 R
[Service]
+ |- ~+ x4 D7 T) A8 aType=notify/ _! a$ r! o* X8 v/ F3 L( y
WorkingDirectory=/approot1/k8s/data/etcd
! ^; G, N' k, b7 g8 a; [/ nExecStart=/approot1/k8s/bin/etcd \- Z" }5 p% ` g" R4 D* j
--name=etcd-192.168.91.19 \- ~+ |' f( e5 [& C2 R' U% C3 D) T
--cert-file=/etc/kubernetes/ssl/etcd.pem \1 Q: g4 l( n, Q0 ~' g! ?
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
) H9 {) v! A4 r( } --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
4 X+ {6 k4 R. O7 ]: S, z, E --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
# `$ I$ u9 S9 J7 A --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
3 O$ d% q( F8 V --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \; Z( l+ M) N# o: x- c, F, m
--initial-advertise-peer-urls=https://192.168.91.19:2380 \
}2 e/ t' N, S+ m1 e --listen-peer-urls=https://192.168.91.19:2380 \2 G% ]: \# W! ?; P3 U1 d
--listen-client-urls=https://192.168.91.19:2379,http://127.0.0.1:2379 \
' p$ O0 G8 V j! ]9 O --advertise-client-urls=https://192.168.91.19:2379 \
+ M$ K+ G, n2 E --initial-cluster-token=etcd-cluster-0 \0 q4 T& E% J0 h& Z ^
--initial-cluster=etcd-192.168.91.19=https://192.168.91.19:2380 \) R4 Y6 A! i: q( a! r3 q, B
--initial-cluster-state=new \
: q v8 d' t6 d# y! _. U --data-dir=/approot1/k8s/data/etcd \3 j" G, @1 g! {/ C$ h
--wal-dir= \6 O2 e5 O& K. M/ d/ E- `$ J1 y
--snapshot-count=50000 \8 K ?8 [! B0 U+ A$ T5 N n# ]; O
--auto-compaction-retention=1 \
9 ~# {7 G6 Y! H* B* ^8 L --auto-compaction-mode=periodic \, O Y% Z- V2 c8 m/ {8 E: [3 T' @( [
--max-request-bytes=10485760 \/ I+ B* F! C& B- N
--quota-backend-bytes=85899345925 `$ z' y7 W2 N0 C1 q) X
Restart=always
9 a" @, ~1 F8 C+ {0 aRestartSec=15 y/ Z u1 w7 c1 f
LimitNOFILE=65536
. I9 r7 }* z- X- C1 t0 ^OOMScoreAdjust=-999/ L l1 E% F% j! [
9 Q/ T3 Z( C3 J+ F# i
[Install]) U% s A: t8 t" n8 ~0 N
WantedBy=multi-user.target$ _, e! Q% Y5 p. p4 m5 K! F D
Distribute the certificates and create the required paths

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -m 700 -p /approot1/k8s/data/etcd"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,etcd*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-etcd.service.$i $i:/etc/systemd/system/kube-etcd.service; \
scp /approot1/k8s/pkg/etcd-v3.5.1-linux-amd64/etcd* $i:/approot1/k8s/bin/; \
done
Start the etcd service

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-etcd"; \
ssh $i "systemctl restart kube-etcd --no-block"; \
ssh $i "systemctl is-active kube-etcd"; \
done

"activating" means etcd is still starting up; wait a moment, then run: for i in 192.168.91.19;do ssh $i "systemctl is-active kube-etcd";done

"active" means etcd started successfully. With a multi-node etcd cluster it is normal for one node not to return active yet; verify the cluster as follows.

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://${i}:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
endpoint health"; \
done

https://192.168.91.19:2379 is healthy: successfully committed proposal: took = 7.135668ms

Output like the above containing "successfully" means the node is healthy.
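For a richer view (leader, raft term, DB size), etcdctl also offers endpoint status; a sketch, assuming the same certificate paths as above:

for i in 192.168.91.19;do \
ssh $i "ETCDCTL_API=3 /approot1/k8s/bin/etcdctl \
--endpoints=https://${i}:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/kubernetes/ssl/etcd.pem \
--key=/etc/kubernetes/ssl/etcd-key.pem \
endpoint status -w table"; \
done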
Deploy the apiserver component

Create the apiserver certificate

vim /approot1/k8s/tmp/ssl/kubernetes-csr.json

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Mind the JSON format.

10.88.0.1 is the k8s service IP. Make sure it does not overlap with any existing network, to avoid conflicts.

{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.91.19",
    "10.88.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
Create the metrics-server certificate

vim /approot1/k8s/tmp/ssl/metrics-server-csr.json

{
  "CN": "aggregator",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes metrics-server-csr.json | cfssljson -bare metrics-server
Manage apiserver with systemd

vim /approot1/k8s/tmp/service/kube-apiserver.service.192.168.91.19

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

The subnet in --service-cluster-ip-range must be the one containing the 10.88.0.1 address from kubernetes-csr.json.

If etcd is multi-node, list all etcd nodes in --etcd-servers.

apiserver parameters:

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/approot1/k8s/bin/kube-apiserver \
  --allow-privileged=true \
  --anonymous-auth=false \
  --api-audiences=api,istio-ca \
  --authorization-mode=Node,RBAC \
  --bind-address=192.168.91.19 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --endpoint-reconciler-type=lease \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://192.168.91.19:2379 \
  --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
  --secure-port=6443 \
  --service-account-issuer=https://kubernetes.default.svc \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --requestheader-allowed-names= \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-username-headers=X-Remote-User \
  --proxy-client-cert-file=/etc/kubernetes/ssl/metrics-server.pem \
  --proxy-client-key-file=/etc/kubernetes/ssl/metrics-server-key.pem \
  --enable-aggregator-routing=true \
  --v=2
Restart=always
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kubernetes*.pem,metrics-server*.pem} $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-apiserver.service.$i $i:/etc/systemd/system/kube-apiserver.service; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-apiserver $i:/approot1/k8s/bin/; \
done
Start the apiserver service

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-apiserver"; \
ssh $i "systemctl restart kube-apiserver --no-block"; \
ssh $i "systemctl is-active kube-apiserver"; \
done

"activating" means the apiserver is still starting up; wait a moment, then run: for i in 192.168.91.19;do ssh $i "systemctl is-active kube-apiserver";done

"active" means the apiserver started successfully.
curl -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api

A response like the following means the apiserver service is running normally:

{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.91.19:6443"
    }
  ]
}
List all k8s kinds (object categories):

curl -s -k --cacert /etc/kubernetes/ssl/ca.pem \
--cert /etc/kubernetes/ssl/kubernetes.pem \
--key /etc/kubernetes/ssl/kubernetes-key.pem \
https://192.168.91.19:6443/api/v1/ | grep kind | sort -u
  "kind": "APIResourceList",
  "kind": "Binding",
  "kind": "ComponentStatus",
  "kind": "ConfigMap",
  "kind": "Endpoints",
  "kind": "Event",
  "kind": "Eviction",
  "kind": "LimitRange",
  "kind": "Namespace",
  "kind": "Node",
  "kind": "NodeProxyOptions",
  "kind": "PersistentVolume",
  "kind": "PersistentVolumeClaim",
  "kind": "Pod",
  "kind": "PodAttachOptions",
  "kind": "PodExecOptions",
  "kind": "PodPortForwardOptions",
  "kind": "PodProxyOptions",
  "kind": "PodTemplate",
  "kind": "ReplicationController",
  "kind": "ResourceQuota",
  "kind": "Scale",
  "kind": "Secret",
  "kind": "Service",
  "kind": "ServiceAccount",
  "kind": "ServiceProxyOptions",
  "kind": "TokenRequest",
Configure kubectl administration

Create the admin certificate

vim /approot1/k8s/tmp/ssl/admin-csr.json

{
  "CN": "admin",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
Create the kubeconfig certificate

Set the cluster parameters

--server is the apiserver address: use your own IP and the port set by the --secure-port parameter in the service file. Be sure to include the https:// protocol, otherwise kubectl will not be able to reach the apiserver with the generated credentials.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubectl.kubeconfig

Set the client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig

Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
Distribute the kubeconfig to all master nodes

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p $HOME/.kube"; \
scp /approot1/k8s/pkg/kubernetes/bin/kubectl $i:/approot1/k8s/bin/; \
ssh $i "echo 'source <(kubectl completion bash)' >> $HOME/.bashrc"; \
scp /approot1/k8s/tmp/ssl/kubectl.kubeconfig $i:$HOME/.kube/config; \
done
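kubectl should now be able to talk to the cluster; a quick optional check, assuming /approot1/k8s/bin is on your PATH as configured earlier:

kubectl cluster-info

It should print the control plane address, https://192.168.91.19:6443.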
Deploy the controller-manager component

Create the controller-manager certificate

vim /approot1/k8s/tmp/ssl/kube-controller-manager-csr.json

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Mind the JSON format.

{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Create the kubeconfig certificate

Set the cluster parameters

--server is the apiserver address: use your own IP and the port set by the --secure-port parameter in the service file. Be sure to include the https:// protocol, otherwise kubectl will not be able to reach the apiserver with the generated credentials.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-controller-manager.kubeconfig

Set the client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig

Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Manage controller-manager with systemd

vim /approot1/k8s/tmp/service/kube-controller-manager.service

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

The subnet in --service-cluster-ip-range must be the one containing the 10.88.0.1 address from kubernetes-csr.json.

--cluster-cidr is the pod network. It must differ from both the --service-cluster-ip-range subnet and any existing network, to avoid conflicts.

controller-manager parameters:

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-controller-manager \
  --bind-address=0.0.0.0 \
  --allocate-node-cidrs=true \
  --cluster-cidr=172.20.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --node-cidr-mask-size=24 \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=10.88.0.0/16 \
  --use-service-account-credentials=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/kube-controller-manager.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/service/kube-controller-manager.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-controller-manager $i:/approot1/k8s/bin/; \
done
Start the controller-manager service

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-controller-manager"; \
ssh $i "systemctl restart kube-controller-manager --no-block"; \
ssh $i "systemctl is-active kube-controller-manager"; \
done

"activating" means the controller-manager is still starting up; wait a moment, then run: for i in 192.168.91.19;do ssh $i "systemctl is-active kube-controller-manager";done

"active" means the controller-manager started successfully.
Deploy the scheduler component

Create the scheduler certificate

vim /approot1/k8s/tmp/ssl/kube-scheduler-csr.json

Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Mind the JSON format.

{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.91.19"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}

cd /approot1/k8s/tmp/ssl/
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Create the kubeconfig certificate

Set the cluster parameters

--server is the apiserver address: use your own IP and the port set by the --secure-port parameter in the service file. Be sure to include the https:// protocol, otherwise kubectl will not be able to reach the apiserver with the generated credentials.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-scheduler.kubeconfig

Set the client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig

Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Manage scheduler with systemd

vim /approot1/k8s/tmp/service/kube-scheduler.service

scheduler parameters:

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/approot1/k8s/bin/kube-scheduler \
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --bind-address=0.0.0.0 \
  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

Also make sure the directories match your own layout; if yours differ from mine, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19;do \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
scp /approot1/k8s/tmp/ssl/{ca*.pem,kube-scheduler.kubeconfig} $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-scheduler.service $i:/etc/systemd/system/; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-scheduler $i:/approot1/k8s/bin/; \
done
Start the scheduler service

For multiple nodes, append their IPs after 192.168.91.19, separated by spaces. Change 192.168.91.19 to your own IP; do not blindly copy-paste.

for i in 192.168.91.19;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-scheduler"; \
ssh $i "systemctl restart kube-scheduler --no-block"; \
ssh $i "systemctl is-active kube-scheduler"; \
done

"activating" means the scheduler is still starting up; wait a moment, then run: for i in 192.168.91.19;do ssh $i "systemctl is-active kube-scheduler";done

"active" means the scheduler started successfully.
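With apiserver, controller-manager, and scheduler all running, one combined optional check (kubectl get cs is deprecated but still functional on v1.23):

kubectl get cs

All listed components, including etcd-0, should report Healthy.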
Deploy the work node

Deploy the containerd component

Download the binaries

When downloading containerd from GitHub, pick the release file whose name starts with cri-containerd-cni. That bundle contains containerd plus the crictl management tool and the cni network plugins; the systemd service file, config.toml, crictl.yaml, and cni config files are all pre-configured and only need minor changes before use.

The cri-containerd-cni bundle does ship a runc, but it lacks dependencies, so download a fresh runc from the runc GitHub releases instead.

wget -O /approot1/k8s/pkg/containerd.tar.gz \
https://github.com/containerd/co ... -linux-amd64.tar.gz
wget -O /approot1/k8s/pkg/runc https://github.com/opencontainer ... d/v1.0.3/runc.amd64
mkdir /approot1/k8s/pkg/containerd
cd /approot1/k8s/pkg/
for i in $(ls *containerd*.tar.gz);do tar xvf $i -C /approot1/k8s/pkg/containerd && rm -f $i;done
chmod +x /approot1/k8s/pkg/runc
mv /approot1/k8s/pkg/containerd/usr/local/bin/{containerd,containerd-shim*,crictl,ctr} /approot1/k8s/pkg/containerd/
mv /approot1/k8s/pkg/containerd/opt/cni/bin/{bridge,flannel,host-local,loopback,portmap} /approot1/k8s/pkg/containerd/
rm -rf /approot1/k8s/pkg/containerd/{etc,opt,usr}
Manage containerd with systemd

vim /approot1/k8s/tmp/service/containerd.service

Mind the binary paths.

If the runc binary is not under /usr/bin/, the unit needs an Environment parameter that adds runc's directory to PATH; otherwise, when k8s starts a pod, it fails with: exec: "runc": executable file not found in $PATH: unknown

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
Environment="PATH=$PATH:/approot1/k8s/bin"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/approot1/k8s/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target
Configure the containerd configuration file

vim /approot1/k8s/tmp/service/config.toml

root is the container storage path; point it at a disk with plenty of free space.

bin_dir is where the containerd service and cni plugins are stored.

sandbox_image is the pause image name and tag.

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/approot1/data/containerd"
state = "/run/containerd"
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "k8s.gcr.io/pause:3.6"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/approot1/k8s/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = "/etc/cni/net.d/cni-default.conf"
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
          endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
          endpoint = ["https://quay.mirrors.ustc.edu.cn"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
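Once this file has been distributed to /etc/containerd/ and containerd is running (next steps), you can print the merged effective configuration on a node to verify the TOML parses cleanly; an optional check:

/approot1/k8s/bin/containerd config dump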
Configure the crictl management tool

vim /approot1/k8s/tmp/service/crictl.yaml

runtime-endpoint: unix:///run/containerd/containerd.sock
Configure the cni network plugin

vim /approot1/k8s/tmp/service/cni-default.conf

The subnet parameter must match the controller-manager --cluster-cidr parameter.

{
  "name": "mynet",
  "cniVersion": "0.3.1",
  "type": "bridge",
  "bridge": "mynet0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "subnet": "172.20.0.0/16"
  }
}
Distribute the configuration files and create the required paths

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /etc/containerd"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/cni/net.d"; \
scp /approot1/k8s/tmp/service/containerd.service $i:/etc/systemd/system/; \
scp /approot1/k8s/tmp/service/config.toml $i:/etc/containerd/; \
scp /approot1/k8s/tmp/service/cni-default.conf $i:/etc/cni/net.d/; \
scp /approot1/k8s/tmp/service/crictl.yaml $i:/etc/; \
scp /approot1/k8s/pkg/containerd/* $i:/approot1/k8s/bin/; \
scp /approot1/k8s/pkg/runc $i:/approot1/k8s/bin/; \
done
Start the containerd service

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable containerd"; \
ssh $i "systemctl restart containerd --no-block"; \
ssh $i "systemctl is-active containerd"; \
done

"activating" means containerd is still starting up; wait a moment, then run: for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active containerd";done

"active" means containerd started successfully.
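crictl should now reach the runtime over the socket from crictl.yaml; a quick optional check:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "/approot1/k8s/bin/crictl version"; \
done

Each node should report the runtime name (containerd) and its version.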
Import the pause image

ctr has a quirk when importing images: for k8s to be able to use the image, you must pass -n k8s.io, and it has to be in the form ctr -n k8s.io image import <xxx.tar>. Running ctr image import <xxx.tar> -n k8s.io instead fails with ctr: flag provided but not defined: -n. An odd behavior that takes some getting used to.

If an image is imported without -n k8s.io, kubelet will pull the pause image again when it starts a pod, and if the configured registry lacks that tag, the pod will fail.

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/pause-v3.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/pause-v3.6.tar && rm -f /tmp/pause-v3.6.tar"; \
done

View the image:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep pause"; \
done
4 y- I" G# b0 Q5 D+ s& a部署 kubelet 组件
8 a. B1 |6 K) m1 [5 ^, _创建 kubelet 证书
5 l D5 P' W0 u: x( M! J# bvim /approot1/k8s/tmp/ssl/kubelet-csr.json.192.168.91.19* U/ ]* u! [$ z1 [- d* _: Z
这里的192.168.91.19需要改成自己的ip,不要一股脑的复制黏贴,有多少个node节点就创建多少个json文件,json文件内的 ip 也要修改为 work 节点的 ip,别重复了
1 o" C; q6 r9 R
; h0 a0 n/ `% B8 F" m( x{! \2 R3 O9 t3 c5 k( D) A7 D
"CN": "system:node:192.168.91.19",! e! k- g6 ]+ `8 O9 |. ~- q* z
"key": {, z7 v8 `) p3 o0 D
"algo": "rsa",% [. u4 k0 C: L- b* G
"size": 2048' U1 }; s8 h2 J
},& [. r. r, z# S4 ? \
"hosts": [# I( z: x- c) z8 r8 {6 r( c
"127.0.0.1",4 B% p4 \; C4 D% C
"192.168.91.19"# V$ m3 c0 _ c/ ]% R- i2 Y+ q. `/ o
],
# v% b, ]# m- E "names": [
2 T2 m8 V+ ]( s' a* g1 N# @/ L: y/ { {
+ T x5 N% J3 w* @ "C": "CN",, E# x& L, G! y( I l! d
"ST": "ShangHai",; E9 D7 t8 \- H i* D7 x* {
"L": "ShangHai",
# x R$ `% R2 q' B "O": "system:nodes",% E) B `& x# [0 K$ {4 e
"OU": "System"8 v3 @2 I% E: y5 {: Y/ [
}
2 u3 C' w: X8 q8 r6 e" F( j ]
( f/ {* a C- k8 k% H3 n" v# J) a}
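With more than a couple of nodes, hand-editing each JSON invites typos. A minimal sketch that stamps out the per-node files from the 192.168.91.19 template above (the node list is an assumption; extend it to match yours):

cd /approot1/k8s/tmp/ssl/
# 192.168.91.19 already exists as the hand-written template, so only generate the other nodes
for i in 192.168.91.20;do \
sed "s/192.168.91.19/$i/g" kubelet-csr.json.192.168.91.19 > kubelet-csr.json.$i; \
done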
$ G7 g& b9 X5 e P T" afor i in 192.168.91.19 192.168.91.20;do \3 M( {, i% j9 u- C
cd /approot1/k8s/tmp/ssl/; \: V+ ?2 T; |/ B3 [+ _
cfssl gencert -ca=ca.pem \
- A3 T8 L$ t, c" l- [7 K* A-ca-key=ca-key.pem \/ `2 S J9 y& F6 G
-config=ca-config.json \
7 {/ s- X% R f. W; ?+ S-profile=kubernetes kubelet-csr.json.$i | cfssljson -bare kubelet.$i; \$ Y# I3 ]3 [. t8 z& I$ ]9 p7 h
done+ ~; Z9 i4 K% M0 W, R( x6 B4 T& Y
Create the kubeconfig certificate

Set the cluster parameters

--server is the access address of the apiserver. Change it to your own IP plus the port given by the --secure-port parameter in the apiserver service file. Remember: it must include the https:// scheme, otherwise kubectl cannot reach the apiserver with the generated certificate.

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kubelet.kubeconfig.$i; \
done

Set the client authentication parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials system:node:$i \
--client-certificate=kubelet.$i.pem \
--client-key=kubelet.$i-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig.$i; \
done

Set the context parameters

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=system:node:$i \
--kubeconfig=kubelet.kubeconfig.$i; \
done

Set the default context

for i in 192.168.91.19 192.168.91.20;do \
cd /approot1/k8s/tmp/ssl/; \
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kubelet.kubeconfig.$i; \
done
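To spot-check one of the generated kubeconfigs before distributing it, kubectl config view is read-only and redacts the embedded certificates by default:

/approot1/k8s/pkg/kubernetes/bin/kubectl config view \
--kubeconfig=/approot1/k8s/tmp/ssl/kubelet.kubeconfig.192.168.91.19

The cluster, user, and current-context fields should line up with the loops above.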
Configure the kubelet configuration file

vim /approot1/k8s/tmp/service/config.yaml

Mind the IP of the clusterDNS parameter: it must sit in the same subnet as the apiserver's --service-cluster-ip-range parameter but must differ from the k8s service IP itself. By convention the k8s service takes the first IP of the subnet and clusterDNS takes the second; for example, with a service subnet of 10.88.0.0/16 the kubernetes service gets 10.88.0.1 and clusterDNS gets 10.88.0.2.
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.88.0.2
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 3
containerLogMaxSize: 10Mi
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 300Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 40s
hairpinMode: hairpin-veth
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 40s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
kubeAPIBurst: 100
kubeAPIQPS: 50
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
# disable readOnlyPort
readOnlyPort: 0
resolvConf: /etc/resolv.conf
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
tlsCertFile: /etc/kubernetes/ssl/kubelet.pem
tlsPrivateKeyFile: /etc/kubernetes/ssl/kubelet-key.pem
Configure kubelet to be managed by systemctl

vim /approot1/k8s/tmp/service/kubelet.service.192.168.91.19

Replace 192.168.91.19 here with your own IP; do not blindly copy and paste. Create one service file per node, and change the IP inside each file to that worker node's IP; do not reuse the same IP (the same sed pattern as for the CSR files works; see the sketch after the unit file).

The --container-runtime parameter defaults to docker; when using anything other than docker it must be set to remote, and --container-runtime-endpoint must be configured to point at the runtime's sock file.

kubelet parameters:

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=/approot1/k8s/data/kubelet
ExecStart=/approot1/k8s/bin/kubelet \
  --config=/approot1/k8s/data/kubelet/config.yaml \
  --cni-bin-dir=/approot1/k8s/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --hostname-override=192.168.91.19 \
  --image-pull-progress-deadline=5m \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --network-plugin=cni \
  --pod-infra-container-image=k8s.gcr.io/pause:3.6 \
  --root-dir=/approot1/k8s/data/kubelet \
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
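A minimal per-node generation sketch, mirroring the CSR one (it assumes kubelet.service.192.168.91.19 is the hand-written template and only generates files for the remaining nodes):

cd /approot1/k8s/tmp/service/
for i in 192.168.91.20;do \
sed "s/192.168.91.19/$i/g" kubelet.service.192.168.91.19 > kubelet.service.$i; \
done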
Distribute the certificates and create the required paths

For more nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces; remember to change 192.168.91.19 to your own IP instead of copying blindly.

Also make sure the directories match your own layout; if yours differ from the ones used here, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kubelet"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/ca*.pem $i:/etc/kubernetes/ssl/; \
scp /approot1/k8s/tmp/ssl/kubelet.$i.pem $i:/etc/kubernetes/ssl/kubelet.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.$i-key.pem $i:/etc/kubernetes/ssl/kubelet-key.pem; \
scp /approot1/k8s/tmp/ssl/kubelet.kubeconfig.$i $i:/etc/kubernetes/kubelet.kubeconfig; \
scp /approot1/k8s/tmp/service/kubelet.service.$i $i:/etc/systemd/system/kubelet.service; \
scp /approot1/k8s/tmp/service/config.yaml $i:/approot1/k8s/data/kubelet/; \
scp /approot1/k8s/pkg/kubernetes/bin/kubelet $i:/approot1/k8s/bin/; \
done
Start the kubelet service

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kubelet"; \
ssh $i "systemctl restart kubelet --no-block"; \
ssh $i "systemctl is-active kubelet"; \
done

A return value of activating means kubelet is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kubelet";done again.

A return value of active means kubelet started successfully.

Check whether the nodes are Ready

kubectl get node

Expect output similar to the following; a STATUS of Ready means the node is healthy.

NAME            STATUS   ROLES    AGE   VERSION
192.168.91.19   Ready    <none>   20m   v1.23.3
192.168.91.20   Ready    <none>   20m   v1.23.3
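If a node stays NotReady, two standard commands usually narrow the cause down quickly (run the second one on the affected node itself):

kubectl describe node 192.168.91.19
journalctl -u kubelet --no-pager | tail -n 50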
. \, e& D* Z7 K4 A2 z6 x- y5 Z2 H1 ^部署 proxy 组件3 Y1 p1 Y$ Y, f' o: p
创建 proxy 证书
' G- G- U: X# ]: \) w) m' [# rvim /approot1/k8s/tmp/ssl/kube-proxy-csr.json% s+ D- l& p" N- T( O- y
{5 L5 \7 C) p# Q f4 g
"CN": "system:kube-proxy",% l0 e7 I! H' y5 W
"key": {
/ W/ V0 ^: T3 b( E8 ~ "algo": "rsa",
. b l" \& u' f$ C "size": 2048! f: S7 [7 w+ C/ T6 o
},
* h3 t% s2 {. w7 O" ]$ o "hosts": [],
: \. v! x: }$ w5 d4 W. a4 \ "names": [
* a" C& Z6 e, z% [& n) { {9 @& }7 C, h% W
"C": "CN",3 Y* F0 b* y$ s2 d/ s8 x
"ST": "ShangHai",
& p. P! n7 |- [8 X4 {! m4 h "L": "ShangHai",
- x: N) C+ F! Z" a: C* N "O": "system:kube-proxy",
' `; E1 W+ b6 |/ s0 _0 c "OU": "System"
3 g! r; M* ?- K$ T }
) O9 x2 ~: r: u6 I$ ^ ]! S# r6 K% N# C& Z( h' p
}
& j% V* R' w; P r# T; mcd /approot1/k8s/tmp/ssl/; \! j$ f# A; s. ?& T4 S
cfssl gencert -ca=ca.pem \
1 I9 D' b, S/ I-ca-key=ca-key.pem \1 Z; u0 |: e! N
-config=ca-config.json \& \; L3 E- X$ v w: L
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy' z1 b; J) B" e
Create the kubeconfig certificate

Set the cluster parameters

--server is the access address of the apiserver. Change it to your own IP plus the port given by the --secure-port parameter in the apiserver service file. Remember: it must include the https:// scheme, otherwise kubectl cannot reach the apiserver with the generated certificate.

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://192.168.91.19:6443 \
--kubeconfig=kube-proxy.kubeconfig

Set the client authentication parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

Set the context parameters

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

Set the default context

cd /approot1/k8s/tmp/ssl/
/approot1/k8s/pkg/kubernetes/bin/kubectl config \
use-context default \
--kubeconfig=kube-proxy.kubeconfig
* {. |2 g& V! k- n配置 kube-proxy 配置文件$ c5 N* ^6 x% ?7 ]
vim /approot1/k8s/tmp/service/kube-proxy-config.yaml.192.168.91.194 }' f) z1 h7 ^
这里的192.168.91.19需要改成自己的ip,不要一股脑的复制黏贴,有多少个node节点就创建多少个service文件,service 文件内的 ip 也要修改为 work 节点的 ip,别重复了1 Z+ E, ^# |9 @$ d7 q6 H0 P
; K+ K* S2 r0 f6 p/ \. s% l- `- V
clusterCIDR 参数要和 controller-manager 的 --cluster-cidr 参数一致
+ Y& d9 a! E, X5 n! {$ o: d1 ^& V$ I$ t- O
hostnameOverride 要和 kubelet 的 --hostname-override 参数一致,否则会出现 node not found 的报错
) W$ v: Z4 p0 M+ u Q2 s5 _( n( P# y" f( z" J1 E/ W/ N
kind: KubeProxyConfiguration; e" g* Y6 K$ |
apiVersion: kubeproxy.config.k8s.io/v1alpha1
7 |$ D3 `( l7 C- EbindAddress: 0.0.0.0
% D6 k" N E8 b5 ?clientConnection:
8 F5 s" I# S) H. K4 k" U4 l: E kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"' K, w; I2 J1 j D" }
clusterCIDR: "172.20.0.0/16"6 n/ D# R, Y* c" K! C% u
conntrack:, H6 \) i; e9 x' M' y
maxPerCore: 32768
- T5 A" s! x/ @& P min: 131072' j6 I9 z7 T; W( k: L) Z
tcpCloseWaitTimeout: 1h0m0s
9 A% T& V4 \% {" Z: z/ J tcpEstablishedTimeout: 24h0m0s! u$ q0 A- U. X; ?+ \* b, S. G
healthzBindAddress: 0.0.0.0:102568 u4 {- M4 C" |- S% v
hostnameOverride: "192.168.91.19"
, U1 A7 e9 e' ]- D0 l: |metricsBindAddress: 0.0.0.0:102498 M* B/ m$ E9 r# e6 Z" [
mode: "ipvs"+ ~9 l1 ~# Y: S- t6 i
Configure kube-proxy to be managed by systemctl

vim /approot1/k8s/tmp/service/kube-proxy.service

[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
# kube-proxy uses --cluster-cidr to tell cluster-internal traffic from external traffic
## when --cluster-cidr or --masquerade-all is specified,
## kube-proxy SNATs requests that access the Service IP
WorkingDirectory=/approot1/k8s/data/kube-proxy
ExecStart=/approot1/k8s/bin/kube-proxy \
  --config=/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute the certificates and create the required paths

For more nodes, just append the corresponding IPs after 192.168.91.19, separated by spaces; remember to change 192.168.91.19 to your own IP instead of copying blindly.

Also make sure the directories match your own layout; if yours differ from the ones used here, adjust them, otherwise the service will fail to start.

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "mkdir -p /approot1/k8s/data/kube-proxy"; \
ssh $i "mkdir -p /approot1/k8s/bin"; \
ssh $i "mkdir -p /etc/kubernetes/ssl"; \
scp /approot1/k8s/tmp/ssl/kube-proxy.kubeconfig $i:/etc/kubernetes/; \
scp /approot1/k8s/tmp/service/kube-proxy.service $i:/etc/systemd/system/; \
scp /approot1/k8s/tmp/service/kube-proxy-config.yaml.$i $i:/approot1/k8s/data/kube-proxy/kube-proxy-config.yaml; \
scp /approot1/k8s/pkg/kubernetes/bin/kube-proxy $i:/approot1/k8s/bin/; \
done
Start the kube-proxy service

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "systemctl daemon-reload"; \
ssh $i "systemctl enable kube-proxy"; \
ssh $i "systemctl restart kube-proxy --no-block"; \
ssh $i "systemctl is-active kube-proxy"; \
done

A return value of activating means kube-proxy is still starting; wait a moment, then run for i in 192.168.91.19 192.168.91.20;do ssh $i "systemctl is-active kube-proxy";done again.

A return value of active means kube-proxy started successfully.
, V' [& G+ w! E$ n$ C) H, G; \
部署 flannel 组件
; L+ \" J8 W K" _3 i4 Sflannel github5 E) k' Z: c! T+ v( X2 ~5 o
1 C) _1 [- Z4 G; c* ?: h+ t配置 flannel yaml 文件
% r H! p$ R- G! o3 K- svim /approot1/k8s/tmp/service/flannel.yaml- y0 u6 ?* c5 F$ X( P
net-conf.json 内的 Network 参数需要和 controller-manager 的 --cluster-cidr 参数一致6 ~$ N# X3 u6 F. } `/ F
6 J. d4 M: _. Q! f& @- H8 O) X
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "172.20.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
Configure the flannel CNI network config file

vim /approot1/k8s/tmp/service/10-flannel.conflist

{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
Import the flannel image

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/flannel-v0.15.1.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/flannel-v0.15.1.tar && rm -f /tmp/flannel-v0.15.1.tar"; \
done

View the images

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep flannel"; \
done

Distribute the flannel CNI network config file

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "rm -f /etc/cni/net.d/10-default.conf"; \
scp /approot1/k8s/tmp/service/10-flannel.conflist $i:/etc/cni/net.d/; \
done
After the flannel CNI config file is distributed, the nodes will temporarily show NotReady; wait until all nodes are back to Ready before running the flannel component.

Run the flannel component in k8s

kubectl apply -f /approot1/k8s/tmp/service/flannel.yaml

Check whether the flannel pods are running

kubectl get pod -n kube-system | grep flannel

Expect output similar to the following.

flannel is a DaemonSet, the kind of pod that lives and dies with its node: k8s runs one flannel pod per node, and when a node is deleted its flannel pod is deleted along with it.

kube-flannel-ds-86rrv   1/1     Running   0          8m54s
kube-flannel-ds-bkgzx   1/1     Running   0          8m53s

On the SUSE 12 distribution the pods can end up in Init:CreateContainerError. In that case run kubectl describe pod -n kube-system <flannel_pod_name> to see the reason. If the error is Error: failed to create containerd container: get apparmor_parser version: exec: "apparmor_parser": executable file not found in $PATH, locate apparmor_parser with which apparmor_parser, create a symlink to it in the directory containing the kubelet binary, and restart the pod. Note that the symlink has to be created on every node that runs flannel; a minimal sketch of the fix follows.
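A sketch of that fix, assuming apparmor_parser resolves to /sbin/apparmor_parser (check with which apparmor_parser first) and that kubelet lives in /approot1/k8s/bin as in this deployment:

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ln -sf /sbin/apparmor_parser /approot1/k8s/bin/apparmor_parser"; \
done
# then delete the failing pod so the DaemonSet recreates it, e.g.:
# kubectl delete pod -n kube-system <flannel_pod_name>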
Deploy the coredns component

Configure the coredns yaml file

vim /approot1/k8s/tmp/service/coredns.yaml

The clusterIP parameter must match the clusterDNS parameter in the kubelet configuration file.
t4 j1 u" L- l' E9 ?1 \. m& fapiVersion: v1
# I& c4 W# P. d( w, e: t1 Fkind: ServiceAccount
4 ^; U. u3 {: Imetadata:) {( h( {$ l! i! B. Z2 S7 T% b
name: coredns1 ~6 L) N/ G$ F" P8 }
namespace: kube-system
/ F6 d+ |( o) L labels:
1 s- }. J- R* k1 l kubernetes.io/cluster-service: "true". i0 s0 o- G4 z' J+ ~" i! w. H$ N
addonmanager.kubernetes.io/mode: Reconcile6 y0 \. Z. W% v: |* U+ J. u; k
---
/ ^( c5 a) B% S% }+ C* p1 z* A7 J1 gapiVersion: rbac.authorization.k8s.io/v19 c: o+ Q; j9 l6 P2 c" Z1 r
kind: ClusterRole
0 s" Q, u5 B3 w! {metadata:
6 s" _( n7 z G$ `; {% r labels:; ^8 \/ E% F9 E4 j
kubernetes.io/bootstrapping: rbac-defaults5 f3 {' R* }7 s( ?
addonmanager.kubernetes.io/mode: Reconcile, B/ V2 d4 s3 @$ H2 `
name: system:coredns
# }7 x1 Q- Q2 l' s4 w9 g p' j' Grules:
# c& R; U( Z4 a/ z. r# `- apiGroups:
; Z" l# W; U* K& l- d3 y - "", k q8 Q0 Z& B1 ?9 }) Z
resources:/ E9 E/ J6 m9 K, p6 Q
- endpoints
0 q& _, l3 k7 _ - services1 O+ h R6 f6 `
- pods" l G1 @* Y$ U3 J; R
- namespaces$ [3 W: @) k4 N
verbs:
+ V: E( }: A3 P$ g7 M. {5 d$ q4 m - list
/ `6 C6 j/ G" G) J - watch
) x( y, E& W! ~; k; d- apiGroups:
/ u1 s+ N R8 g6 W7 ~+ b ` - ""6 R/ \0 n) n2 ]9 d1 `
resources:
, p- _' t3 L W. u( i - nodes% ]3 ^# l" A/ \( n9 @2 \% Z
verbs:
- S# A- v( k3 P; O' s3 t7 t6 L - get
% P. @9 m: v1 A N# u- apiGroups:
; H3 b) q- j! s# w - discovery.k8s.io% a% w, _, I+ Q7 F
resources:' d/ y. w: W, P( \$ j
- endpointslices) _, K8 e2 i# T8 `& p/ e
verbs:
' J7 b* e! y5 y2 A; p - list8 I+ D2 p. j+ d: S
- watch
, a* h: q% s S. U/ K---+ [. D: N9 Z- n
apiVersion: rbac.authorization.k8s.io/v1
$ E2 W# e/ k: {kind: ClusterRoleBinding# h8 P- f. U6 e9 n
metadata:/ e8 B% H" n) F. S: `: A
annotations:! H- @$ d3 M! {
rbac.authorization.kubernetes.io/autoupdate: "true"3 B9 ^; N9 h5 z" B# o8 Z! `
labels:
5 o3 f9 z# T: K- K kubernetes.io/bootstrapping: rbac-defaults
: e+ P- Z5 [% |5 _$ g addonmanager.kubernetes.io/mode: EnsureExists; C& G3 }' r" @, v9 {
name: system:coredns- j' M% K* e) f
roleRef:
/ J; Q/ Z6 D4 l+ Q" y apiGroup: rbac.authorization.k8s.io
2 T0 X: p% I5 s! ?3 F8 \ kind: ClusterRole+ n$ b; a! i- r2 Q% `6 e
name: system:coredns
- o5 D C: Q3 K" psubjects:
% e) G; N$ \6 R: `9 ^' W- kind: ServiceAccount0 b8 z9 j. D' L0 u3 h
name: coredns
9 b- A/ K& a) G2 X% [0 L namespace: kube-system
+ p& Y6 y* \; e5 w' V( l! Y( I---
% q3 }: K N# EapiVersion: v1
: i2 l+ a+ l" x$ Q" Mkind: ConfigMap: D& W: o$ \) k
metadata:
, u$ f; ]1 y6 c% w: W name: coredns- |9 t) o5 C7 A8 V
namespace: kube-system
$ J# h% N) B3 i; k8 u' E labels:# ~2 k/ w5 v, c1 r" p) E
addonmanager.kubernetes.io/mode: EnsureExists
# g1 }* o/ S: V( R Gdata:4 ?4 G; ?0 @% H- k3 u
Corefile: |
8 }. D- b2 F n! X8 O$ \' ^ .:53 {
5 X' @" \% X& b( O9 i errors- k3 f6 |" `+ Q z3 ~& Z. X) J+ C
health {, K0 \2 c, @* o: d3 O. O Q! g
lameduck 5s0 o! ~$ S& n/ [; {7 h1 D
}; v" a7 B7 o/ Z' m; b4 p
ready$ H# H" q Y% V T
kubernetes cluster.local in-addr.arpa ip6.arpa {) J l9 r h% Z4 t; g6 {
pods insecure. c9 m- E( q' ~0 m/ r, u/ S
fallthrough in-addr.arpa ip6.arpa
5 \$ s7 b8 M/ q" F3 s- H ttl 307 ^2 n5 k( @; r/ j- q
}3 O9 `+ v; I. U5 R5 e& w- Q
prometheus :9153
$ G! ~. [. Q: ]+ R8 M+ v forward . /etc/resolv.conf {
4 N# }" ^6 v7 G, G% U' F; H) \7 m max_concurrent 1000' V ^ _' ^" w+ a- `: @9 R
}
0 ^+ A. A. e% M x, |+ e1 @ cache 30
9 a. z3 F/ c: |1 v: ?6 Z, t reload
8 Q5 O8 c% J, J) V" b' z+ K% X loadbalance$ Q# E/ @* P1 A% h
}
$ s1 L8 c- C) k) I---
3 I$ E2 W# }+ {) M4 \! I; MapiVersion: apps/v1. D8 J# R$ {# X/ @' r
kind: Deployment
/ J# @( P0 V, m) umetadata:- m! B: H& n- ?; S
name: coredns) g, K# J) S: O" s5 {- u: M
namespace: kube-system
. U7 g+ G# q7 B3 Y$ G" z labels:7 p3 |/ h5 w1 W1 S0 ]' q7 P
k8s-app: kube-dns
$ V" _: d/ D' g( _; R6 [! g kubernetes.io/cluster-service: "true"% C0 M( Q: b# K T$ q$ w
addonmanager.kubernetes.io/mode: Reconcile
* g2 L. d4 }0 b/ ~& e6 i( c kubernetes.io/name: "CoreDNS"; R8 G; q, T& k& q* s/ {
spec:
9 V. ~! K# t: D6 A9 _ replicas: 1
|3 Q+ Y9 m+ @; d# o strategy:2 i; |, o& H j6 F. R: f) U S# \4 k
type: RollingUpdate% i& A& X/ R9 @6 x
rollingUpdate:, w6 O/ X' G7 n3 s" s% Z. f
maxUnavailable: 18 g2 a$ }3 o7 j; w6 s5 c
selector:
6 {8 k! M- h |- r% l1 @ matchLabels:* t6 ?1 ^; v+ _- X, \; K- O
k8s-app: kube-dns4 X4 l% l3 y4 i1 s! [
template:0 _/ U2 P7 S4 O- a$ I f9 w
metadata:
4 |( B v1 K) F! O& D0 v2 H labels:
! X0 x: S+ O8 `, O6 P, h# L1 [$ _' O5 R k8s-app: kube-dns
9 d- `0 Q3 E7 S# o5 t/ ?$ H* w spec:
8 V$ K. N( F6 z6 ]1 P securityContext:+ W6 i1 J: Q) B2 U0 X
seccompProfile:* q9 e$ Z3 u( |! [
type: RuntimeDefault
7 S0 L0 J" A/ e priorityClassName: system-cluster-critical
" }# z2 g3 i0 Z6 g serviceAccountName: coredns; r' _3 J. c$ A7 d+ x- T$ i3 E' W
affinity:- ?) d/ B" l+ X5 |& \1 S7 T5 T
podAntiAffinity:
0 V8 ^8 S B: O/ X4 A+ E( J* ] preferredDuringSchedulingIgnoredDuringExecution:
# M b0 k. i! J3 D - weight: 100
; j$ f# F$ m# g* W, t8 h podAffinityTerm:. ]8 S0 s" e- B9 R( w- q& o- y+ P
labelSelector:. T5 @7 k" A& X5 Y1 U
matchExpressions:
% ~& t& v H% \/ h5 L$ t' q - key: k8s-app9 T+ J8 l2 r* d6 e1 X2 Z* r
operator: In
, L9 E2 N. ?+ V values: ["kube-dns"], t+ b V" h; n' H9 W( Y, W6 b; [
topologyKey: kubernetes.io/hostname/ b2 @2 f& [/ }2 r' T' `+ _) |
tolerations:/ \! d+ @; Z. _. I P
- key: "CriticalAddonsOnly"
$ ?7 g5 Q! h5 D0 {% ?5 ]% L! L1 y operator: "Exists"
8 ?/ m+ O7 g2 F( m. k* z5 C- {* K nodeSelector:
' \% R+ H& Y0 L9 z kubernetes.io/os: linux& c; m; G) p6 v
containers:3 p: G: Y* i; ]9 {% A1 F
- name: coredns2 b0 G [, J9 U! R' t/ Z' a
image: docker.io/coredns/coredns:1.8.6
0 Y+ g$ g3 {' k1 Q. V3 W( ` imagePullPolicy: IfNotPresent
. l& O( |$ K6 C" F$ Y* l- Q resources:$ d8 Y7 h# d! b7 B3 a2 o/ `
limits:0 H+ F2 x6 H* W1 B
memory: 300Mi
/ C& Y" U. d* A# B! V2 |! I! P: } requests:
3 f8 |' w9 z" ]* ?8 I cpu: 100m4 M3 ]+ z$ z$ _$ ^' a1 O4 x8 k v
memory: 70Mi
. t7 m' @. O# j, |. [4 M9 R args: [ "-conf", "/etc/coredns/Corefile" ]! o" v& [ V& H0 `3 M
volumeMounts:+ S7 ?: L( x/ J, p. {" [
- name: config-volume2 u$ d2 B0 p ~: Q& p0 P, A: R$ e
mountPath: /etc/coredns( s% S4 m! o& u! ~* S
readOnly: true
& A1 W6 R! K+ c' d6 w3 e3 ^ ports:: ?3 B' g1 M* @! Q
- containerPort: 53
$ ?' U7 w$ D8 e" l name: dns
, P! F7 k+ w1 q- `3 A: N protocol: UDP8 \7 j% Y+ M! |- i: I8 I. C
- containerPort: 538 S9 z* B( ]- `1 c# q/ _, ~
name: dns-tcp/ Z0 ~+ r" o) W v8 }! N
protocol: TCP/ P8 v/ L, X" u! i7 `6 }
- containerPort: 9153
- H6 G) z, d8 f$ D# R! b7 c name: metrics
3 ]* P% G/ S1 G/ c2 Z0 S protocol: TCP# G/ U$ [! o2 o+ I3 _% w
livenessProbe:/ J) m, g* `$ `( z) v! o& n* R F, B
httpGet:
. g r3 Y/ V, f- ?4 p" O0 M path: /health, w0 I/ W$ b1 y( @
port: 8080# ?/ B9 Z4 c5 F
scheme: HTTP
+ {! X- ], t. o% D initialDelaySeconds: 603 B6 B; V# O4 O$ j- `
timeoutSeconds: 5! ~5 }1 z) p% c$ g
successThreshold: 1
3 j( v* j8 I2 X& y9 e0 `4 u failureThreshold: 5
+ M( s& R, ~( G0 N: ]& k* x5 M readinessProbe:0 @2 C1 D, W) S) d+ k7 v* N6 f$ n
httpGet:
" [9 u. h. X" v: i X2 I# X6 T path: /ready* P) j: E/ d9 m. @( [
port: 8181
" Z- m# B% R @ scheme: HTTP/ ^0 h8 i+ u- V1 M9 \
securityContext:0 R8 Y- D- R, J7 u2 E/ m, ?3 B: L
allowPrivilegeEscalation: false" L: p6 K. G ?7 T- G* F4 ]# W5 n
capabilities:4 F% w- S; R- E5 ?
add:
. i0 n# {9 Z2 C( @! J - NET_BIND_SERVICE2 Q" F% v0 z5 y) N) A
drop:
* j0 w3 d- }2 S: Z5 O) c - all9 g8 ?$ {& B9 @$ B: \: a$ p
readOnlyRootFilesystem: true8 g8 ~2 m9 G0 J' c' a p# N, ?
dnsPolicy: Default5 @* O4 ]7 l- i0 N
volumes:+ _$ Z2 H4 k8 M( _9 ~1 W/ ]' W
- name: config-volume
% ]7 s2 s N: r8 H5 [5 j configMap:+ e5 j! }% r6 n( o+ N! X
name: coredns2 s |, {( r2 A* i7 f/ a
items:3 [2 T9 x1 g8 U8 m; j9 {4 _& d
- key: Corefile9 K, F7 ~7 V( |" r. g
path: Corefile
+ _; P' u1 J7 p* A: J& D---4 K% u9 l; G; n9 I( N" X' V1 e
apiVersion: v1( e% |% v% f; q- u$ e
kind: Service! M8 L$ W4 t. S7 N+ h1 @
metadata:
1 [& u2 C+ T6 K name: kube-dns, U5 ^: }2 @- P: s; U S# O
namespace: kube-system
% k" b% y8 e' t8 ^7 R5 b3 v) I6 S+ } annotations:
( _# O8 }: m% E' c# t$ Z8 Q: q prometheus.io/port: "9153"
3 H6 @' m0 t4 D5 O8 \. m prometheus.io/scrape: "true"2 o. U, I) u( A% s' }
labels:
7 N, G& ^: {& }& F7 `/ E0 g$ w k8s-app: kube-dns
! z6 U! p7 v) V$ J- D6 N kubernetes.io/cluster-service: "true"8 B5 ^% b n7 m' U* a
addonmanager.kubernetes.io/mode: Reconcile
4 c' Z' ]9 A9 Z1 l* g1 g& |) W kubernetes.io/name: "CoreDNS"
$ T9 m( l/ E$ pspec:1 k3 B- Y5 Q. `4 P( |- a1 J: b V
selector:
8 [' u8 ]( {( ~: v% ?7 R k8s-app: kube-dns2 k7 c% u+ V8 }, ~; [- P9 e' k
clusterIP: 10.88.0.2
! z, c- }0 a, T8 U) ]1 ] ports:: d# J( Q- }7 ?
- name: dns6 B# D' j% _1 A2 I* i
port: 53: ?% Q3 [9 X8 t2 k; o+ n
protocol: UDP
: A: j/ o& x: r! X( P$ N - name: dns-tcp
, A4 e {* u4 p* R$ x& l port: 53 J. D$ _& [, |. F0 `! c
protocol: TCP
- m0 Y/ j N H! t- s% e' i& F$ F) [ - name: metrics+ a ]2 o: X0 C
port: 9153
0 i- M# b. R) V+ H) D7 i protocol: TCP
Import the coredns image

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/coredns-v1.8.6.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/coredns-v1.8.6.tar && rm -f /tmp/coredns-v1.8.6.tar"; \
done

View the images

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | grep coredns"; \
done

Run the coredns component in k8s

kubectl apply -f /approot1/k8s/tmp/service/coredns.yaml

Check whether the coredns pod is running

kubectl get pod -n kube-system | grep coredns

Expect output similar to the following.

Because the replicas parameter in the coredns yaml file is 1, there is only one pod here; change it to 2 and two pods will appear (see the scaling example below).

coredns-5fd74ff788-cddqf   1/1     Running   0          10s
Deploy the metrics-server component

Configure the metrics-server yaml file

vim /approot1/k8s/tmp/service/metrics-server.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-insecure-tls
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
0 o" v( z/ T' q导入 metrics-server 镜像) {/ H! v# v8 v& U
for i in 192.168.91.19 192.168.91.20;do \0 `6 m* Q7 \/ D3 Y* y8 j" J' b
scp /approot1/k8s/images/metrics-server-v0.5.2.tar $i:/tmp/
& u1 |! I0 d; Q: kssh $i "ctr -n=k8s.io image import /tmp/metrics-server-v0.5.2.tar && rm -f /tmp/metrics-server-v0.5.2.tar"; \9 m3 ]- G' \ B( ]. u7 Y! M
done
! y3 l! {. o5 o2 s' M3 E* B查看镜像 F+ f( r4 t1 t# y O
1 O9 P, e! Q. t8 \1 e! pfor i in 192.168.91.19 192.168.91.20;do \1 e, i9 Z/ i+ s& \$ ~2 S/ P* a9 R
ssh $i "ctr -n=k8s.io image list | grep metrics-server"; \
# n* i2 P0 x( x0 v. g9 idone
% g; f1 v, ?' \) ~3 q. w. p在 k8s 中运行 metrics-server 组件- r. E, v" z S
kubectl apply -f /approot1/k8s/tmp/service/metrics-server.yaml w9 \7 Z* ?& H) J' h* x2 R* Q
检查 metrics-server pod 是否运行成功8 v w1 ]9 L s
kubectl get pod -n kube-system | grep metrics-server3 J/ S3 S. N/ g3 c
预期输出类似如下结果9 E7 T" q1 |! v+ Q/ [3 f
9 S b; W2 B! M6 Y
metrics-server-6c95598969-qnc76 1/1 Running 0 71s
* j! |7 c& l$ T验证 metrics-server 功能" g5 X# {9 E, T; s+ @
5 b" H2 ^, l) C查看节点资源使用情况
5 n' n% S7 N) z" x) d7 P" g
0 L. t, f: R9 m3 Gkubectl top node5 u8 ^, y' q) v2 L
预期输出类似如下结果
+ p$ @! ~4 o& \2 n: c3 q6 Z8 \ X: L! |6 w
metrics-server 启动会偏慢,速度取决于机器配置,如果输出 is not yet 或者 is not ready 就等一会再执行一次 kubectl top node
; P! n4 e5 k! O% K
+ f. u+ R6 [6 k$ A A3 JNAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
# o! M# ?% j5 z6 @+ O# w a7 ~192.168.91.19 285m 4% 2513Mi 32%8 \+ n6 j, S8 v9 u$ p: g' Q
192.168.91.20 71m 3% 792Mi 21%" n6 K% u; J4 x. Q; z' d4 x2 e$ g
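If kubectl top keeps printing is not yet or is not ready, check whether the aggregated API has been registered and become available (plain kubectl, nothing specific to this setup):

kubectl get apiservice v1beta1.metrics.k8s.io

The AVAILABLE column should turn True once metrics-server is serving.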
View pod resource usage in a given namespace

kubectl top pod -n kube-system

Expect output similar to the following.

NAME                              CPU(cores)   MEMORY(bytes)
coredns-5fd74ff788-cddqf          11m          18Mi
kube-flannel-ds-86rrv             4m           18Mi
kube-flannel-ds-bkgzx             6m           22Mi
kube-flannel-ds-v25xc             6m           22Mi
metrics-server-6c95598969-qnc76   6m           22Mi
Deploy the dashboard component

Configure the dashboard yaml file

vim /approot1/k8s/tmp/service/dashboard.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-read-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-read-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dashboard-read-clusterrole
subjects:
- kind: ServiceAccount
  name: dashboard-read-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: dashboard-read-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - persistentvolumes
  - persistentvolumeclaims
  - persistentvolumeclaims/status
  - pods
  - replicationcontrollers
  - replicationcontrollers/scale
  - serviceaccounts
  - services
  - services/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - bindings
  - events
  - limitranges
  - namespaces/status
  - pods/log
  - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - controllerrevisions
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - statefulsets
  - statefulsets/scale
  - statefulsets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  - horizontalpodautoscalers/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - cronjobs/status
  - jobs
  - jobs/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - ingresses
  - ingresses/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - replicationcontrollers/scale
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  - poddisruptionbudgets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingresses/status
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  - volumeattachments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  - clusterroles
  - roles
  - rolebindings
  verbs:
  - get
  - list
  - watch

---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque

---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kube-system

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
$ |$ k* U7 b2 \6 r2 q3 N* t' g k8s-app: kubernetes-dashboard
. \" `1 b0 L3 Y m name: kubernetes-dashboard7 H8 O6 J( V4 u q- [8 J4 w
namespace: kube-system
2 s8 d' F: p: O- H/ Lrules:6 t4 `+ M$ J7 c: N- x3 d
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
" O) J1 O. h( ~! c& V2 j - apiGroups: [""]1 G7 H g/ ]) p- f6 `0 w( F: |
resources: ["secrets"]; I# q8 u4 T( @( X9 K
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
a j( I) Z" D# F* i" \ verbs: ["get", "update", "delete"]
+ d; A8 m0 A7 W8 |3 f% a # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
. V* X# L4 ^+ R/ w* v5 O* f! X7 v - apiGroups: [""]
* E& u4 R6 Y6 s% X" B9 Y resources: ["configmaps"]' i" b. R( t0 S- r7 [' R
resourceNames: ["kubernetes-dashboard-settings"]9 I$ x1 |9 q7 d4 t$ t s& z" n
verbs: ["get", "update"]
/ V {/ n/ x( |) ~( [ # Allow Dashboard to get metrics.
$ e* r& X1 D, M( E, k, G8 a - apiGroups: [""]
; Q4 g/ i& y% v$ b$ X8 s. E resources: ["services"]
8 I) Y( I- Q' P' c/ f7 ? resourceNames: ["heapster", "dashboard-metrics-scraper"]8 C3 H) }$ i5 m" @3 u1 I3 R8 w: @
verbs: ["proxy"]
( [3 R4 v5 s4 u% E( M - apiGroups: [""]' c3 |7 V3 x0 h- R0 N: J4 S. W* N
resources: ["services/proxy"]+ b m8 C4 ^+ x; b/ F0 W0 q1 r( i
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
# F L6 h' C! Z verbs: ["get"]; C. j, g" a4 q) l
" T/ G$ j( r% ]8 ?---4 _6 G/ l, V% J1 ~! P9 B, {+ G0 W) Q
kind: ClusterRole
# H, U7 o3 z" Y6 napiVersion: rbac.authorization.k8s.io/v1
8 ~* ~" u8 t0 x* O( V+ @- _metadata:
' P1 ^4 Z2 y6 L, M+ g0 u- n labels:+ u, i. A. K, @* [: [7 f
k8s-app: kubernetes-dashboard
% a% X5 @& r4 T+ f. ^6 W name: kubernetes-dashboard5 N/ @! Y5 M0 N- ~% W" N, Z) u
rules:
* _. ?) A8 W% q0 Y5 x5 P5 ]5 R% a i # Allow Metrics Scraper to get metrics from the Metrics server/ e$ g+ b, Q1 }' q) l
- apiGroups: ["metrics.k8s.io"]) h$ B$ j/ G6 X5 f& n
resources: ["pods", "nodes"]3 N) Z7 a# Y) X
verbs: ["get", "list", "watch"]
1 Z( b+ x: x Q9 T2 y- e% b+ j
; X; Y- b: T$ u y" M( R& R---9 |& v5 o, A! t; w3 n4 l8 v( f
apiVersion: rbac.authorization.k8s.io/v1
( O0 ]& ?0 s7 bkind: RoleBinding
3 A& j0 v$ Y5 Q7 w( s7 Jmetadata:; y T% B+ U% Q1 [4 @8 D! M/ w5 e
labels:
( q8 _5 U; R% _' J k8s-app: kubernetes-dashboard, N2 m0 y# K2 F \5 _& O
name: kubernetes-dashboard6 R2 f4 q* ~4 W# L0 N( o
namespace: kube-system
* x$ G H3 }4 V- i+ L8 s' v* \roleRef:' t5 Z5 m0 }/ \$ S7 Y" r
apiGroup: rbac.authorization.k8s.io. l3 K6 h$ B0 E y
kind: Role2 ^! J( ^5 f& T: D. Z
name: kubernetes-dashboard
[( b* I# e. {9 b) T# Csubjects:
8 D% k: N/ H% A, C0 f7 f( L: H - kind: ServiceAccount
1 _& } B5 u5 U name: kubernetes-dashboard- E9 E+ j1 v. W1 X: m2 q
namespace: kube-system& O7 P6 F' D5 W) z6 Y! R2 A, W9 ^
2 } y" X& M) z2 L1 b% R& |) i3 Q4 }
---
d9 O" T1 e: CapiVersion: rbac.authorization.k8s.io/v1* n3 H; Q6 B, ~$ `9 i( ?2 P
kind: ClusterRoleBinding
) b' |4 P6 V# Y: H9 L0 p( Vmetadata:
: Z6 r0 h( N- i. u4 ^ name: kubernetes-dashboard) u8 q1 P" G" Z3 L/ H9 l+ w
roleRef:
7 Y" `" G, M7 P7 U# P& h" k apiGroup: rbac.authorization.k8s.io
2 {3 s& a+ `- h kind: ClusterRole
u" N/ v; E- E8 ]- Z name: kubernetes-dashboard; ]6 L8 V& q4 e/ }6 x
subjects:
7 Y2 `9 U, `+ o/ s7 A4 T - kind: ServiceAccount! C" d( L2 s' t, \: j
name: kubernetes-dashboard1 h3 N, z+ d0 v2 g! b" M
namespace: kube-system
" o: X6 \$ Y; z# k/ }+ b* H* @/ f( e0 {" D3 q& V
---' M% R6 q9 q) _% j; {# n
kind: Deployment+ Y' b# _. n. k
apiVersion: apps/v1
# X) ]4 r4 ?/ H' }5 P$ M5 U' ~9 Umetadata:
6 v' \9 b# B; n7 ^" U labels:
- o' o3 i# m5 m- Y6 | k8s-app: kubernetes-dashboard
3 y. b1 D/ X! O _ name: kubernetes-dashboard
+ b# g5 S* j2 K- w6 S) A) h3 `4 K namespace: kube-system
2 R2 Q8 D: c: s+ U8 F6 I$ a5 ?7 Sspec:+ _8 N9 a3 [9 G8 n# M) q2 l
replicas: 1
* Q7 |) I' ?' h1 w revisionHistoryLimit: 10
: o0 k! g! S! a4 u" G selector:5 ^8 c9 Z: g# v4 D& U
matchLabels:# K- {9 X& r9 m5 T' _/ X& o
k8s-app: kubernetes-dashboard6 @/ ~& |! }3 i5 @8 V2 O
template:
/ \' a/ T Q( n( K0 W6 e2 \" t2 C metadata:
9 ]$ ]3 p/ E& w* v2 a) S labels:" j; s0 j) s% z( V
k8s-app: kubernetes-dashboard
! O4 ~" h- W9 T1 N1 b spec:! @7 ~4 m, }& X/ E1 B1 O
containers:8 C5 E0 d, Y$ v2 {/ T7 `
- name: kubernetes-dashboard. u7 f9 U' B$ z. h3 B% d' A
image: kubernetesui/dashboard:v2.4.0
; H y# z8 v; M9 L imagePullPolicy: IfNotPresent8 g$ Y" Y$ x) W" j4 @
ports:
" F+ s1 v( y2 ? - containerPort: 8443
2 g4 D3 j, r5 M" x; P. w6 C% Q protocol: TCP+ F% r) E& y3 u8 W6 m
args:; z, l }+ h: ]- {+ f1 D$ I {0 c
- --auto-generate-certificates8 {' ^" }& F" B5 {2 _
- --namespace=kube-system5 B$ |6 [- K( e Q
- --token-ttl=1800+ Q4 h7 p. v G. |3 F9 `) ]4 `7 @/ f
- --sidecar-host=http://dashboard-metrics-scraper:80006 h. E, X1 M3 P- w8 Z; g
# Uncomment the following line to manually specify Kubernetes API server Host U2 {3 S/ [2 t: W
# If not specified, Dashboard will attempt to auto discover the API server and connect {/ P) v; j$ G {' R! W
# to it. Uncomment only if the default does not work.8 E0 N0 V. b. O8 j9 u
# - --apiserver-host=http://my-address:port
+ Y; i/ J2 A! a5 j6 P( M volumeMounts: i( d+ o& B- b7 p4 q2 A
- name: kubernetes-dashboard-certs
. _9 F+ @9 V0 i7 F6 m3 K mountPath: /certs4 b8 R* K0 |4 E4 Q; L/ \
# Create on-disk volume to store exec logs
0 u/ i% V" P) z- { - mountPath: /tmp! p( g3 o# i, j
name: tmp-volume
# d& s6 Q5 b2 Z6 G( Q livenessProbe:
; M0 }: V7 {: p; b( M- D" R5 [ httpGet:
9 m) g) H( ^' q# j) P scheme: HTTPS3 }# w: i+ U: o A
path: /
; b% _& n) @1 e _- M) N port: 8443
, I3 D! ~' x& `/ [- P3 F+ [ initialDelaySeconds: 30
n; r- j1 Z; v) @ timeoutSeconds: 308 U% O2 c# C8 P) T
securityContext:4 _9 i' A0 c" L7 B
allowPrivilegeEscalation: false6 e9 f$ l2 [, q5 J7 b3 S: B
readOnlyRootFilesystem: true# Y* ?4 u& u; K+ n4 H; H A
runAsUser: 1001
3 [6 ]- ^) ^9 z) Z* k runAsGroup: 2001) K/ Q2 U) z' c* E7 |" q3 `5 Y1 V
volumes:% r$ w7 z1 A! J3 P$ v0 ~4 c. ~
- name: kubernetes-dashboard-certs; U. M) }9 N5 O6 b$ A, \# s8 K ^
secret:5 P' j+ K1 F5 I% J/ ^
secretName: kubernetes-dashboard-certs
4 N- X+ i1 O4 S" k0 X8 J0 Y - name: tmp-volume
& D) D8 w, h/ y1 ^6 |! O emptyDir: {}
; L1 Q' l' E: b! J- F3 @ t4 @ serviceAccountName: kubernetes-dashboard' _4 d! i, y3 z
nodeSelector:
^ m0 k( C9 n9 e1 B "kubernetes.io/os": linux& Z2 W2 W* m" ?, ]
# Comment the following tolerations if Dashboard must not be deployed on master
2 O! s6 G# }% w) h tolerations:
0 n/ B2 h# A2 n8 H - key: node-role.kubernetes.io/master
/ K$ ^" {- t) s* b effect: NoSchedule& Z0 w( O+ A. _5 g
/ l- h( t8 W( c t! Z' g1 S---- h" n. `5 g6 C( g
kind: Service, k2 j5 |% C* V$ Z4 y
apiVersion: v1
' Q4 b" p: Z: y+ n* g( fmetadata:
3 Q( R+ N5 |! Z6 C. l6 w2 O labels:' a2 A8 O$ ^8 l& h# ]
k8s-app: dashboard-metrics-scraper
- Z' p! J% X6 I: l; G$ K0 O name: dashboard-metrics-scraper
( ]) W+ r/ {& e. K! o$ R namespace: kube-system
! A$ Y J( u8 u) a( d6 Lspec:+ g' z5 w& |) K% N
ports:2 k ~ [/ G, ~3 V
- port: 8000
% G* m, y2 W! C% | d- G) S- j targetPort: 8000 y3 k. O- i- ~" q" d1 y
selector:
' K) D& l' u/ r8 F k8s-app: dashboard-metrics-scraper
# `. b3 `; V/ ?: F; @. A6 x- a4 }$ d, E" ^$ c5 Q! y# V
---
% j y0 W$ w6 p% P/ `8 Pkind: Deployment
9 D5 u9 w0 g1 q XapiVersion: apps/v1
2 v% U6 M1 Z. G9 D. T* mmetadata:
o, f1 q& B3 D5 Z0 W# p labels: C$ U) j" {* Q3 }0 o$ n, W' U$ O3 K
k8s-app: dashboard-metrics-scraper' U3 e4 c& C4 R
name: dashboard-metrics-scraper
% v" A" X! O. p+ h/ @ namespace: kube-system+ j# U9 R* Y& y& v; k
spec:
% i% ?$ {, H$ m0 n replicas: 17 V0 k# d+ v( ^( z
revisionHistoryLimit: 10
' z/ N1 ~- x6 m4 f' f5 B selector:: i! K+ m4 ]9 S2 r# v8 G# Q
matchLabels:3 ]) ?+ O7 `6 n g/ i- l
k8s-app: dashboard-metrics-scraper
+ E. d$ A( E; c: R template:8 f7 h5 T% r+ u+ l6 J
metadata:8 J1 r1 d E! x5 R* p
labels:) A* W+ l1 \, T3 F- I# j
k8s-app: dashboard-metrics-scraper/ o9 c; a/ n$ g+ m
spec:
# u4 c0 G) w2 q" H) N0 g8 F securityContext:
% @' Q# n; y6 P9 T) s seccompProfile:# X* |4 {% H! P/ j' z# ?+ i( _$ ?
type: RuntimeDefault
, ~; S& n3 u, Z3 n2 `1 l* X8 z% S( D containers:+ d$ q. w9 D& I Q7 W
- name: dashboard-metrics-scraper- t$ @8 ]( V0 u/ g* R2 F: i
image: kubernetesui/metrics-scraper:v1.0.74 W d$ j1 B" H* F5 t
imagePullPolicy: IfNotPresent
7 D1 @) u. \: }' r# j% ] ports:7 K! n% V; h& C6 d/ ]9 w; m/ d2 t9 P
- containerPort: 8000
& w( M# S( j8 _7 q+ O/ P$ w protocol: TCP( [/ S) g0 j' h) g! N! j* z
livenessProbe:
/ e7 L7 @% z ^) k) n/ n- J5 o httpGet:
% t' g& v$ u. D scheme: HTTP
, D2 Q4 M+ a I! t& s1 y0 ?9 S; g path: /
, x1 @# X: j& B- L2 T) c port: 8000
% P2 ~' u v$ S- [! r. m M initialDelaySeconds: 30
/ {# C5 `6 s# X; D( ^$ `3 D& k( N timeoutSeconds: 30
$ G$ `7 y0 C, F- v volumeMounts:
6 \, s9 ~5 H7 ^& T( {* A - mountPath: /tmp+ p4 y( b7 L F w9 u- |
name: tmp-volume
* u3 l8 G4 w. h! o( @/ I securityContext:& S' r' W# ^. j7 r
allowPrivilegeEscalation: false! t3 O5 T/ A5 |9 W& t! p+ q3 Q
readOnlyRootFilesystem: true/ B! I% o7 O1 Z Q4 P
runAsUser: 1001
: f9 {$ r" W# G runAsGroup: 20019 L3 b5 G5 c# k4 v8 [
serviceAccountName: kubernetes-dashboard% \$ I8 N' ?3 ^1 }. |3 r+ k( e
nodeSelector:1 ]" y; }9 _) \2 `, R
"kubernetes.io/os": linux, q8 x" j( c9 h$ Y! e5 G
# Comment the following tolerations if Dashboard must not be deployed on master: x3 @+ o$ u6 \ e4 m
tolerations:1 |8 P' l O( z" }2 `" p
- key: node-role.kubernetes.io/master8 G9 I+ p% u ]' A4 |
effect: NoSchedule
( }5 F, u3 n; V/ w volumes:2 L! V9 f! }) j7 L% E
- name: tmp-volume
7 D0 t$ b j& \0 ~/ k y! l. h X9 G emptyDir: {}
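A manifest this long is easy to damage while copy-pasting, so before moving on it can be worth a client-side dry run to confirm it still parses; a minimal sketch, assuming the file was saved to the path used later in this section:

kubectl apply --dry-run=client -f /approot1/k8s/tmp/service/dashboard.yaml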
Import the dashboard images
Note the -n=k8s.io flag: the kubelet pulls images through containerd's k8s.io namespace, so images imported anywhere else would not be visible to k8s.

for i in 192.168.91.19 192.168.91.20;do \
scp /approot1/k8s/images/dashboard-*.tar $i:/tmp/; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-v2.4.0.tar && rm -f /tmp/dashboard-v2.4.0.tar"; \
ssh $i "ctr -n=k8s.io image import /tmp/dashboard-metrics-scraper-v1.0.7.tar && rm -f /tmp/dashboard-metrics-scraper-v1.0.7.tar"; \
done
Check the images

for i in 192.168.91.19 192.168.91.20;do \
ssh $i "ctr -n=k8s.io image list | egrep 'dashboard|metrics-scraper'"; \
done
Run the dashboard components in k8s

kubectl apply -f /approot1/k8s/tmp/service/dashboard.yaml
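If you prefer to block until the pods report Ready instead of polling the manual check below, kubectl can wait on the k8s-app labels from the manifest; a small sketch, assuming 120s is enough for your nodes:

kubectl -n kube-system wait --for=condition=Ready pod \
  -l k8s-app=kubernetes-dashboard --timeout=120s
kubectl -n kube-system wait --for=condition=Ready pod \
  -l k8s-app=dashboard-metrics-scraper --timeout=120s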
Check that the dashboard pods are running

kubectl get pod -n kube-system | grep dashboard

The expected output looks something like this:

dashboard-metrics-scraper-799d786dbf-v28pm   1/1     Running   0          2m55s
kubernetes-dashboard-9f8c8b989-rhb7z         1/1     Running   0          2m55s

Check the dashboard access port
The Service does not pin the dashboard to a fixed access port, so you have to look it up yourself; you can also edit the yaml file to specify the port (see the sketch after this step).

kubectl get svc -n kube-system | grep dashboard

The expected output looks something like this; in my case NodePort 30210 is mapped to the pod's port 443:

kubernetes-dashboard   NodePort   10.88.127.68   <none>   443:30210/TCP   5m30s

Access the dashboard page with the port you obtained, for example: https://192.168.91.19:30210
Since the certificate is self-signed (generated by --auto-generate-certificates), expect a browser warning that you need to accept.
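To pin the NodePort rather than take whatever was allocated, you can either add nodePort under spec.ports in the Service section of dashboard.yaml, or patch the live Service. A sketch of the patch variant, assuming 30210 is unused and inside the cluster's NodePort range (30000-32767 by default):

kubectl -n kube-system patch svc kubernetes-dashboard --type=json \
  -p='[{"op":"replace","path":"/spec/ports/0/nodePort","value":30210}]'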
Check the dashboard login token
First get the name of the token secret:

kubectl get secrets -n kube-system | grep admin

The expected output looks something like this:

admin-user-token-zvrst   kubernetes.io/service-account-token   3   9m2s
Then get the token content:

kubectl get secrets -n kube-system admin-user-token-zvrst -o jsonpath='{.data.token}' | base64 -d

The expected output looks something like this:

eyJhbGciOiJSUzI1NiIsImtpZCI6InA4M1lhZVgwNkJtekhUd3Vqdm9vTE1ma1JYQ1ZuZ3c3ZE1WZmJhUXR4bUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXp2cnN0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhYTE3NTg1ZC1hM2JiLTQ0YWYtOWNhZS0yNjQ5YzA0YThmZWYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.K2o9p5St9tvIbXk7mCQCwsZQV11zICwN-JXhRv1hAnc9KFcAcDOiO4NxIeicvC2H9tHQBIJsREowVwY3yGWHj_MQa57EdBNWMrN1hJ5u-XzpzJ6JbQxns8ZBrCpIR8Fxt468rpTyMyqsO2UBo-oXQ0_ZXKss6X6jjxtGLCQFkz1ZfFTQW3n49L4ENzW40sSj4dnaX-PsmosVOpsKRHa8TPndusAT-58aujcqt31Z77C4M13X_vAdjyDLK9r5ZXwV2ryOdONwJye_VtXXrExBt9FWYtLGCQjKn41pwXqEfidT8cY6xbA7XgUVTr9miAmZ-jf1UeEw-nm8FOw9Bb5v6A
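The secret name suffix (zvrst here) is random in every cluster, so a scripted variant saves looking it up first; a sketch, assuming the admin-user ServiceAccount created earlier owns the only secret matching the pattern:

kubectl -n kube-system get secret \
  "$(kubectl -n kube-system get secret | awk '/^admin-user-token/{print $1}')" \
  -o jsonpath='{.data.token}' | base64 -d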
This concludes the binary deployment of k8s v1.23.3 based on containerd.