After adjusting the pool's PG count, we observed the following changes:
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pg_num 64
set pool 5 pg_num to 64
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pgp_num 64
set pool 5 pgp_num to 64
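On recent releases (Nautilus and later), setting pg_num alone is normally sufficient, since pgp_num is adjusted to follow it; either way, the values actually applied to the pool can be double-checked with the stock get commands, for example:

[root@ceph1 ~]# ceph osd pool get default.rgw.meta pg_num
[root@ceph1 ~]# ceph osd pool get default.rgw.meta pgp_num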
Cluster status at this point:
[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 3m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 3m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 233 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     233 active+clean

The total PG count has reached the new value of 233, i.e. the sum of pg_num across all eight pools (1 + 32 + 32 + 32 + 64 + 32 + 32 + 8).

Check the pools' detailed records:
[root@ceph1 ~]# ceph osd pool ls detail
pool 1 'device_health_metrics' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 375 flags hashpspool,selfmanaged_snaps stripe_width 0 pg_num_min 1 application mgr_devicehealth
pool 2 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 35 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 37 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 39 flags hashpspool stripe_width 0 application rgw
pool 5 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode on last_change 381 lfor 0/156/379 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
pool 6 'cephfs.cephfs.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 377 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 7 'cephfs.cephfs.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 163 flags hashpspool stripe_width 0 application cephfs
pool 8 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode on last_change 297 lfor 0/297/295 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
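Pool 5 still has autoscale_mode on, so the autoscaler will compute its own target for the pool and pull it back toward that value. As an extra check (not part of the original capture), the computed targets can be listed with:

[root@ceph1 ~]# ceph osd pool autoscale-status

The NEW PG_NUM column there should show the value the autoscaler intends for default.rgw.meta.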
6 E' K" O1 e! i" i% o$ A发生变化,pg开始下降:: [! ]( m0 b1 M/ s8 V
[root@ceph1 ~]# ceph osd pool ls detail
# I& r/ r1 C# Q# x, |- Kpool 1 'device_health_metrics' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 375 flags hashpspool,selfmanaged_snaps stripe_width 0 pg_num_min 1 application mgr_devicehealth
7 _& B+ q, T) @: `% C0 [pool 2 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 35 flags hashpspool stripe_width 0 application rgw4 o _: E3 |# r( L
pool 3 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 37 flags hashpspool stripe_width 0 application rgw# D2 W# W+ c7 A3 W% E/ q$ J
pool 4 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 39 flags hashpspool stripe_width 0 application rgw0 B, B$ A0 @4 W% o; N. D; a' h; |
pool 5 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 61 pgp_num 59 pg_num_target 8 pgp_num_target 8 autoscale_mode on last_change 400 lfor 0/400/398 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw% |, y) M" ~) j9 N0 z4 @
pool 6 'cephfs.cephfs.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 377 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
/ b$ f, l( y0 i z! _7 E; S+ Ipool 7 'cephfs.cephfs.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 163 flags hashpspool stripe_width 0 application cephfs
. N* ]- C: e3 k1 ?' Jpool 8 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode on last_change 297 lfor 0/297/295 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw/ q' u$ ?- m( K4 n, B
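Note that pool 5 now carries pg_num_target 8 and pgp_num_target 8, i.e. the autoscaler is merging it straight back down to 8 PGs. If the manually chosen value of 64 is meant to stick, autoscaling has to be switched off for that pool before re-applying the setting; a minimal sketch:

[root@ceph1 ~]# ceph osd pool set default.rgw.meta pg_autoscale_mode off
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pg_num 64
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pgp_num 64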
The pgs count shown in the cluster status is dropping as well:
[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 4m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 4m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 228 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     228 active+clean

  progress:
    PG autoscaler decreasing pool 5 PGs from 64 to 8 (0s)
      [............................]

[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 4m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 4m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 228 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     228 active+clean

  progress:
    PG autoscaler decreasing pool 5 PGs from 64 to 8 (0s)
      [............................]

After waiting a while for the merge to progress:
# D" T: x9 \; }# Q' `8 L/ _[root@ceph1 ~]# ceph -s* ~& ^6 e! ^: ?" |4 a
cluster:
3 E# U; R" J: i0 v id: 433d3d2a-8e4a-11ed-b84b-000c29377297
- w' k: U3 v! h" C health: HEALTH_OK/ Z! P5 |4 D V+ R H
: [$ x: N. D3 Y( i2 M+ E services:
% ?+ I6 e5 D; @( K+ ^8 ~ mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 10m)+ Q, _ B" L3 ?, B! `! \1 t) s
mgr: ceph2.twhubv(active, since 9m), standbys: ceph1.zmducz, ceph3.dkkmft0 d3 f8 [& k: g5 A2 u/ {6 j7 n
mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby! x1 Z" z( q5 G8 w- R; Q
osd: 3 osds: 3 up (since 9m), 3 in (since 11h)
6 _) u l U; T rgw: 3 daemons active (ceph1, ceph2, ceph3)+ z$ t( t) t. n
+ L6 `2 g0 N! D, |' x% b: y task status:3 X, l7 w# @. Y; |- @
0 D2 P& d, K4 n8 Q D4 S3 q; p/ T- e$ d data:* R' ~: Q7 d: a2 }
pools: 8 pools, 185 pgs7 n5 m6 e8 U* u9 Q) \& O# z
objects: 240 objects, 157 KiB" U0 [* G6 l% Q9 {* ?/ @
usage: 3.0 GiB used, 57 GiB / 60 GiB avail. ? X h1 J: E4 ~) U6 |/ B
pgs: 185 active+clean
% w' e1 I4 }5 J; A. V
+ d! `. u o4 o; b1 V7 e9 e progress:
2 j: C: j! n# Z! ?* E& A$ w5 i PG autoscaler decreasing pool 5 PGs from 64 to 8 (5m)
9 ]( N1 ^# ?2 B; b1 {* L/ @ [======================......] (remaining: 81s)( }' M5 @' F+ V! c, d* M' y
+ y+ I: r t8 M3 [
The total has automatically dropped to 185. Because these are virtual machines, the decrease is fairly slow.
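Once the merge finishes, the total should settle at 177 PGs (1 + 32 + 32 + 32 + 8 + 32 + 32 + 8, with pool 5 back at its target of 8). A simple way to follow it and confirm the end state with the stock CLI, as a sketch:

[root@ceph1 ~]# watch -n 10 ceph -s
[root@ceph1 ~]# ceph osd pool get default.rgw.meta pg_num
[root@ceph1 ~]# ceph osd pool get default.rgw.meta pgp_num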