I noticed an abnormal WARN message in the cluster. It doesn't affect normal operation, but it's the kind of thing the obsessive among us can't leave alone, so here is the fix, step by step. First, check the detailed error message (the output of ceph health detail):
HEALTH_WARN 2 pgs not deep-scrubbed in time
PG_NOT_DEEP_SCRUBBED 2 pgs not deep-scrubbed in time
    pg 18.41 not deep-scrubbed since 2022-12-07 20:15:50.550606
    pg 5.16d not deep-scrubbed since 2022-12-07 22:21:58.141071
Manually kick off a deep scrub on each affected PG:

[root@controller1 ~]# ceph pg deep-scrub 18.41
instructing pg 18.41 on osd.6 to deep-scrub
[root@controller1 ~]# ceph pg deep-scrub 5.16d
instructing pg 5.16d on osd.13 to deep-scrub
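With only two PGs this is quick to do by hand, but when many PGs are overdue it's easier to loop over the health output. A minimal sketch, assuming the "pg <pgid> not deep-scrubbed since ..." line format shown above:

ceph health detail | grep 'not deep-scrubbed since' | awk '{print $2}' | while read pg; do ceph pg deep-scrub "$pg"; done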
Next, check the current deep-scrub interval on one of the OSDs and temporarily raise it so the deadline moves out and the warning clears:

[root@controller1 ~]# ceph daemon osd.6 config show | grep osd_deep_scrub_interval
    "osd_deep_scrub_interval": "604800.000000",
[root@controller1 ~]# ceph config set global osd_deep_scrub_interval 3628800
[root@controller1 ~]# ceph daemon osd.6 config show | grep osd_deep_scrub_interval
    "osd_deep_scrub_interval": "3628800.000000",
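For context: 604800 seconds is the 7-day default, and 3628800 seconds is 42 days, so this pushes the deadline far enough out for the WARN to clear while the manual deep scrubs finish. The monitor actually warns only some margin past the interval; on releases that have the option (an assumption, check your release), that margin can be inspected with:

ceph config get mon mon_warn_pg_not_deep_scrubbed_ratio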
Once the cluster has recovered, change the interval back. First confirm the status:
[root@controller1 ~]# ceph -s
  cluster:
    id:     9d22e36a-2bdd-4d2d-8394-48af75ead777
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5M)
    mgr: ceph1(active, since 19M), standbys: ceph2, ceph3
    osd: 40 osds: 40 up (since 3w), 40 in (since 12M)
    rgw: 3 daemons active (host09, host10, host11)

  task status:

  data:
    pools:   16 pools, 3072 pgs
    objects: 4.20M objects, 16 TiB
    usage:   40 TiB used, 107 TiB / 148 TiB avail
    pgs:     3067 active+clean
             5    active+clean+scrubbing+deep

  io:
    client:   403 KiB/s rd, 9.5 MiB/s wr, 514 op/s rd, 466 op/s wr
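To double-check that the two problem PGs really finished, their last deep-scrub timestamps can be queried directly; ceph pg <pgid> query prints JSON that includes a last_deep_scrub_stamp field:

ceph pg 18.41 query | grep last_deep_scrub_stamp
ceph pg 5.16d query | grep last_deep_scrub_stamp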
[root@controller1 ~]# ceph config set global osd_deep_scrub_interval 604800
[root@controller1 ~]#
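Setting the value back to 604800 restores the default explicitly; on releases with the central config database (Mimic and later), an alternative is to remove the override entirely so the built-in default applies again:

ceph config rm global osd_deep_scrub_interval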
Finally, restart the affected OSD daemon if you want to be sure the reverted value is picked up:

systemctl restart ceph-osd@6.service
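After the restart, repeating the earlier check confirms the daemon came back with the reverted value; it should now report 604800.000000 again:

ceph daemon osd.6 config show | grep osd_deep_scrub_interval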