OP | Posted 2023-1-9 17:03:34
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
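The warning is simply a consequence of packing 273 PGs, mostly 3-way replicated, onto only 3 OSDs: each OSD ends up holding roughly 270+ PG replicas, which is above the default mon_max_pg_per_osd limit of 250 (the exact 272 depends on the per-pool replica sizes). A quick way to see the per-OSD count is the PGS column of (suggested command, not part of the original session):

ceph osd df    # last column (PGS) shows how many PGs each OSD carries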
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_pg_per_osd 500
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
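mon_pg_per_osd is not a real option; the correct name is mon_max_pg_per_osd, as the next command shows. When unsure of an option name, something like the following should list the candidates (ceph config ls dumps all known option names):

ceph config ls | grep pg_per_osd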
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
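Worth noting: mon_max_pg_per_osd was already 500 at the mon scope before this, yet the warning still compared against the default limit of 250. As I understand it, the "too many PGs per OSD" check is raised by the mgr, so a mon-scoped value never reaches it; setting the option at the global level is what actually cleared HEALTH_WARN. To double-check the value the mgr sees (suggested commands, not from the session above):

ceph config get mgr mon_max_pg_per_osd
ceph health detail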
" }8 G7 Q9 J/ r+ l6 V |
|