- 积分
- 16841
在线时间 小时
最后登录1970-1-1
|

楼主 |
发表于 2023-1-9 17:03:34
|
显示全部楼层
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean

[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_pg_per_osd 500
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean

' b$ R/ P' k( q6 x f+ e |
|