Setting the PG-per-OSD limit (mon_max_pg_per_osd) globally with ceph config
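The cluster below reports HEALTH_WARN: too many PGs per OSD (272 > max 250). This warning fires when the average number of PG replicas placed on an OSD exceeds mon_max_pg_per_osd (default 250). Two related options appear in the transcript: mon_target_pg_per_osd, the per-OSD PG target used for pool sizing, and osd_max_pg_per_osd_hard_ratio, which multiplies the limit into a hard ceiling beyond which new PG creation is refused. Raising the limit to 500 clears the warning.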
# ceph config set global mon_target_pg_per_osd 500
# ceph config get mon
WHO     MASK  LEVEL     OPTION                                  VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                         162.96.90.0/24         *
global        basic     container_image                         quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                   true
mon           advanced  mon_max_pg_per_osd                      500
global        advanced  mon_target_pg_per_osd                   500
global        advanced  mon_warn_on_pool_no_redundancy          false
mon           advanced  osd_max_pg_per_osd_hard_ratio           10.000000
mon           advanced  public_network                          192.168.13.0/24        *
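The health check compares against mon_max_pg_per_osd rather than mon_target_pg_per_osd, so set that limit as well: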
# ceph config set global mon_max_pg_per_osd 500
# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
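The reported 272 is consistent with the pool layout: 273 PGs, mostly 3x replicated, spread across 3 OSDs gives roughly 273 × 3 / 3 ≈ 273 PG replicas per OSD. The warning may persist briefly until the monitors apply the new limit. To read the per-OSD PG count directly (a quick check, no changes made):

# ceph osd df        # the PGS column shows each OSD's placement-group count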
# ceph config get mon
WHO     MASK  LEVEL     OPTION                                  VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                         162.96.90.0/24         *
global        basic     container_image                         quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                   true
mon           advanced  mon_max_pg_per_osd                      500
global        advanced  mon_target_pg_per_osd                   500
global        advanced  mon_warn_on_pool_no_redundancy          false
mon           advanced  osd_max_pg_per_osd_hard_ratio           10.000000
mon           advanced  public_network                          192.168.13.0/24        *
# ceph config set global mon_pg_per_osd 500
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
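The EINVAL error is expected: mon_pg_per_osd is not a valid option name. When unsure of an option's exact spelling, the option catalogue can be searched (assuming a v15+ cluster with the centralized config database):

# ceph config ls | grep pg_per_osd
# ceph config help mon_max_pg_per_osd

The correct name is mon_max_pg_per_osd: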
# ceph config set global mon_max_pg_per_osd 500
# ceph config get mon
WHO     MASK  LEVEL     OPTION                                  VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                         162.96.90.0/24         *
global        basic     container_image                         quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                   true
mon           advanced  mon_max_pg_per_osd                      500
global        advanced  mon_target_pg_per_osd                   500
global        advanced  mon_warn_on_pool_no_redundancy          false
mon           advanced  osd_max_pg_per_osd_hard_ratio           10.000000
mon           advanced  public_network                          192.168.13.0/24        *
# ceph config set global mon_max_pg_per_osd 500
# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
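Raising mon_max_pg_per_osd silences the warning, but on a 3-OSD test cluster it only papers over the real issue: 273 PGs is far more than 3 OSDs need. A longer-term fix is to let the PG autoscaler shrink oversized pools (a sketch; <pool> is a placeholder for each pool name, and the module is enabled by default on recent releases):

# ceph mgr module enable pg_autoscaler
# ceph osd pool set <pool> pg_autoscale_mode on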