osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph tell 'osd.*' injectargs --osd-max-backfills=2 --osd-recovery-max-active=6
osd.0: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.0: {}
osd.1: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.1: {}
osd.2: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.2: {}
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph daemon osd.1 config get osd_max_backfills
admin_socket: exception getting command descriptions: No such file or directory
# ceph config show osd.2
NAME VALUE SOURCE OVERRIDES IGNORES
container_image quay.io/ceph/ceph:v15 mon
daemonize false override
keyring $osd_data/keyring default
leveldb_log default
log_stderr_prefix debug default
log_to_file false default
log_to_stderr true default
mon_host file
mon_warn_on_pool_no_redundancy false mon
no_config_file false override
osd_max_backfills 2 override
osd_recovery_max_active 6 override
rbd_default_features 61 default
setgroup ceph cmdline
setuser ceph cmdline
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph tell 'osd.*' injectargs --osd-max-backfills=2 --osd-recovery-max-active=6
osd.0: {}
osd.1: {}
osd.2: {}
# ceph tell 'osd.*' injectargs --osd-max-backfills=2 --osd-recovery-max-active=6
osd.0: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.0: {}
osd.1: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.1: {}
osd.2: osd_max_backfills = '2' osd_recovery_max_active = '6' (not observed, change may require restart)
osd.2: {}
# ceph-conf --show-config | egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
osd_max_backfills = 1
osd_recovery_max_active = 0
osd_recovery_max_active_hdd = 3
osd_recovery_max_active_ssd = 10
osd_recovery_op_priority = 3
# ceph config show osd.2
NAME VALUE SOURCE OVERRIDES IGNORES
container_image quay.io/ceph/ceph:v15 mon
daemonize false override
keyring $osd_data/keyring default
leveldb_log default
log_stderr_prefix debug default
log_to_file false default
log_to_stderr true default
mon_host file
mon_warn_on_pool_no_redundancy false mon
no_config_file false override
osd_max_backfills 2 override
osd_recovery_max_active 6 override
rbd_default_features 61 default
setgroup ceph cmdline
setuser ceph cmdline
这个只是临时生效，重启集群后失效。可用 ceph mgr module disable pg_autoscaler 关闭 pg_autoscaler；用 ceph config show osd.1 osd_recovery_max_active 查看当前生效值。
# ceph config show osd.1 osd_recovery_max_active
4
To view the current active setting(s), on the node where the OSD being checked is running execute for example:
ceph daemon osd.<insert_id> config get osd_max_backfills
To set back to default:
ceph tell 'osd.*' injectargs --osd-max-backfills=1 --osd-recovery-max-active=3
With SES 6, "ceph config set" can alternatively be used:
ceph config set osd osd_max_backfills 2
ceph config set osd osd_recovery_max_active 3
To set back to default:
ceph config rm osd osd_recovery_max_active
ceph config rm osd osd_max_backfills
To view the current settings:
ceph config show osd.<insert_id>
Recovery can be monitored with "ceph -s".
After increasing the settings, should any OSDs become unstable (restarting) or clients are negatively impacted by the additional recovery overhead then reduce the values or set them back to the defaults.
Once the cluster is finished with recovery and back in a HEALTH_OK state, set the values back to default. ceph tell 'osd.*' injectargs --osd-max-backfills=3 --osd-recovery-max-active=9 两者按照1:3的方式匹配
页:
[1]