Resolving the Ceph distributed storage alert "1 pgs not deep-scrubbed in time"
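
A Ceph cluster raises HEALTH_WARN with "pgs not deep-scrubbed in time" when one or more placement groups have not been deep-scrubbed within osd_deep_scrub_interval. Start by checking the overall cluster status: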
# ceph -s
  cluster:
    id:     5fa16469-8be4-4457-8a78-12b1910afff7
    health: HEALTH_WARN
            1 pgs not deep-scrubbed in time

  services:
    mon: 5 daemons, quorum compute01,controller01,controller02,controller03,compute02 (age 2d)
    mgr: controller02(active, since 8d), standbys: controller01, controller03
    mds: 1/1 daemons up, 2 standby
    osd: 96 osds: 96 up (since 34h), 96 in (since 34h)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   14 pools, 881 pgs
    objects: 7.68M objects, 29 TiB
    usage:   105 TiB used, 611 TiB / 716 TiB avail
    pgs:     880 active+clean
             1   active+clean+scrubbing+deep+repair

  io:
    client: 12 KiB/s rd, 16 MiB/s wr, 8 op/s rd, 1.24k op/s wr
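
One PG is already in active+clean+scrubbing+deep+repair, meaning a repairing deep scrub is in progress on it. ceph health detail shows exactly which PG is overdue: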
# ceph health detail
HEALTH_WARN 1 pgs not deep-scrubbed in time
PG_NOT_DEEP_SCRUBBED: 1 pgs not deep-scrubbed in time
    pg 6.10 not deep-scrubbed since 2025-03-13T05:28:32.703667+0800
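
When only a single PG is behind, one remediation is simply to deep-scrub it by hand; the warning clears once the scrub finishes. For pg 6.10 that would be:
# ceph pg deep-scrub 6.10
Here the PG is already being scrubbed and repaired, so instead the scrub interval itself is adjusted below.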
# ceph -s
  cluster:
    id:     5fa16469-8be4-4457-8a78-12b1910afff7
    health: HEALTH_WARN
            1 pgs not deep-scrubbed in time

  services:
    mon: 5 daemons, quorum compute01,controller01,controller02,controller03,compute02 (age 2d)
    mgr: controller02(active, since 8d), standbys: controller01, controller03
    mds: 1/1 daemons up, 2 standby
    osd: 96 osds: 96 up (since 34h), 96 in (since 34h)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   14 pools, 881 pgs
    objects: 7.69M objects, 29 TiB
    usage:   105 TiB used, 611 TiB / 716 TiB avail
    pgs:     880 active+clean
             1   active+clean+scrubbing+deep+repair

  io:
    client: 89 MiB/s rd, 99 MiB/s wr, 201 op/s rd, 979 op/s wr
# ceph config
no valid command found; 10 closest matches:
config show <who> [<key>]
config show-with-defaults <who>
config set <who> <name> <value> [--force]
config rm <who> <name>
config get <who> [<key>]
config dump
config help <key>
config ls
config assimilate-conf
config log [<num:int>]
Error EINVAL: invalid command
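
As the error output shows, ceph config requires a subcommand. ceph config dump prints every option stored in the monitors' central configuration database, which is where the scrub settings live: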
# ceph config dump
WHO  MASK  LEVEL  OPTION  VALUE  RO
global  advanced  cluster_network  172.27.10.0/24  *
global  advanced  ms_bind_ipv4  true
global  advanced  ms_bind_ipv6  false
global  dev  osd_crush_chooseleaf_type  0  *
global  advanced  osd_pool_default_crush_rule  -1
global  advanced  public_network  172.27.12.0/23  *
mgr  advanced  mgr/balancer/active  true
mgr  advanced  mgr/dashboard/ALERTMANAGER_API_HOST  http://172.27.12.11:9093  *
mgr  advanced  mgr/dashboard/GRAFANA_API_PASSWORD  admin  *
mgr  advanced  mgr/dashboard/GRAFANA_API_SSL_VERIFY  false  *
mgr  advanced  mgr/dashboard/GRAFANA_API_URL  https://172.27.12.11:3000  *
mgr  advanced  mgr/dashboard/GRAFANA_API_USERNAME  admin  *
mgr  advanced  mgr/dashboard/PROMETHEUS_API_HOST  http://172.27.12.11:9092  *
mgr  advanced  mgr/dashboard/server_port  8443  *
mgr  advanced  mgr/dashboard/ssl  true  *
mgr  advanced  mgr/dashboard/ssl_server_port  8443  *
mgr  advanced  mgr/dashboard/controller01/server_addr  172.27.12.136  *
mgr  advanced  mgr/dashboard/controller02/server_addr  172.27.12.137  *
mgr  advanced  mgr/dashboard/controller03/server_addr  172.27.12.138  *
mgr  advanced  mgr/zabbix/identifier  172.27.12.136  *
mgr  advanced  mgr/zabbix/zabbix_host  172.27.16.49  *
osd  dev  bluestore_2q_cache_kin_ratio  0.700000
osd  dev  bluestore_2q_cache_kout_ratio  0.300000
osd  dev  bluestore_avl_alloc_bf_threshold  262144
osd  advanced  bluestore_bluefs_max_free  21474836480
osd  dev  bluestore_cache_autotune_interval  10.000000
osd  dev  bluestore_cache_kv_onode_ratio  0.200000
osd  dev  bluestore_cache_kv_ratio  0.300000
osd  dev  bluestore_cache_meta_ratio  0.500000
osd  dev  bluestore_cache_size  4294967296
osd  advanced  bluestore_cache_trim_interval  0.100000
osd  advanced  bluestore_compression_algorithm  lz4
osd  advanced  bluestore_compression_max_blob_size_hdd  262144
osd  advanced  bluestore_compression_min_blob_size_hdd  32768
osd  advanced  bluestore_compression_mode  passive
osd  advanced  bluestore_deferred_batch_ops_hdd  128
osd  dev  bluestore_freelist_blocks_per_key  256
osd  advanced  bluestore_prefer_deferred_size_hdd  131072
osd  dev  bluestore_rocksdb_cfs  write_buffer_size=268435456,max_write_buffer_number=32  *
osd  advanced  bluestore_rocksdb_options  max_write_buffer_number=128,min_write_buffer_number_to_merge=32,level0_file_num_compaction_trigger=16,max_background_jobs=16,max_bytes_for_level_base=4294967296,compaction_readahead_size=4194304  *
osd  advanced  ms_osd_compression_algorithm  lz4
osd  advanced  osd_deep_scrub_interval  1209600.000000
osd  advanced  osd_max_backfills  2
osd  advanced  osd_max_scrubs  3
osd  dev  osd_memory_cache_min  1073741824
osd  basic  osd_memory_target  4294967296
osd  advanced  osd_recovery_max_active  5
osd  advanced  osd_scrub_begin_hour  22
osd  advanced  osd_scrub_end_hour  7
osd  advanced  osd_scrub_interval_randomize_ratio  0.800000
osd  advanced  osd_scrub_load_threshold  0.800000
osd  advanced  osd_scrub_sleep  0.200000
osd.4  advanced  osd_memory_target_autotune  true
client.rgw.default.compute04.rgw0  basic  log_file  /var/log/ceph/ceph-rgw-default-yz-openstack01-compute04.rgw0.log  *
client.rgw.default.compute04.rgw0  advanced  rgw_content_length_compat  true
client.rgw.default.compute04.rgw0  advanced  rgw_enable_apis  s3, swift, swift_auth, admin  *
client.rgw.default.compute04.rgw0  advanced  rgw_enforce_swift_acls  true
client.rgw.default.compute04.rgw0  basic  rgw_frontends  beast endpoint=172.27.11.134:8080  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_accepted_admin_roles  admin, ResellerAdmin  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_accepted_roles  _member_, member, admin, ResellerAdmin  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_admin_domain  default  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_admin_password  X2MYnyXJdg1PIM48pFloZw52yeg6wEyryM08j75p  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_admin_project  service  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_admin_user  ceph_rgw  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_api_version  3
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_implicit_tenants  true  *
client.rgw.default.compute04.rgw0  basic  rgw_keystone_url  http://172.27.11.188:5000  *
client.rgw.default.compute04.rgw0  advanced  rgw_keystone_verify_ssl  false
client.rgw.default.compute04.rgw0  advanced  rgw_s3_auth_use_keystone  true
client.rgw.default.compute04.rgw0  advanced  rgw_swift_account_in_url  true
client.rgw.default.compute04.rgw0  advanced  rgw_swift_versioning_enabled  true
client.rgw.default.compute04.rgw0  advanced  rgw_verify_ssl  false
client.rgw.default.compute05.rgw0  basic  log_file  /var/log/ceph/ceph-rgw-default-compute05.rgw0.log  *
client.rgw.default.compute05.rgw0  advanced  rgw_content_length_compat  true
client.rgw.default.compute05.rgw0  advanced  rgw_enable_apis  s3, swift, swift_auth, admin  *
client.rgw.default.compute05.rgw0  advanced  rgw_enforce_swift_acls  true
client.rgw.default.compute05.rgw0  basic  rgw_frontends  beast endpoint=172.27.11.135:8080  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_accepted_admin_roles  admin, ResellerAdmin  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_accepted_roles  _member_, member, admin, ResellerAdmin  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_admin_domain  default  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_admin_password  X2MYnyXJdg1PIM48pFloZw52yeg6wEyryM08j75p  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_admin_project  service  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_admin_user  ceph_rgw  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_api_version  3
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_implicit_tenants  true  *
client.rgw.default.compute05.rgw0  basic  rgw_keystone_url  http://172.27.11.188:5000  *
client.rgw.default.compute05.rgw0  advanced  rgw_keystone_verify_ssl  false
client.rgw.default.compute05.rgw0  advanced  rgw_s3_auth_use_keystone  true
client.rgw.default.compute05.rgw0  advanced  rgw_swift_account_in_url  true
client.rgw.default.compute05.rgw0  advanced  rgw_swift_versioning_enabled  true
client.rgw.default.compute05.rgw0  advanced  rgw_verify_ssl  false
client.rgw.default.compute06.rgw0  basic  log_file  /var/log/ceph/ceph-rgw-default-compute06.rgw0.log  *
client.rgw.default.compute06.rgw0  advanced  rgw_content_length_compat  true
client.rgw.default.compute06.rgw0  advanced  rgw_enable_apis  s3, swift, swift_auth, admin  *
client.rgw.default.compute06.rgw0  advanced  rgw_enforce_swift_acls  true
client.rgw.default.compute06.rgw0  basic  rgw_frontends  beast endpoint=172.27.11.139:8080  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_accepted_admin_roles  admin, ResellerAdmin  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_accepted_roles  _member_, member, admin, ResellerAdmin  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_admin_domain  default  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_admin_password  X2MYnyXJdg1PIM48pFloZw52yeg6wEyryM08j75p  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_admin_project  service  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_admin_user  ceph_rgw  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_api_version  3
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_implicit_tenants  true  *
client.rgw.default.compute06.rgw0  basic  rgw_keystone_url  http://172.27.11.188:5000  *
client.rgw.default.compute06.rgw0  advanced  rgw_keystone_verify_ssl  false
client.rgw.default.compute06.rgw0  advanced  rgw_s3_auth_use_keystone  true
client.rgw.default.compute06.rgw0  advanced  rgw_swift_account_in_url  true
client.rgw.default.compute06.rgw0  advanced  rgw_swift_versioning_enabled  true
client.rgw.default.compute06.rgw0  advanced  rgw_verify_ssl  false
# ceph config dump | grep osd_deep_scrub
osd  advanced  osd_deep_scrub_interval  1209600.000000
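
1209600 s is 14 days, the stock deep scrub interval. The dump above also restricts scrubbing to a 22:00-07:00 window (osd_scrub_begin_hour / osd_scrub_end_hour) and to periods when load is below osd_scrub_load_threshold 0.8, so on a busy 96-OSD cluster individual PGs can miss the two-week deadline. The effective value for a given daemon can also be read back directly, e.g.:
# ceph config get osd osd_deep_scrub_interval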
# ceph config get mon
WHO  MASK  LEVEL  OPTION  VALUE  RO
global  advanced  cluster_network  172.27.10.0/24  *
global  advanced  ms_bind_ipv4  true
global  advanced  ms_bind_ipv6  false
global  dev  osd_crush_chooseleaf_type  0  *
global  advanced  osd_pool_default_crush_rule  -1
global  advanced  public_network  172.27.12.0/23  *
Set the OSD deep scrub interval to 3628800 s (42 days):
# ceph config set global osd_deep_scrub_interval 3628800
# ceph config get mon
WHO  MASK  LEVEL  OPTION  VALUE  RO
global  advanced  cluster_network  172.27.10.0/24  *
global  advanced  ms_bind_ipv4  true
global  advanced  ms_bind_ipv6  false
global  dev  osd_crush_chooseleaf_type  0  *
global  advanced  osd_deep_scrub_interval  3628800.000000
global  advanced  osd_pool_default_crush_rule  -1
global  advanced  public_network  172.27.12.0/23  *
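
Note that the ceph config dump output above also carries osd_deep_scrub_interval at the osd level (1209600 s), and a setting on a more specific section takes precedence over global. To make sure the new interval actually applies to the OSDs, set it at the osd level as well, or drop the osd-level entry so the global value takes effect:
# ceph config set osd osd_deep_scrub_interval 3628800
# ceph config rm osd osd_deep_scrub_interval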
# ceph -s
  cluster:
    id:     5fa16469-8be4-4457-8a78-12b1910afff7
    health: HEALTH_OK

  services:
    mon: 5 daemons, quorum compute01,controller01,controller02,controller03,compute02 (age 2d)
    mgr: controller02(active, since 8d), standbys: controller01, controller03
    mds: 1/1 daemons up, 2 standby
    osd: 96 osds: 96 up (since 34h), 96 in (since 34h)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   14 pools, 881 pgs
    objects: 7.69M objects, 29 TiB
    usage:   105 TiB used, 611 TiB / 716 TiB avail
    pgs:     880 active+clean
             1   active+clean+scrubbing+deep+repair

  io:
    client: 14 KiB/s rd, 14 MiB/s wr, 7 op/s rd, 1.08k op/s wr
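
With the interval raised and the in-flight deep scrub and repair on pg 6.10 left to complete, the cluster returns to HEALTH_OK.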