admin 发表于 2023-2-28 09:58:29

HEALTH_ERR 1 pgs inconsistent; 1 pgs repair; 1 scrub errors pg 1.141e is active+

HEALTH_ERR 1 pgs inconsistent; 1 pgs repair; 1 scrub errors
pg 1.141e is active+clean+scrubbing+deep+inconsistent+repair, acting
1 scrub errors


# ceph pg deep-scrub 1.141e
instructing pg 1.141e on osd.86 to deep-scrub
# ceph pg repair 1.141e
instructing pg 1.141e on osd.86 to repair
# ceph pg repair 1.141e
instructing pg 1.141e on osd.86 to repair
# ceph pg repair 1.141e
instructing pg 1.141e on osd.86 to repair
# ceph health detail
HEALTH_ERR 1 pgs inconsistent; 1 scrub errors
pg 1.141e is active+clean+inconsistent, acting
1 scrub errors


# ceph osd set noout
set noout
# ceph -s
    cluster d385bef6-6778-43cc-8755-4e1d5ef5485e
   health HEALTH_ERR
            1 pgs inconsistent
            1 scrub errors
            noout flag(s) set
   monmap e3: 3 mons at {}
            election epoch 10, quorum 0,1,2
   osdmap e219: 90 osds: 90 up, 90 in
            flags noout,sortbitwise,require_jewel_osds
      pgmap v40230148: 8192 pgs, 1 pools, 29512 GB data, 7460 kobjects
            88344 GB used, 241 TB / 327 TB avail
                8185 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+inconsistent
client io 484 MB/s rd, 40044 kB/s wr, 8192 op/s rd, 11982 op/s wr

# systemctl stop ceph-osd@86.service
# systemctl status ceph-osd@86.service
● ceph-osd@86.service - Ceph object storage daemon
   Loaded: loaded (/usr/lib/systemd/system/ceph-osd@.service; enabled; vendor preset: disabled)
   Active: inactive (dead) since Tue 2023-02-28 09:33:18 CST; 3s ago
Main PID: 19110 (code=exited, status=0/SUCCESS)

Feb 27 19:31:09 cn09 ceph-osd: 2023-02-27 19:31:09.098552 7fddf529b700 -1 log_channel(cluster) log : 1.141e shard 26: soid 1:782c65a9::...read error
Feb 27 19:31:46 cn09 ceph-osd: 2023-02-27 19:31:46.357585 7fddf1c96700 -1 log_channel(cluster) log : 1.141e deep-scrub 0 missing, 1 inc...nt objects
Feb 27 19:31:46 cn09 ceph-osd: 2023-02-27 19:31:46.357597 7fddf1c96700 -1 log_channel(cluster) log : 1.141e deep-scrub 1 errors
Feb 28 09:33:14 cn09 ceph-osd: 2023-02-28 09:33:14.304486 7fdde4e85700 -1 osd.86 219 *** Got signal Terminated ***
Feb 28 09:33:14 cn09 systemd: Stopping Ceph object storage daemon...
Feb 28 09:33:14 cn09 ceph-osd: 2023-02-28 09:33:14.397327 7fdde4e85700 -1 osd.86 219 shutdown
Feb 28 09:33:18 cn09 systemd: Stopped Ceph object storage daemon.
Warning: Journal has been rotated since unit was started. Log output is incomplete or unavailable.
Hint: Some lines were ellipsized, use -l to show in full.
# systemctl start ceph-osd@86.service
# systemctl status ceph-osd@86.service
● ceph-osd@86.service - Ceph object storage daemon
   Loaded: loaded (/usr/lib/systemd/system/ceph-osd@.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-02-28 09:33:26 CST; 2s ago
Process: 2460810 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
Main PID: 2460821 (ceph-osd)
   CGroup: /system.slice/system-ceph\x2dosd.slice/ceph-osd@86.service
         └─2460821 /usr/bin/ceph-osd -f --cluster ceph --id 86 --setuser ceph --setgroup ceph

Feb 28 09:33:26 cn09 systemd: Starting Ceph object storage daemon...
Feb 28 09:33:26 cn09 systemd: Started Ceph object storage daemon.
Feb 28 09:33:26 cn09 ceph-osd: starting osd.86 at :/0 osd_data /var/lib/ceph/osd/ceph-86 /var/lib/ceph/osd/ceph-86/journal
# systemctl status ceph-osd@86.service
● ceph-osd@86.service - Ceph object storage daemon
   Loaded: loaded (/usr/lib/systemd/system/ceph-osd@.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-02-28 09:33:26 CST; 3s ago
Process: 2460810 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
Main PID: 2460821 (ceph-osd)
   CGroup: /system.slice/system-ceph\x2dosd.slice/ceph-osd@86.service
         └─2460821 /usr/bin/ceph-osd -f --cluster ceph --id 86 --setuser ceph --setgroup ceph

Feb 28 09:33:26 cn09 systemd: Starting Ceph object storage daemon...
Feb 28 09:33:26 cn09 systemd: Started Ceph object storage daemon.
Feb 28 09:33:26 cn09 ceph-osd: starting osd.86 at :/0 osd_data /var/lib/ceph/osd/ceph-86 /var/lib/ceph/osd/ceph-86/journal
# ceph -s
    cluster d385bef6-6778-43cc-8755-4e1d5ef5485e
   health HEALTH_ERR
            240 pgs degraded
            1 pgs inconsistent
            26 pgs stuck unclean
            240 pgs undersized
            recovery 223763/22917261 objects degraded (0.976%)
            1 scrub errors
            1/90 in osds are down
            noout flag(s) set
   monmap e3: 3 mons at {}
            election epoch 10, quorum 0,1,2
   osdmap e223: 90 osds: 89 up, 90 in; 240 remapped pgs
            flags noout,sortbitwise,require_jewel_osds
      pgmap v40230185: 8192 pgs, 1 pools, 29512 GB data, 7460 kobjects
            88344 GB used, 241 TB / 327 TB avail
            223763/22917261 objects degraded (0.976%)
                7949 active+clean
               239 active+undersized+degraded
                   3 active+clean+scrubbing+deep
                   1 active+undersized+degraded+inconsistent
client io 16286 kB/s rd, 29898 kB/s wr, 3339 op/s rd, 7408 op/s wr
# watch ceph -s
# systemctl status ceph-osd@86.service
● ceph-osd@86.service - Ceph object storage daemon
   Loaded: loaded (/usr/lib/systemd/system/ceph-osd@.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-02-28 09:33:26 CST; 8min ago
Process: 2460810 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
Main PID: 2460821 (ceph-osd)
   CGroup: /system.slice/system-ceph\x2dosd.slice/ceph-osd@86.service
         └─2460821 /usr/bin/ceph-osd -f --cluster ceph --id 86 --setuser ceph --setgroup ceph

Feb 28 09:33:26 cn09 systemd: Starting Ceph object storage daemon...
Feb 28 09:33:26 cn09 systemd: Started Ceph object storage daemon.
Feb 28 09:33:26 cn09 ceph-osd: starting osd.86 at :/0 osd_data /var/lib/ceph/osd/ceph-86 /var/lib/ceph/osd/ceph-86/journal
Feb 28 09:33:45 cn09 ceph-osd: 2023-02-28 09:33:45.106009 7fe7b91d2f80 -1 leveldb: Compacting leveldb store...
Feb 28 09:33:46 cn09 ceph-osd: 2023-02-28 09:33:46.428126 7fe7b91d2f80 -1 leveldb: Finished compacting leveldb store
Feb 28 09:33:50 cn09 ceph-osd: 2023-02-28 09:33:50.066050 7fe7b91d2f80 -1 osd.86 219 log_to_monitors {default=true}
# watch ceph -s
# ceph osd unset noout
unset noout
# watch ceph -s




admin 发表于 2023-11-24 00:06:43

解决过程:发现无法正常同步,通过检查并调整参数,达到修复的目的:
# ceph daemon osd.0 config show |egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
    "osd_max_backfills": "1",
    "osd_recovery_max_active": "1",
    "osd_recovery_op_priority": "0",
"osd_recovery_op_priority": "0",默认是10

修改为默认值10

# ceph tell osd.* injectargs osd_recovery_op_priority=10
osd.0: osd_recovery_op_priority = '10' (unchangeable)
osd.1: osd_recovery_op_priority = '10' (unchangeable)
osd.2: osd_recovery_op_priority = '10' (unchangeable)
osd.3: osd_recovery_op_priority = '10' (unchangeable)
osd.4: osd_recovery_op_priority = '10' (unchangeable)
osd.5: osd_recovery_op_priority = '10' (unchangeable)
osd.6: osd_recovery_op_priority = '10' (unchangeable)
osd.7: osd_recovery_op_priority = '10' (unchangeable)
osd.8: osd_recovery_op_priority = '10' (unchangeable)
osd.9: osd_recovery_op_priority = '10' (unchangeable)
osd.10: osd_recovery_op_priority = '10' (unchangeable)
osd.11: osd_recovery_op_priority = '10' (unchangeable)
osd.12: osd_recovery_op_priority = '10' (unchangeable)
osd.13: osd_recovery_op_priority = '10' (unchangeable)
osd.14: osd_recovery_op_priority = '10' (unchangeable)
osd.15: osd_recovery_op_priority = '10' (unchangeable)
osd.16: osd_recovery_op_priority = '10' (unchangeable)
osd.17: osd_recovery_op_priority = '10' (unchangeable)
osd.18: osd_recovery_op_priority = '10' (unchangeable)
osd.19: osd_recovery_op_priority = '10' (unchangeable)
osd.20: osd_recovery_op_priority = '10' (unchangeable)
osd.21: osd_recovery_op_priority = '10' (unchangeable)
osd.22: osd_recovery_op_priority = '10' (unchangeable)
osd.23: osd_recovery_op_priority = '10' (unchangeable)
osd.24: osd_recovery_op_priority = '10' (unchangeable)
osd.25: osd_recovery_op_priority = '10' (unchangeable)
osd.26: osd_recovery_op_priority = '10' (unchangeable)
osd.27: osd_recovery_op_priority = '10' (unchangeable)
osd.28: osd_recovery_op_priority = '10' (unchangeable)
osd.29: osd_recovery_op_priority = '10' (unchangeable)
osd.30: osd_recovery_op_priority = '10' (unchangeable)
osd.31: osd_recovery_op_priority = '10' (unchangeable)
osd.32: osd_recovery_op_priority = '10' (unchangeable)
osd.33: osd_recovery_op_priority = '10' (unchangeable)
osd.34: osd_recovery_op_priority = '10' (unchangeable)
osd.35: osd_recovery_op_priority = '10' (unchangeable)
osd.36: osd_recovery_op_priority = '10' (unchangeable)
osd.37: osd_recovery_op_priority = '10' (unchangeable)
osd.38: osd_recovery_op_priority = '10' (unchangeable)
osd.39: osd_recovery_op_priority = '10' (unchangeable)
osd.40: osd_recovery_op_priority = '10' (unchangeable)
osd.41: osd_recovery_op_priority = '10' (unchangeable)
osd.42: osd_recovery_op_priority = '10' (unchangeable)
osd.43: osd_recovery_op_priority = '10' (unchangeable)
osd.44: osd_recovery_op_priority = '10' (unchangeable)
osd.45: osd_recovery_op_priority = '10' (unchangeable)
osd.46: osd_recovery_op_priority = '10' (unchangeable)
osd.47: osd_recovery_op_priority = '10' (unchangeable)
osd.48: osd_recovery_op_priority = '10' (unchangeable)
osd.49: osd_recovery_op_priority = '10' (unchangeable)
osd.50: osd_recovery_op_priority = '10' (unchangeable)
osd.51: osd_recovery_op_priority = '10' (unchangeable)
osd.52: osd_recovery_op_priority = '10' (unchangeable)
osd.53: osd_recovery_op_priority = '10' (unchangeable)
Error ENXIO: problem getting command descriptions from osd.54
osd.54: problem getting command descriptions from osd.54
osd.55: osd_recovery_op_priority = '10' (unchangeable)
osd.56: osd_recovery_op_priority = '10' (unchangeable)
osd.57: osd_recovery_op_priority = '10' (unchangeable)
osd.58: osd_recovery_op_priority = '10' (unchangeable)
osd.59: osd_recovery_op_priority = '10' (unchangeable)
osd.60: osd_recovery_op_priority = '10' (unchangeable)
osd.61: osd_recovery_op_priority = '10' (unchangeable)
osd.62: osd_recovery_op_priority = '10' (unchangeable)
osd.63: osd_recovery_op_priority = '10' (unchangeable)
osd.64: osd_recovery_op_priority = '10' (unchangeable)
osd.65: osd_recovery_op_priority = '10' (unchangeable)
osd.66: osd_recovery_op_priority = '10' (unchangeable)
osd.67: osd_recovery_op_priority = '10' (unchangeable)
osd.68: osd_recovery_op_priority = '10' (unchangeable)
osd.69: osd_recovery_op_priority = '10' (unchangeable)
osd.70: osd_recovery_op_priority = '10' (unchangeable)
osd.71: osd_recovery_op_priority = '10' (unchangeable)
osd.72: osd_recovery_op_priority = '10' (unchangeable)
osd.73: osd_recovery_op_priority = '10' (unchangeable)
osd.74: osd_recovery_op_priority = '10' (unchangeable)
osd.75: osd_recovery_op_priority = '10' (unchangeable)
osd.76: osd_recovery_op_priority = '10' (unchangeable)
osd.77: osd_recovery_op_priority = '10' (unchangeable)
osd.78: osd_recovery_op_priority = '10' (unchangeable)
osd.79: osd_recovery_op_priority = '10' (unchangeable)
osd.80: osd_recovery_op_priority = '10' (unchangeable)
osd.81: osd_recovery_op_priority = '10' (unchangeable)
osd.82: osd_recovery_op_priority = '10' (unchangeable)
osd.83: osd_recovery_op_priority = '10' (unchangeable)
osd.84: osd_recovery_op_priority = '10' (unchangeable)
osd.85: osd_recovery_op_priority = '10' (unchangeable)
osd.86: osd_recovery_op_priority = '10' (unchangeable)
osd.87: osd_recovery_op_priority = '10' (unchangeable)
osd.88: osd_recovery_op_priority = '10' (unchangeable)
osd.89: osd_recovery_op_priority = '10' (unchangeable)
osd.90: osd_recovery_op_priority = '10' (unchangeable)
osd.91: osd_recovery_op_priority = '10' (unchangeable)
osd.92: osd_recovery_op_priority = '10' (unchangeable)
osd.93: osd_recovery_op_priority = '10' (unchangeable)
osd.94: osd_recovery_op_priority = '10' (unchangeable)
osd.95: osd_recovery_op_priority = '10' (unchangeable)
osd.96: osd_recovery_op_priority = '10' (unchangeable)
osd.97: osd_recovery_op_priority = '10' (unchangeable)
osd.98: osd_recovery_op_priority = '10' (unchangeable)
osd.99: osd_recovery_op_priority = '10' (unchangeable)
osd.100: osd_recovery_op_priority = '10' (unchangeable)
osd.101: osd_recovery_op_priority = '10' (unchangeable)
osd.102: osd_recovery_op_priority = '10' (unchangeable)
osd.103: osd_recovery_op_priority = '10' (unchangeable)
osd.104: osd_recovery_op_priority = '10' (unchangeable)
osd.105: osd_recovery_op_priority = '10' (unchangeable)
osd.106: osd_recovery_op_priority = '10' (unchangeable)
osd.107: osd_recovery_op_priority = '10' (unchangeable)
osd.108: osd_recovery_op_priority = '10' (unchangeable)
osd.109: osd_recovery_op_priority = '10' (unchangeable)
osd.110: osd_recovery_op_priority = '10' (unchangeable)
osd.111: osd_recovery_op_priority = '10' (unchangeable)
osd.112: osd_recovery_op_priority = '10' (unchangeable)
osd.113: osd_recovery_op_priority = '10' (unchangeable)
osd.114: osd_recovery_op_priority = '10' (unchangeable)
osd.115: osd_recovery_op_priority = '10' (unchangeable)
osd.116: osd_recovery_op_priority = '10' (unchangeable)
osd.117: osd_recovery_op_priority = '10' (unchangeable)
osd.118: osd_recovery_op_priority = '10' (unchangeable)
osd.119: osd_recovery_op_priority = '10' (unchangeable)
osd.120: osd_recovery_op_priority = '10' (unchangeable)
osd.121: osd_recovery_op_priority = '10' (unchangeable)
osd.122: osd_recovery_op_priority = '10' (unchangeable)
osd.123: osd_recovery_op_priority = '10' (unchangeable)
Error ENXIO: problem getting command descriptions from osd.124
osd.124: problem getting command descriptions from osd.124
osd.125: osd_recovery_op_priority = '10' (unchangeable)
osd.126: osd_recovery_op_priority = '10' (unchangeable)
Error ENXIO: problem getting command descriptions from osd.127
osd.127: problem getting command descriptions from osd.127
osd.128: osd_recovery_op_priority = '10' (unchangeable)
osd.129: osd_recovery_op_priority = '10' (unchangeable)
osd.130: osd_recovery_op_priority = '10' (unchangeable)
You have new mail in /var/spool/mail/root

所有的osd都修改:

之前已执行过 ceph pg repair <pg-id>


修复过程中的状态变化:
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370679: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10229 active+clean
                   9 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 91086 kB/s rd, 67615 kB/s wr, 3119 op/s rd, 3569 op/s wr
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370681: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10230 active+clean
                   8 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 104 MB/s rd, 67033 kB/s wr, 3479 op/s rd, 5013 op/s wr
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370783: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10232 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 147 MB/s rd, 57692 kB/s wr, 4584 op/s rd, 5944 op/s wr
You have mail in /var/spool/mail/root
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370785: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10232 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 104 MB/s rd, 49581 kB/s wr, 2830 op/s rd, 3282 op/s wr
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370786: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10232 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 95762 kB/s rd, 44372 kB/s wr, 2258 op/s rd, 2660 op/s wr


# ceph daemon osd.0 config show |egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
    "osd_max_backfills": "1",
    "osd_recovery_max_active": "1",
    "osd_recovery_op_priority": "10",
# ceph daemon osd.0 config show |egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
    "osd_max_backfills": "1",
    "osd_recovery_max_active": "1",
    "osd_recovery_op_priority": "10",
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_ERR
            1 pgs inconsistent
            1 pgs repair
            1 scrub errors
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370799: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10232 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
                   1 active+clean+scrubbing+deep+inconsistent+repair
client io 134 MB/s rd, 117 MB/s wr, 3865 op/s rd, 4949 op/s wr


# ceph daemon osd.0 config show |egrep "osd_recovery_max_active|osd_recovery_op_priority|osd_max_backfills"
    "osd_max_backfills": "1",
    "osd_recovery_max_active": "1",
    "osd_recovery_op_priority": "10",



# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_OK
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370916: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10233 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
client io 195 MB/s rd, 234 MB/s wr, 4404 op/s rd, 5993 op/s wr
You have mail in /var/spool/mail/root

# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_OK
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370921: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10233 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
client io 257 MB/s rd, 358 MB/s wr, 5025 op/s rd, 6587 op/s wr

恢复正常:
# ceph -s
    cluster 70d27aec742e4a95b000-cf37ebba35d0
   health HEALTH_OK
   monmap e3: 3 mons at {compute1=176.12.132.5:6789/0,compute2=176.12.132.6:6789/0,compute3=176.12.132.7:6789/0}
            election epoch 332, quorum 0,1,2 compute1,compute2,compute3
   osdmap e26725: 131 osds: 128 up, 128 in
            flags sortbitwise,require_jewel_osds
      pgmap v169370922: 10240 pgs, 1 pools, 103 TB data, 27242 kobjects
            309 TB used, 155 TB / 465 TB avail
               10233 active+clean
                   6 active+clean+scrubbing+deep
                   1 active+clean+scrubbing
client io 254 MB/s rd, 390 MB/s wr, 5522 op/s rd, 6826 op/s wr


页: [1]
查看完整版本: HEALTH_ERR 1 pgs inconsistent; 1 pgs repair; 1 scrub errors pg 1.141e is active+