Ceph 存储集群中 OSD 出现 down 状态的解决过程
以下记录了排查与修复一个 down 掉的 OSD（osd.28，对应磁盘 /dev/sdg）的完整过程。首先查看本机的 LVM 卷与块设备情况:
$ lvscan
File descriptor 5 (pipe:) leaked on lvscan invocation. Parent PID 2382294: -bash
ACTIVE '/dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37' inherit
ACTIVE '/dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce' inherit
ACTIVE '/dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47' inherit
ACTIVE '/dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f' inherit
ACTIVE '/dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7' inherit
ACTIVE '/dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e' inherit
ACTIVE '/dev/vg_thinker/lv_thinker' inherit
ACTIVE '/dev/vg_thinker/remswap' inherit
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 7.3T0 disk
├─sda1 8:1 0 1M0 part
├─sda2 8:2 0 3G0 part /boot
├─sda3 8:3 0 488.3G0 part /home
├─sda4 8:4 0 195.3G0 part /
├─sda5 8:5 097.7G0 part
├─sda6 8:6 097.7G0 part
│ └─vg_thinker-lv_thinker 253:3 097.7G0 lvm/thinker/rem
├─sda7 8:7 0 1.3T0 part
│ └─vg_thinker-remswap 253:7 0 32G0 lvm
└─sda8 8:8 0 4.2T0 part
sdb 8:16 014.6T0 disk
└─ceph--a0e4549d--3cfb--4741--932d--81a4c07233f5-osd--block--bfe4f8b7--90a4--47bf--8cd9--9d9765f0c24e 253:6 014.6T0 lvm
sdc 8:32 014.6T0 disk
└─ceph--c6cbb4c7--9fd7--46bf--b990--493df45680ca-osd--block--132e71ba--344a--41f7--a162--34bb94bf43d7 253:5 014.6T0 lvm
sdd 8:48 014.6T0 disk
└─ceph--64c8725f--bdfd--4968--9f9c--8d212f0fc336-osd--block--8f39ebbf--9cba--4c32--a807--c2628ca2bd1f 253:2 014.6T0 lvm
sde 8:64 014.6T0 disk
└─ceph--302f6a8d--d98d--4d4a--b049--d0d2374de24f-osd--block--1fd96d27--1fb1--4061--b700--5fc0c72e5f47 253:1 014.6T0 lvm
sdf 8:80 014.6T0 disk
└─ceph--05b950c4--4b29--412e--840e--eebf630c49c6-osd--block--e4ef12f6--7ebd--4e3c--bcd9--c9909dcb10ce 253:4 014.6T0 lvm
sdg 8:96 014.6T0 disk
└─ceph--dd0e9bb1--bd80--4de4--8c66--7a5fc21454ff-osd--block--3a460a49--70ca--4f28--a719--0fa7a6b30a37 253:0 014.6T0 lvm
sdh 8:112014.6T0 disk
└─sdh1 8:113014.6T0 part
rbd0 252:0 0 20T0 disk
└─rbd0p1 252:1 0 20T0 part
nvme0n1 259:0 0 931.5G0 disk
└─nvme0n1p1 259:1 0 931.5G0 part
$ ceph-volume lvm list
====== osd.25 ======
/dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block device /dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block uuid PZe96y-KzSv-YMAh-pqc5-mXj0-wbUH-oT1aNz
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
osd id 25
osdspec affinity None
type block
vdo 0
devices /dev/sdb
====== osd.26 ======
/dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block device /dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block uuid OPQkXF-WZR1-dTzF-7eZM-OiER-lCg6-gId7jr
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
osd id 26
osdspec affinity None
type block
vdo 0
devices /dev/sdf
====== osd.27 ======
/dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block device /dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block uuid drIezU-3a23-EiUk-Jijj-kxtY-nAKi-EClJ0J
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 132e71ba-344a-41f7-a162-34bb94bf43d7
osd id 27
osdspec affinity None
type block
vdo 0
devices /dev/sdc
====== osd.28 ======
/dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37
block device /dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37
block uuid mkGkjm-p89M-JN30-LDrn-qsgF-FI2c-Jv4IEo
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 3a460a49-70ca-4f28-a719-0fa7a6b30a37
osd id 28
osdspec affinity None
type block
vdo 0
devices /dev/sdg
====== osd.29 ======
/dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block device /dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block uuid lOTPQt-Hjix-1NWb-ur7n-yXCL-xxMp-U8a7tf
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
osd id 29
osdspec affinity None
type block
vdo 0
devices /dev/sdd
====== osd.4 =======
/dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block device /dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block uuid reOm7n-vELk-Cqd2-IXXV-K1qH-luJn-RfXSOE
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 1fd96d27-1fb1-4061-b700-5fc0c72e5f47
osd id 4
osdspec affinity None
type block
vdo 0
devices /dev/sde
root@gm268-5 10:14:58 ~
目的是找出各个 OSD（osd id）与物理磁盘设备（/dev/sdX）及其 LVM 逻辑卷之间的对应关系，以便确认 down 掉的 osd.28 对应的是 /dev/sdg:
$ ceph-volume lvm list
====== osd.25 ======
/dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block device /dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block uuid PZe96y-KzSv-YMAh-pqc5-mXj0-wbUH-oT1aNz
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
osd id 25
osdspec affinity None
type block
vdo 0
devices /dev/sdb
====== osd.26 ======
/dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block device /dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block uuid OPQkXF-WZR1-dTzF-7eZM-OiER-lCg6-gId7jr
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
osd id 26
osdspec affinity None
type block
vdo 0
devices /dev/sdf
====== osd.27 ======
/dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block device /dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block uuid drIezU-3a23-EiUk-Jijj-kxtY-nAKi-EClJ0J
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 132e71ba-344a-41f7-a162-34bb94bf43d7
osd id 27
osdspec affinity None
type block
vdo 0
devices /dev/sdc
====== osd.28 ======
/dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37
block device /dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37
block uuid mkGkjm-p89M-JN30-LDrn-qsgF-FI2c-Jv4IEo
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 3a460a49-70ca-4f28-a719-0fa7a6b30a37
osd id 28
osdspec affinity None
type block
vdo 0
devices /dev/sdg
====== osd.29 ======
/dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block device /dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block uuid lOTPQt-Hjix-1NWb-ur7n-yXCL-xxMp-U8a7tf
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
osd id 29
osdspec affinity None
type block
vdo 0
devices /dev/sdd
====== osd.4 =======
/dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block device /dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block uuid reOm7n-vELk-Cqd2-IXXV-K1qH-luJn-RfXSOE
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 1fd96d27-1fb1-4061-b700-5fc0c72e5f47
osd id 4
osdspec affinity None
type block
vdo 0
devices /dev/sde
root@gm268-5 10:14:58 ~ $ ceph osd rm osd.28
removed osd.28
root@gm268-5 10:51:48 ~
$ ceph osd tree
ID CLASSWEIGHT TYPE NAME STATUSREWEIGHTPRI-AFF
-1 582.11163root default
-3 101.87065 host gm268-1
0 hdd 14.55269 osd.0 up 1.000001.00000
5 hdd 14.55299 osd.5 up 1.000001.00000
6 hdd 14.55299 osd.6 up 1.000001.00000
7 hdd 14.55299 osd.7 up 1.000001.00000
8 hdd 14.55299 osd.8 up 1.000001.00000
9 hdd 14.55299 osd.9 up 1.000001.00000
30 hdd 14.55299 osd.30 up 1.000001.00000
-5 101.86900 host gm268-2
1 hdd 14.55299 osd.1 up 1.000001.00000
10 hdd 14.55299 osd.10 up 1.000001.00000
11 hdd 14.55299 osd.11 up 1.000001.00000
12 hdd 14.55299 osd.12 up 1.000001.00000
13 hdd 14.55299 osd.13 up 1.000001.00000
14 hdd 14.55299 osd.14 up 1.000001.00000
31 hdd 14.55299 osd.31 up 1.000001.00000
-7 101.86900 host gm268-3
2 hdd 14.55299 osd.2 up 1.000001.00000
15 hdd 14.55299 osd.15 up 1.000001.00000
16 hdd 14.55299 osd.16 up 1.000001.00000
17 hdd 14.55299 osd.17 up 1.000001.00000
18 hdd 14.55299 osd.18 up 1.000001.00000
19 hdd 14.55299 osd.19 up 1.000001.00000
32 hdd 14.55299 osd.32 up 1.000001.00000
-9 101.86900 host gm268-4
3 hdd 14.55299 osd.3 up 1.000001.00000
20 hdd 14.55299 osd.20 up 1.000001.00000
21 hdd 14.55299 osd.21 up 1.000001.00000
22 hdd 14.55299 osd.22 up 1.000001.00000
23 hdd 14.55299 osd.23 up 1.000001.00000
24 hdd 14.55299 osd.24 up 1.000001.00000
33 hdd 14.55299 osd.33 up 1.000001.00000
-11 72.76497 host gm268-5
4 hdd 14.55299 osd.4 up 1.000001.00000
25 hdd 14.55299 osd.25 up 1.000001.00000
26 hdd 14.55299 osd.26 up 1.000001.00000
27 hdd 14.55299 osd.27 up 1.000001.00000
29 hdd 14.55299 osd.29 up 1.000001.00000
-13 101.86900 host tp266-1
34 hdd 14.55299 osd.34 up 1.000001.00000
35 hdd 14.55299 osd.35 up 1.000001.00000
36 hdd 14.55299 osd.36 up 1.000001.00000
37 hdd 14.55299 osd.37 up 1.000001.00000
38 hdd 14.55299 osd.38 up 1.000001.00000
39 hdd 14.55299 osd.39 up 1.000001.00000
40 hdd 14.55299 osd.40 up 1.000001.00000
root@gm268-5 10:51:55 ~
$ ceph -s
cluster:
id: ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
health: HEALTH_WARN
1 hosts fail cephadm check
failed to probe daemons or devices
Degraded data redundancy: 117992/84949880 objects degraded (0.139%), 28 pgs degraded
services:
mon: 3 daemons, quorum gm268-1,gm268-2,gm268-3 (age 5d)
mgr: gm268-2.zttohs(active, since 5d), standbys: gm268-3.sjagqo, gm268-1.jgdvxs
mds: cephfs:1 {0=cephfs.gm268-2.xdsdoz=up:active} 2 up:standby
osd: 40 osds: 40 up (since 101s), 40 in (since 101s); 804 remapped pgs
data:
pools: 4 pools, 10241 pgs
objects: 42.47M objects, 115 TiB
usage: 232 TiB used, 350 TiB / 582 TiB avail
pgs: 117992/84949880 objects degraded (0.139%)
3274408/84949880 objects misplaced (3.855%)
9437 active+clean
739 active+remapped+backfill_wait
37 active+remapped+backfilling
26 active+undersized+degraded+remapped+backfill_wait
2 active+undersized+degraded+remapped+backfilling
io:
client: 1.4 GiB/s rd, 1.5 GiB/s wr, 1.02k op/s rd, 870 op/s wr
recovery: 754 MiB/s, 371 keys/s, 371 objects/s
progress:
Rebalancing after osd.4 marked in (2m)
[===.........................] (remaining: 14m)
Rebalancing after osd.29 marked in (99s)
[............................]
Rebalancing after osd.25 marked in (2m)
[=======.....................] (remaining: 6m)
Rebalancing after osd.26 marked in (2m)
[=========...................] (remaining: 4m)
Rebalancing after osd.27 marked in (2m)
[==========..................] (remaining: 3m)
root@gm268-5 10:52:03 ~
$ lvremove /dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37
Do you really want to remove active logical volume ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff/osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37? : y
Logical volume "osd-block-3a460a49-70ca-4f28-a719-0fa7a6b30a37" successfully removed
root@gm268-5 10:52:35 ~
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 7.3T0 disk
├─sda1 8:1 0 1M0 part
├─sda2 8:2 0 3G0 part /boot
├─sda3 8:3 0 488.3G0 part /home
├─sda4 8:4 0 195.3G0 part /
├─sda5 8:5 097.7G0 part
├─sda6 8:6 097.7G0 part
│ └─vg_thinker-lv_thinker 253:0 097.7G0 lvm/thinker/rem
├─sda7 8:7 0 1.3T0 part
│ └─vg_thinker-remswap 253:7 0 32G0 lvm
└─sda8 8:8 0 4.2T0 part
sdb 8:16 014.6T0 disk
└─ceph--a0e4549d--3cfb--4741--932d--81a4c07233f5-osd--block--bfe4f8b7--90a4--47bf--8cd9--9d9765f0c24e 253:3 014.6T0 lvm
sdc 8:32 014.6T0 disk
└─ceph--c6cbb4c7--9fd7--46bf--b990--493df45680ca-osd--block--132e71ba--344a--41f7--a162--34bb94bf43d7 253:2 014.6T0 lvm
sdd 8:48 014.6T0 disk
└─ceph--64c8725f--bdfd--4968--9f9c--8d212f0fc336-osd--block--8f39ebbf--9cba--4c32--a807--c2628ca2bd1f 253:5 014.6T0 lvm
sde 8:64 014.6T0 disk
└─ceph--302f6a8d--d98d--4d4a--b049--d0d2374de24f-osd--block--1fd96d27--1fb1--4061--b700--5fc0c72e5f47 253:4 014.6T0 lvm
sdf 8:80 014.6T0 disk
└─ceph--05b950c4--4b29--412e--840e--eebf630c49c6-osd--block--e4ef12f6--7ebd--4e3c--bcd9--c9909dcb10ce 253:1 014.6T0 lvm
sdg 8:96 014.6T0 disk
sdh 8:112014.6T0 disk
└─sdh1 8:113014.6T0 part
rbd0 252:0 0 20T0 disk
└─rbd0p1 252:1 0 20T0 part
nvme0n1 259:0 0 931.5G0 disk
└─nvme0n1p1 259:1 0 931.5G0 part
root@gm268-5 10:52:46 ~
$ vgremove /dev/ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff
Volume group "ceph-dd0e9bb1-bd80-4de4-8c66-7a5fc21454ff" successfully removed
root@gm268-5 10:53:16 ~
$ ceph-volume lvm list
====== osd.25 ======
/dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block device /dev/ceph-a0e4549d-3cfb-4741-932d-81a4c07233f5/osd-block-bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
block uuid PZe96y-KzSv-YMAh-pqc5-mXj0-wbUH-oT1aNz
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid bfe4f8b7-90a4-47bf-8cd9-9d9765f0c24e
osd id 25
osdspec affinity None
type block
vdo 0
devices /dev/sdb
====== osd.26 ======
/dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block device /dev/ceph-05b950c4-4b29-412e-840e-eebf630c49c6/osd-block-e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
block uuid OPQkXF-WZR1-dTzF-7eZM-OiER-lCg6-gId7jr
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid e4ef12f6-7ebd-4e3c-bcd9-c9909dcb10ce
osd id 26
osdspec affinity None
type block
vdo 0
devices /dev/sdf
====== osd.27 ======
/dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block device /dev/ceph-c6cbb4c7-9fd7-46bf-b990-493df45680ca/osd-block-132e71ba-344a-41f7-a162-34bb94bf43d7
block uuid drIezU-3a23-EiUk-Jijj-kxtY-nAKi-EClJ0J
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 132e71ba-344a-41f7-a162-34bb94bf43d7
osd id 27
osdspec affinity None
type block
vdo 0
devices /dev/sdc
====== osd.29 ======
/dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block device /dev/ceph-64c8725f-bdfd-4968-9f9c-8d212f0fc336/osd-block-8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
block uuid lOTPQt-Hjix-1NWb-ur7n-yXCL-xxMp-U8a7tf
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 8f39ebbf-9cba-4c32-a807-c2628ca2bd1f
osd id 29
osdspec affinity None
type block
vdo 0
devices /dev/sdd
====== osd.4 =======
/dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block device /dev/ceph-302f6a8d-d98d-4d4a-b049-d0d2374de24f/osd-block-1fd96d27-1fb1-4061-b700-5fc0c72e5f47
block uuid reOm7n-vELk-Cqd2-IXXV-K1qH-luJn-RfXSOE
cephx lockbox secret
cluster fsid ce68aab8-8f46-11ed-88c0-ac1f6b3a30b9
cluster name ceph
crush device class
encrypted 0
osd fsid 1fd96d27-1fb1-4061-b700-5fc0c72e5f47
osd id 4
osdspec affinity None
type block
vdo 0
devices /dev/sde
root@gm268-5 10:53:27 ~
$ ceph orch daemon add osd gm268-5:/dev/sdg
Created osd(s) 28 on host 'gm268-5'
root@gm268-1 10:57:24 ~
$ ceph osd tree
ID CLASSWEIGHT TYPE NAME STATUSREWEIGHTPRI-AFF
-1 596.66431root default
-3 101.87065 host gm268-1
0 hdd 14.55269 osd.0 up 1.000001.00000
5 hdd 14.55299 osd.5 up 1.000001.00000
6 hdd 14.55299 osd.6 up 1.000001.00000
7 hdd 14.55299 osd.7 up 1.000001.00000
8 hdd 14.55299 osd.8 up 1.000001.00000
9 hdd 14.55299 osd.9 up 1.000001.00000
30 hdd 14.55299 osd.30 up 1.000001.00000
-5 101.86900 host gm268-2
1 hdd 14.55299 osd.1 up 1.000001.00000
10 hdd 14.55299 osd.10 up 1.000001.00000
11 hdd 14.55299 osd.11 up 1.000001.00000
12 hdd 14.55299 osd.12 up 1.000001.00000
13 hdd 14.55299 osd.13 up 1.000001.00000
14 hdd 14.55299 osd.14 up 1.000001.00000
31 hdd 14.55299 osd.31 up 1.000001.00000
-7 101.86900 host gm268-3
2 hdd 14.55299 osd.2 up 1.000001.00000
15 hdd 14.55299 osd.15 up 1.000001.00000
16 hdd 14.55299 osd.16 up 1.000001.00000
17 hdd 14.55299 osd.17 up 1.000001.00000
18 hdd 14.55299 osd.18 up 1.000001.00000
19 hdd 14.55299 osd.19 up 1.000001.00000
32 hdd 14.55299 osd.32 up 1.000001.00000
-9 101.86900 host gm268-4
3 hdd 14.55299 osd.3 up 1.000001.00000
20 hdd 14.55299 osd.20 up 1.000001.00000
21 hdd 14.55299 osd.21 up 1.000001.00000
22 hdd 14.55299 osd.22 up 1.000001.00000
23 hdd 14.55299 osd.23 up 1.000001.00000
24 hdd 14.55299 osd.24 up 1.000001.00000
33 hdd 14.55299 osd.33 up 1.000001.00000
-11 87.31766 host gm268-5
4 hdd 14.55299 osd.4 up 1.000001.00000
25 hdd 14.55299 osd.25 up 1.000001.00000
26 hdd 14.55299 osd.26 up 1.000001.00000
27 hdd 14.55299 osd.27 up 1.000001.00000
28 hdd 14.55269 osd.28 up 1.000001.00000
29 hdd 14.55299 osd.29 up 1.000001.00000
-13 101.86900 host tp266-1
34 hdd 14.55299 osd.34 up 1.000001.00000
35 hdd 14.55299 osd.35 up 1.000001.00000
36 hdd 14.55299 osd.36 up 1.000001.00000
37 hdd 14.55299 osd.37 up 1.000001.00000
38 hdd 14.55299 osd.38 up 1.000001.00000
39 hdd 14.55299 osd.39 up 1.000001.00000
40 hdd 14.55299 osd.40 up 1.000001.00000
页:
[1]