Environment Overview

IP Address      Spec      Hostname    Ceph Version
10.15.253.161   c2m8h300  cephnode01  Octopus 15.2.4
10.15.253.193   c2m8h300  cephnode02  Octopus 15.2.4
10.15.253.225   c2m8h300  cephnode03  Octopus 15.2.4
# Linux distribution and kernel version
[root@cephnode01 ~]# cat /etc/redhat-release
CentOS Linux release 8.2.2004 (Core)
[root@cephnode01 ~]# uname -r
4.18.0-193.14.2.el8_2.x86_64
# Network design: keeping the networks separate is recommended
10.15.253.0/24    # Public Network
172.31.253.0/24   # Cluster Network
# Besides the system disk, each Ceph node needs at least two identical
# large-capacity disks; do not partition them
[root@cephnode01 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
├─sda1   8:1    0  200M  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0 18.8G  0 part /
sdb      8:16   0   20G  0 disk
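A quick way to confirm every node presents the same spare disk layout (a minimal sketch; it assumes the passwordless SSH set up in step (3) of 2.1.2 below):

for h in cephnode01 cephnode02 cephnode03; do
  echo "== $h =="
  ssh root@$h "lsblk -d -o NAME,SIZE,TYPE"   # -d lists whole disks only, no partitions
done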
2.1.1 Ceph installation and version selection
https://docs.ceph.com/docs/master/install/
ceph-deploy is a tool for rapid cluster deployment, but the community no longer actively maintains it. It only supports Ceph releases up to Nautilus, and it does not work on RHEL 8, CentOS 8, or newer operating systems.
Since this environment runs CentOS 8, the cephadm deployment tool is used to deploy the Octopus release of Ceph.
2.1.2 Base environment preparation
Run the following on all Ceph nodes; cephnode01 is shown as the example.
#(1) Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
#(2) Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
#(3) On cephnode01, set up passwordless SSH to cephnode02 and cephnode03
dnf install sshpass -y
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
for ip in 161 193 225 ;do sshpass -pZxzn@2020 ssh-copy-id -o StrictHostKeyChecking=no 10.15.253.$ip ;done
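A minimal sanity check that key-based login now works without a prompt; BatchMode makes ssh fail instead of falling back to password authentication:

for ip in 161 193 225; do
  ssh -o BatchMode=yes root@10.15.253.$ip hostname || echo "10.15.253.$ip FAILED"
done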
#(4) On cephnode01, add the hostnames (skip if already configured)
cat >>/etc/hosts <<EOF
10.15.253.161 cephnode01
10.15.253.193 cephnode02
10.15.253.225 cephnode03
EOF
for ip in 193 225 ;do scp -rp /etc/hosts root@10.15.253.$ip:/etc/hosts ;done
#(5) Raise the maximum number of open files
echo "ulimit -SHn 102400" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF
#(6) Kernel parameter tuning
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf
echo 'kernel.pid_max = 4194303' >>/etc/sysctl.conf
# swappiness = 0: only use swap when memory is nearly exhausted
echo "vm.swappiness = 0" >>/etc/sysctl.conf
sysctl -p
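To confirm the values took effect, query them back (a minimal check):

sysctl net.ipv4.ip_forward kernel.pid_max vm.swappiness
# expected: net.ipv4.ip_forward = 1, kernel.pid_max = 4194303, vm.swappiness = 0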
#(7) Synchronize time and set the time zone (skip if already configured)
# Install chrony and have all nodes sync against cephnode01
yum install chrony -y
vim /etc/chrony.conf
server cephnode01 iburst
---
systemctl restart chronyd.service
systemctl enable chronyd.service
chronyc sources
#(8) read_ahead: prefetching data into RAM improves sequential disk reads
echo "8192" > /sys/block/sda/queue/read_ahead_kb
#(9) I/O scheduler: use noop (elevator) for SSDs, deadline for SATA/SAS disks
#https://blog.csdn.net/shipeng1022/article/details/78604910
# Note: on EL8's blk-mq kernel (4.18) the valid names are "none" and "mq-deadline"
echo "mq-deadline" >/sys/block/sda/queue/scheduler
echo "mq-deadline" >/sys/block/sdb/queue/scheduler
#echo "none" >/sys/block/sd[x]/queue/scheduler
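Values echoed into /sys do not survive a reboot. One common way to persist both tunings is a udev rule (a sketch; the rule file name is arbitrary and the match pattern assumes the data disks appear as sd*):

cat > /etc/udev/rules.d/60-ceph-disk-tuning.rules <<'EOF'
# apply read-ahead and scheduler to whole sd* disks at boot/hotplug
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/read_ahead_kb}="8192", ATTR{queue/scheduler}="mq-deadline"
EOF
udevadm control --reload
udevadm trigger --subsystem-match=block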
3. Add the Octopus yum repository
# Quote the heredoc delimiter so $basearch is written literally for yum to expand
cat >>/etc/yum.repos.d/ceph.repo <<'EOF'
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
EOF
yum clean all && yum makecache
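A quick check that the new repos are active before installing anything (minimal sketch):

yum repolist enabled | grep -i ceph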
# Install base utilities
yum install net-tools wget vim bash-completion lrzsz unzip zip -y
4. Deploying with the cephadm tool
https://docs.ceph.com/docs/master/cephadm/install/
cephadm is supported from version 15 (Octopus) onward; ceph-deploy only supports releases up to version 14 (Nautilus).
4.1 Fetch the latest cephadm and make it executable
Run on cephnode01:

[root@cephnode01 ~]# curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
[root@cephnode01 ~]# chmod +x cephadm
[root@cephnode01 ~]# ll
-rwxr-xr-x. 1 root root 184653 Sep 10 12:01 cephadm
4.2 Use cephadm to install the latest Octopus release
The Aliyun mirror repo was already configured by hand above, so there is no need to add repos again per the official documentation.

# Install on all ceph nodes
[root@cephnode01 ~]# dnf install python3 podman -y
[root@cephnode01 ~]# ./cephadm install
...
[root@cephnode01 ~]# which cephadm
/usr/sbin/cephadm
5. Create a new Ceph cluster
5.1 Designate the admin node
Bootstrap the cluster on a network reachable from any host that needs to access Ceph: specify the mon IP, and the generated configuration files are written into /etc/ceph.

[root@cephnode01 ~]# mkdir -p /etc/ceph
[root@cephnode01 ~]# cephadm bootstrap --mon-ip 10.15.253.161
...
             URL: https://cephnode01:8443/
            User: admin
        Password: 6v7xazcbwk
...
You can log in at https://cephnode01:8443/ to verify; the password must be changed on first login.
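Bootstrap also drops the cluster config, admin keyring, and cluster SSH public key into /etc/ceph, which the following steps rely on:

[root@cephnode01 ~]# ls /etc/ceph
ceph.client.admin.keyring  ceph.conf  ceph.pub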
5.2 Map the ceph command onto the host
Cephadm does not require any Ceph packages on the host, but enabling convenient access to the ceph command is recommended.
The cephadm shell command starts a bash shell in a container with all the Ceph packages installed. By default, any config and keyring files found in /etc/ceph on the host are passed into the container environment, so the shell is fully functional.

[root@cephnode01 ~]# cephadm shell
[root@cephnode01 ~]# alias ceph='cephadm shell -- ceph'
[root@cephnode01 ~]# exit
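The alias works because cephadm shell can also run a single command non-interactively, e.g.:

[root@cephnode01 ~]# cephadm shell -- ceph -s   # one-off command inside the container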
# Install the ceph-common package, which provides the ceph, rbd, and mount.ceph commands
[root@cephnode01 ~]# cephadm install ceph-common
# Check the version
[root@cephnode01 ~]# ceph -v
ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)
# Check the status
[root@cephnode01 ~]# ceph status
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum cephnode01 (age 14m)
    mgr: cephnode01.oesega(active, since 10m)
    osd: 0 osds: 0 up (since 31m), 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown
5.3 Add the new nodes to the Ceph cluster

[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode02
[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode03
[root@cephnode01 ~]# ceph orch host add cephnode02
Added host 'cephnode02'
[root@cephnode01 ~]# ceph orch host add cephnode03
Added host 'cephnode03'
5.4 Deploy additional monitors
Label the nodes that should run a mon; here all of them are selected.

[root@cephnode01 ~]# ceph orch host label add cephnode01 mon
Added label mon to host cephnode01
[root@cephnode01 ~]# ceph orch host label add cephnode02 mon
Added label mon to host cephnode02
[root@cephnode01 ~]# ceph orch host label add cephnode03 mon
Added label mon to host cephnode03
[root@cephnode01 ~]# ceph orch host ls
HOST        ADDR        LABELS  STATUS
cephnode01  cephnode01  mon
cephnode02  cephnode02  mon
cephnode03  cephnode03  mon
Tell cephadm to deploy mons based on the label; this step takes a while as each node pulls the image and starts its container.

[root@cephnode01 ~]# ceph orch apply mon label:mon
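To watch the mon daemons come up on each host while the images are pulled, the orchestrator can list them (a minimal check):

[root@cephnode01 ~]# ceph orch ps --daemon-type mon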
To verify the deployment completed, check the other two nodes:

[root@cephnode02 ~]# podman ps -a
...
[root@cephnode02 ~]# podman images
REPOSITORY                    TAG      IMAGE ID      CREATED        SIZE
docker.io/ceph/ceph           v15      852b28cb10de  3 weeks ago    1 GB
docker.io/prom/node-exporter  v0.18.1  e5a616e4b9cf  15 months ago  24.3 MB
6. Deploy OSDs
6.1 View the available disks

[root@cephnode01 ~]# ceph orch device ls
HOST        PATH      TYPE  SIZE   DEVICE  AVAIL  REJECT REASONS
cephnode01  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
cephnode01  /dev/sdb  hdd   20.0G          True
cephnode02  /dev/sda  hdd   20.0G          False  Insufficient space (<5GB) on vgs, LVM detected, locked
cephnode02  /dev/sdb  hdd   20.0G          True
cephnode03  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
cephnode03  /dev/sdb  hdd   20.0G          True
6.2 Use all available disks

[root@cephnode01 ~]# ceph orch apply osd --all-available-devices
To add a single disk instead:

[root@cephnode01 ~]# ceph orch daemon add osd cephnode02:/dev/sdc
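For finer control than --all-available-devices, cephadm also accepts an OSD service specification in YAML; a minimal sketch (the service_id, host pattern, and size filter here are illustrative, not from this article):

cat > osd_spec.yml <<'EOF'
service_type: osd
service_id: default_drive_group
placement:
  host_pattern: 'cephnode*'
data_devices:
  size: '20G'        # hypothetical filter: only 20G devices become OSDs
EOF
ceph orch apply osd -i osd_spec.yml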
6.3 Verify the deployment

[root@cephnode01 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE    RAW USE  DATA     OMAP     META      AVAIL   %USE  VAR   PGS  STATUS
 0    hdd  0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1      up
 1    hdd  0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1      up
 2    hdd  0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1      up
                       TOTAL  60 GiB  3.0 GiB   11 MiB  4.2 KiB   3.0 GiB  57 GiB  5.02
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
7. Storage deployment
7.1 Deploy CephFS
Deploy the MDS service for CephFS, specifying the filesystem name and the number of MDS daemons.

[root@cephnode01 ~]# ceph orch apply mds fs-cluster --placement=3

[root@cephnode01 ~]# ceph -s
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)
    mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
    mds: 3 up:standby
    osd: 3 osds: 3 up (since 51m), 3 in (since 30m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     1 active+clean
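Note that all three MDS daemons show up:standby because no filesystem exists yet; deploying the mds service alone does not create one. A sketch of the usual follow-up on Octopus (the volume name fs-cluster is chosen to match the MDS service above):

[root@cephnode01 ~]# ceph fs volume create fs-cluster   # creates the pools and the filesystem
[root@cephnode01 ~]# ceph fs status fs-cluster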
7.2 Deploy RGW
Create a realm:

[root@cephnode01 ~]# radosgw-admin realm create --rgw-realm=rgw-org --default
{
    "id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "name": "rgw-org",
    "current_period": "ea3dd54c-2dfe-4180-bf11-4415be6ccafd",
    "epoch": 1
}
Create a zonegroup:

[root@cephnode01 ~]# radosgw-admin zonegroup create --rgw-zonegroup=rgwgroup --master --default
{
    "id": "1878ecaa-216b-4c99-ad4e-b72f4fa9193f",
    "name": "rgwgroup",
    "api_name": "rgwgroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "sync_policy": {
        "groups": []
    }
}
Create a zone:

[root@cephnode01 ~]# radosgw-admin zone create --rgw-zonegroup=rgwgroup --rgw-zone=zone-dc1 --master --default
{
    "id": "fbdc5f83-9022-4675-b98e-39738920bb57",
    "name": "zone-dc1",
    "domain_root": "zone-dc1.rgw.meta:root",
    "control_pool": "zone-dc1.rgw.control",
    "gc_pool": "zone-dc1.rgw.log:gc",
    "lc_pool": "zone-dc1.rgw.log:lc",
    "log_pool": "zone-dc1.rgw.log",
    "intent_log_pool": "zone-dc1.rgw.log:intent",
    "usage_log_pool": "zone-dc1.rgw.log:usage",
    "roles_pool": "zone-dc1.rgw.meta:roles",
    "reshard_pool": "zone-dc1.rgw.log:reshard",
    "user_keys_pool": "zone-dc1.rgw.meta:users.keys",
    "user_email_pool": "zone-dc1.rgw.meta:users.email",
    "user_swift_pool": "zone-dc1.rgw.meta:users.swift",
    "user_uid_pool": "zone-dc1.rgw.meta:users.uid",
    "otp_pool": "zone-dc1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "zone-dc1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "zone-dc1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "zone-dc1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00"
}
Deploy a set of radosgw daemons for the realm and zone; here only two nodes run RGW:

[root@cephnode01 ~]# ceph orch apply rgw rgw-org zone-dc1 --placement="2 cephnode02 cephnode03"
Verify:

[root@cephnode01 ~]# ceph -s
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)
    mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
    mds: 3 up:standby
    osd: 3 osds: 3 up (since 51m), 3 in (since 30m)
    rgw: 2 daemons active (rgw-org.zone-dc1.cephnode02.cdgjsi, rgw-org.zone-dc1.cephnode03.nmbbsz)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     1 active+clean
Enable the dashboard for RGW:

# Create an RGW admin user
[root@cephnode01 ~]# radosgw-admin user create --uid=admin --display-name=admin --system
{
    "user_id": "admin",
    "display_name": "admin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "admin",
            "access_key": "WG9W5O9O11TGGOLU6OD2",
            "secret_key": "h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
Set the dashboard credentials:

[root@cephnode01 ~]# ceph dashboard set-rgw-api-access-key WG9W5O9O11TGGOLU6OD2
Option RGW_API_ACCESS_KEY updated
[root@cephnode01 ~]# ceph dashboard set-rgw-api-secret-key h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W
Option RGW_API_SECRET_KEY updated
. `- V3 N$ d, \: w; J6 `0 {设置禁用证书验证、http访问方式及使用admin账号
2 o" i4 n( q3 `) F" ~
: L* y8 @" i1 ^- w, v' f' [ceph dashboard set-rgw-api-ssl-verify False
# L/ y- H' ^" G. U1 a a+ i* hceph dashboard set-rgw-api-scheme http
{: `% J0 C, Aceph dashboard set-rgw-api-host 10.15.253.225
- E. O" e- r" @7 Bceph dashboard set-rgw-api-port 80
+ l/ v1 a* w6 Tceph dashboard set-rgw-api-user-id admin# j% }. f' V; v8 S
重启RGW C. [- w- ?* d; x
1 |: B6 P- Z4 V, o3 u4 [% Cceph orch restart rgw- m( I6 Y- a2 L2 [4 ^
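A quick end-to-end check that RGW answers after the restart; an anonymous request returns an empty ListAllMyBucketsResult XML document:

curl http://10.15.253.225:80
# <?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult ...>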