echo "deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
sudo apt-get update
sudo apt-get install ceph-deploy -y
cat /etc/hosts
192.168.1.101 controller1
192.168.1.102 controller2
192.168.1.103 controller3
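If the entries are missing, they can be appended on every node in one step; a minimal sketch using the addresses from the listing above:
sudo tee -a /etc/hosts << 'EOF'
192.168.1.101 controller1
192.168.1.102 controller2
192.168.1.103 controller3
EOF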
ssh-keygen
ssh-copy-id controller1
ssh-copy-id controller2
ssh-copy-id controller3
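The key can also be pushed in a loop; a sketch assuming the three host names above:
for host in controller1 controller2 controller3; do
    ssh-copy-id "$host"
done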
apt-get install cephadm
mkdir -p /etc/ceph
Bootstrap the cluster (run on the first mon node):
cephadm bootstrap \
  --mon-ip 10.2.32.187 \
  --initial-dashboard-user admin \
  --initial-dashboard-password Troila12
# cephadm bootstrap \
#   --mon-ip <IP of the first node> \
#   --initial-dashboard-user <user name> \
#   --initial-dashboard-password <password>
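Once bootstrap completes, the dashboard is served on https://10.2.32.187:8443 by default, and the state of the new one-node cluster can be checked from the bootstrap host:
sudo cephadm shell -- ceph -s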
cephadm add-repo --release octopus
cephadm install ceph-common
ceph orch apply mon --unmanaged
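--unmanaged stops the orchestrator from placing mon daemons automatically, so the placement set up in the following steps stays under manual control. The current service list, including the unmanaged flag, can be checked with:
ceph orch ls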
ceph orch host add {node1}
ceph orch host add {node2}
ceph orch host add {node3}
# Example
ceph orch host add controller1
ceph orch host add controller2
ceph orch host add controller3
ceph orch host label add {node1} mon
ceph orch host label add {node2} mon
ceph orch host label add {node3} mon
# Check the result with ceph orch host ls; normal output looks like:
HOST         ADDR         LABELS  STATUS
controller1  controller1  mon
controller2  controller2  mon
controller3  controller3  mon
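The three label commands can be collapsed into a loop; a sketch assuming the example host names:
for host in controller1 controller2 controller3; do
    ceph orch host label add "$host" mon
done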
ceph orch apply mon label:mon
ceph orch apply mon "{node1},{node2},{node3}" # 示例--注意:controller 之间不能有空格 ceph orch apply mon "controller1,controller2,controller3"
Step 1: Add the nodes to the cluster
ceph orch host add {node2}
ceph orch host add {node3}
# Example
ceph orch host add controller2
ceph orch host add controller3
Step 2: Label the OSD nodes
ceph orch host label add {node2} osd
ceph orch host label add {node3} osd
# Example
ceph orch host label add controller2 osd
ceph orch host label add controller3 osd
Step 3: Check the labels
ceph orch host ls
# Normal output looks like:
HOST         ADDR         LABELS   STATUS
controller1  controller1  mon
controller2  controller2  osd mon
controller3  controller3  osd mon
ceph orch device ls
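Only devices with no partitions, no filesystem and no LVM state are listed as available. After wiping a disk the inventory can be rescanned, and a disk with leftover data can be cleared with zap (this destroys its data); the device and host names below are examples:
ceph orch device ls --refresh
ceph orch device zap controller1 /dev/sdb --force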
ceph orch daemon add osd {node1}:/dev/sdb
ceph orch daemon add osd {node2}:/dev/sdb
ceph orch daemon add osd {node3}:/dev/sdb
ceph orch daemon add osd {node1}:/dev/sdc
ceph orch daemon add osd {node2}:/dev/sdc
ceph orch daemon add osd {node3}:/dev/sdc
ceph orch daemon add osd {node1}:/dev/sdd
ceph orch daemon add osd {node2}:/dev/sdd
ceph orch daemon add osd {node3}:/dev/sdd
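The nine commands above can be shortened to a loop, or cephadm can be told to consume every available device on its own; a sketch with the hosts and disks from this walkthrough:
for host in controller1 controller2 controller3; do
    for dev in /dev/sdb /dev/sdc /dev/sdd; do
        ceph orch daemon add osd "$host:$dev"
    done
done
# alternatively, create OSDs on all unused devices automatically:
# ceph orch apply osd --all-available-devices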
ceph -s
# Normal output looks like (HEALTH_OK is the healthy state):
  cluster:
    id:     f2024656-3082-11ec-a2b1-1d2be083b36b
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum controller1,controller3,controller2 (age 3h)
    mgr: controller1.fkiash(active, since 3h), standbys: controller2.znlcmv
    osd: 9 osds: 9 up (since 49m), 9 in (since 49m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   9.1 GiB used, 891 GiB / 900 GiB avail
    pgs:     1 active+clean
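If the status shows HEALTH_WARN or HEALTH_ERR instead, the specific reason can be listed with:
ceph health detail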
ceph osd lspools
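As a quick end-to-end check, a small pool can be created and removed again; the pool name test is an assumption, and deleting a pool requires mon_allow_pool_delete to be enabled:
ceph osd pool create test 32
ceph osd lspools
ceph osd pool rm test test --yes-i-really-really-mean-it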