ceph集群之cephadm清理ceph集群及一键部署集群

见下方脚本即可(注意:清理后ceph集群数据将完全消失且无法恢复)

1.一键清理ceph集群

#!/bin/bash
# One-shot teardown of a cephadm-managed Ceph cluster.
# WARNING: destroys all cluster data irreversibly.

# Fail fast: abort on any error, unset variable, or failed pipeline stage.
set -euo pipefail

# Disks to wipe on plain (non-deploy) nodes and on the deploy node.
# NOTE(review): both lists are identical here — confirm whether they are
# meant to differ per host role.
putong_disk="/dev/sdb /dev/sdc /dev/sdd"
deploy_disk="/dev/sdb /dev/sdc /dev/sdd"
#######################################
# Tear down the cephadm cluster from the deploy node:
# pause orchestration, remove the cluster (zapping OSDs), stop leftover
# containers/units, and wipe the OSD disks.
# Globals: deploy_disk (read)
#######################################
clear_deploy() {
  ceph orch pause
  local fsid_ceph
  fsid_ceph=$(ceph fsid)
  cephadm rm-cluster --force --zap-osds --fsid "${fsid_ceph}"
  # Second pass is best-effort residue cleanup; guard it so an error on an
  # already-removed cluster does not abort the script under set -e.
  cephadm rm-cluster --fsid "${fsid_ceph}" --force || true
  systemctl stop ceph.target
  # Intentionally unquoted: the substitution yields one container id per word.
  docker stop $(docker ps -a | grep ceph | awk '{print $1}') || true
  sleep 2
  rm -rf /var/lib/ceph
  # Clear leftover device-mapper (LVM) mappings once for the node,
  # not once per disk.
  dmsetup remove_all
  for disk in ${deploy_disk}; do
    # Overwrite old bluestore metadata with a fresh ext4, then strip all
    # filesystem signatures so the disk looks blank to ceph-volume.
    mkfs.ext4 "${disk}"
    wipefs -a "${disk}"
  done
}
#######################################
# Wipe a plain (non-deploy) node: stop ceph services/containers, remove
# local ceph state, and scrub the OSD disks.
# Globals: putong_disk (read)
#######################################
clear_putong() {
  systemctl stop ceph.target
  # Intentionally unquoted: the substitution yields one container id per word.
  docker stop $(docker ps -a | grep ceph | awk '{print $1}') || true
  sleep 2
  rm -rf /var/lib/ceph
  # Clear leftover device-mapper (LVM) mappings once for the node,
  # not once per disk.
  dmsetup remove_all
  sleep 2
  for disk in ${putong_disk}; do
    # Overwrite old bluestore metadata, then strip all filesystem signatures.
    mkfs.ext4 "${disk}"
    wipefs -a "${disk}"
  done
}
# Entry point: wipe the deploy node first, then the plain-node state.
main() {
  clear_deploy
  clear_putong
}

main "$@"

2.一键部署ceph集群

#!/bin/bash
# version v2
# One-shot cephadm deployment: mirror/package setup, bootstrap, host/OSD
# enrolment. Abort on the first failed command.
set -e

# ANSI color codes used by the status messages below.
red="31m"
green="36m"

# Monitor IP of the bootstrap node, plus the host lists to enrol.
ip="10.0.0.10"
all_in_one_host="ceph-1"
all_host="ceph-1 ceph-2 ceph-3"

# Ensure cephadm's config file exists before bootstrap reads it.
# mkdir -p is idempotent, so no prior existence check is needed.
mkdir -p /etc/ceph
# Only touch when missing so an existing config's mtime is preserved.
if [ ! -f /etc/ceph/ceph.conf ]; then
  touch /etc/ceph/ceph.conf
fi
#######################################
# Switch APT to the Aliyun Ceph mirror and install cephadm + ceph-common.
# Globals: green (read)
#######################################
change_source() {
  echo "执行换源+安装cephadm ceph-common"
  local ceph_app="cephadm ceph-common"
  # NOTE(review): apt-key is deprecated on newer Ubuntu releases; consider
  # installing the key under /etc/apt/trusted.gpg.d/ instead.
  wget -q -O- 'https://mirrors.aliyun.com/ceph/keys/release.asc' | sudo apt-key add -
  sudo apt-add-repository 'deb https://mirrors.aliyun.com/ceph/debian-quincy/ focal main'
  sudo apt update
  # Intentionally unquoted: ${ceph_app} must split into separate package names.
  apt install ${ceph_app} -y
  # Under set -e the script has already aborted on any failure above, so
  # reaching this line implies success; the old `[ $? = 0 ] &&` guard was
  # redundant (and itself fatal under set -e when false).
  echo -e "\033[${green} 执行换源+安装cephadm ceph-common 成功! \033[0m"
}
#######################################
# Bootstrap the multi-node cluster, then enrol each host and add one OSD
# (/dev/sdb) per host.
# Globals: ip, all_host, green (read)
#######################################
install_ceph_colony() {
  echo "执行部署ceph + 添加osd"
  cephadm --image registry.cn-hangzhou.aliyuncs.com/zznn/ceph:v17 bootstrap \
    --config /etc/ceph/ceph.conf --skip-monitoring-stack --skip-dashboard \
    --mon-ip "${ip}" --cluster-network 192.168.232.0/24 --allow-overwrite
  # Enrol hosts and add their OSDs one by one.
  for host in ${all_host}; do
    ssh-copy-id -f -i /etc/ceph/ceph.pub "root@${host}"
    ceph orch host add "${host}"
    # Alternative: let the orchestrator claim every free disk automatically:
    # ceph orch apply osd --all-available-devices
    sleep 5
    ceph orch daemon add osd "${host}:/dev/sdb"
    # set -e guarantees we only reach this line on success; the old
    # `[ $? = 0 ] &&` guard was redundant.
    echo -e "\033[${green} ${host} osd sdb添加成功! \033[0m"
  done
}
#######################################
# Bootstrap a single-node (all-in-one) cluster with replica size 1, then
# add three OSDs and force every existing pool down to size 1.
# Globals: ip, all_in_one_host (read); overwrites /etc/ceph/ceph.conf
#######################################
install_all_in_one_ceph() {
  # Pre-seed pool defaults so a single host can reach HEALTH_OK.
  # Quoted delimiter: no expansion happens inside the here-doc.
  cat > /etc/ceph/ceph.conf << 'EOF'
[global]
osd_pool_default_size = 1
osd_pool_default_min_size = 1
EOF
  cephadm --image registry.cn-hangzhou.aliyuncs.com/zznn/ceph:v17 bootstrap \
    --config /etc/ceph/ceph.conf --skip-monitoring-stack --skip-dashboard \
    --mon-ip "${ip}" --allow-overwrite
  ceph config-key set config/global/osd_pool_default_size 1 || true
  # ceph config-key set config/global/osd_pool_default_min_size 1 || true
  # Enrol the host and add its three OSD disks.
  for host in ${all_in_one_host}; do
    ssh-copy-id -f -i /etc/ceph/ceph.pub "root@${host}"
    ceph orch host add "${host}"
    for dev in /dev/sdb /dev/sdc /dev/sdd; do
      ceph orch daemon add osd "${host}:${dev}"
    done
  done
  # Pools created during bootstrap (e.g. .mgr) predate the defaults above;
  # force them to size 1 as well. lspools prints "<id> <name>" per line.
  for pool in $(ceph osd lspools | awk '{print $2}'); do
    ceph osd pool get "${pool}" size
    ceph osd pool set "${pool}" size 1
  done
}
# Entry point: prepare packages, then run both deployment flavours in order.
# NOTE(review): this bootstraps twice (multi-node, then all-in-one) —
# confirm both runs are really intended on one box.
main() {
  change_source
  install_ceph_colony
  install_all_in_one_ceph
}

main "$@"

结语:fighting!