Environment

Two CentOS 7.5 machines, each with two disks used as OSDs. The public network carries client and monitor traffic; the cluster network carries OSD replication and heartbeat traffic.

public network = 10.0.0.0/24
cluster network = 172.16.0.0/24

Import the Ceph rpm key

[root@ ~]# rpm --import 'https://download.ceph.com/keys/release.asc'

Configure the rpm repository

/etc/yum.repos.d/ceph-luminous.repo

[ceph]
name=Ceph packages for $basearch
baseurl=https://download.ceph.com/rpm-luminous/el7/x86_64/ 
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-luminous/el7/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=https://download.ceph.com/rpm-luminous/el7/SRPMS/
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
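
Optionally, verify that the new repos resolve before installing anything:

[root@ ~]# yum clean all
[root@ ~]# yum repolist | grep -i ceph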

Install dependencies

[root@ ~]#  yum install snappy leveldb gdisk python-argparse gperftools-libs
[root@ ~]#  yum install -y yum-utils 
[root@ ~]#  yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/ 
[root@ ~]#  yum install --nogpgcheck -y epel-release 
[root@ ~]#  rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 
[root@ ~]#  rm -f /etc/yum.repos.d/dl.fedoraproject.org*

Install Ceph

[root@ ~]# yum install ceph
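
After installation, confirm that a Luminous build landed (Luminous releases are versioned 12.2.x):

[root@ ~]# ceph --version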

Configure Ceph

/etc/ceph/ceph.conf 

[global]
max open files = 131072
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 1024
osd pool default pgp num = 1024
osd crush chooseleaf type = 1
mon osd full ratio = .95
mon osd nearfull ratio = .85
public network = 10.0.0.0/24
cluster network = 172.16.0.0/24

[mon]
mon initial members = a
mon data = /var/lib/ceph/mon/$cluster-$id
mon allow pool delete = true

[mon.a]
host = ceph0
mon addr = 10.0.0.10:6789

[osd]
# BlueStore is stable in Luminous, so the pre-Luminous experimental flag is no longer needed
osd objectstore = bluestore

[osd.0]
host =  ceph0
devs = /dev/sdb1
bluestore block path = /dev/sdb2

[osd.1]
host =  ceph0
devs = /dev/sdc1
bluestore block path = /dev/sdc2

[osd.2]
host =  ceph1
devs = /dev/sdb1
bluestore block path = /dev/sdb2

[osd.3]
host =  ceph1
devs = /dev/sdc1
bluestore block path = /dev/sdc2
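
Both nodes need the same configuration; copy it to the second host (ceph1, per the config above) before deploying its daemons:

[root@ ~]# scp /etc/ceph/ceph.conf ceph1:/etc/ceph/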

mon.a

[root@ ~]# ceph-authtool /etc/ceph/ceph.mon.keyring --create-keyring --gen-key -n mon.
[root@ ~]# ceph-mon -i a --mkfs --keyring /etc/ceph/ceph.mon.keyring
[root@ ~]# chown ceph:ceph -R  /var/lib/ceph/mon/ceph-a
[root@ ~]# systemctl start ceph-mon@a
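
Since auth is disabled in the config, the cluster should answer status queries as soon as the monitor is up:

[root@ ~]# ceph -s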

mgr.a

[root@ ~]# sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-a/
[root@ ~]# sudo -u ceph ceph auth get-or-create mgr.a  mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-a/keyring
[root@ ~]# ceph-mgr -i a
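
ceph -s should now report the mgr as active. Optionally, the Luminous dashboard module can be enabled on the mgr (it listens on port 7000 by default):

[root@ ~]# ceph mgr module enable dashboard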

osd.0
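
The config above expects two partitions per disk: a small one (/dev/sdb1) formatted XFS and mounted as the OSD data directory, and the remainder (/dev/sdb2) used raw as the BlueStore block device. A minimal partitioning sketch with sgdisk (from the gdisk package installed earlier); the 1 GiB size for the data partition is an assumption, not from the original:

[root@ ~]# sgdisk -n 1:0:+1G /dev/sdb   # sdb1: small XFS data/metadata partition
[root@ ~]# sgdisk -n 2:0:0 /dev/sdb     # sdb2: rest of the disk for the BlueStore block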

[root@ ~]# mkfs.xfs /dev/sdb1  
[root@ ~]# mkdir /var/lib/ceph/osd/ceph-0
[root@ ~]# mount /dev/sdb1 /var/lib/ceph/osd/ceph-0/
[root@ ~]# ceph osd create                # allocates the next free OSD id; 0 for the first OSD
[root@ ~]# ceph-osd -i 0 --mkfs --mkkey
[root@ ~]# ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring
[root@ ~]# ceph osd crush add osd.0 0.2 root=default host=ceph0   # CRUSH weight, conventionally the capacity in TiB
[root@ ~]# ceph-osd -i 0
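
The OSD should now report up and in:

[root@ ~]# ceph osd tree
[root@ ~]# ceph -s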

Deploy the remaining OSDs (osd.1 through osd.3) the same way, adjusting the id, host, and device each time.
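
For example, osd.1 on ceph0 repeats the same sequence with the id and device from its [osd.1] section above:

[root@ ~]# mkfs.xfs /dev/sdc1
[root@ ~]# mkdir /var/lib/ceph/osd/ceph-1
[root@ ~]# mount /dev/sdc1 /var/lib/ceph/osd/ceph-1/
[root@ ~]# ceph osd create
[root@ ~]# ceph-osd -i 1 --mkfs --mkkey
[root@ ~]# ceph auth add osd.1 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-1/keyring
[root@ ~]# ceph osd crush add osd.1 0.2 root=default host=ceph0
[root@ ~]# ceph-osd -i 1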
