Before deploying, prepare three CentOS hosts. I am using CentOS 7.5 here, with the kernel upgraded to a 4.x long-term-support release. The relevant configuration is as follows:
local-node-1: 10.0.0.1
local-node-2: 10.0.0.2
local-node-3: 10.0.0.3
Configure /etc/hosts so that the three nodes can resolve each other's hostnames, and set up passwordless SSH authentication between the three nodes.
Disable the firewall and SELinux.
Add at least three disks to each node for Ceph storage. In production, multiple disks can be combined into a RAID set. Ceph formats disks automatically when they are added, so there is no need to format them here.
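A minimal sketch of these preparation steps, assuming the three hostnames and IPs listed above and that key-based root SSH is acceptable in your environment:
# hostname resolution on every node
cat >> /etc/hosts <<EOF
10.0.0.1 local-node-1
10.0.0.2 local-node-2
10.0.0.3 local-node-3
EOF
# passwordless SSH between the nodes
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id root@local-node-2
ssh-copy-id root@local-node-3
# disable the firewall and SELinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config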
yum install snappy leveldb gdisk python-argparse gperftools-libs -y
Add the yum repository and import the release key. I use the latest mimic release here:
rpm --import 'https://download.ceph.com/keys/release.asc'
su -c 'rpm -Uvh https://download.ceph.com/rpm-mimic/el7/noarch/ceph-release-1-0.el7.noarch.rpm'
Because of network restrictions in mainland China, you can use the Aliyun mirror instead; modify the ceph.repo file as follows:
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
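After switching to the mirror it can help to rebuild the yum metadata cache before installing (plain yum housekeeping, nothing Ceph-specific):
yum clean all
yum makecache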
yum install ceph -y --disablerepo=epel
[root@local-node-1 ~]# rpm -qa |egrep -i "ceph|rados|rbd"
ceph-deploy-2.0.1-0.noarch
librados2-13.2.5-0.el7.x86_64
libradosstriper1-13.2.5-0.el7.x86_64
ceph-mgr-13.2.5-0.el7.x86_64
ceph-13.2.5-0.el7.x86_64
python-rados-13.2.5-0.el7.x86_64
libcephfs2-13.2.5-0.el7.x86_64
python-rbd-13.2.5-0.el7.x86_64
ceph-common-13.2.5-0.el7.x86_64
ceph-selinux-13.2.5-0.el7.x86_64
ceph-mon-13.2.5-0.el7.x86_64
ceph-osd-13.2.5-0.el7.x86_64
librbd1-13.2.5-0.el7.x86_64
python-cephfs-13.2.5-0.el7.x86_64
ceph-base-13.2.5-0.el7.x86_64
ceph-mds-13.2.5-0.el7.x86_64
1. Create the Ceph configuration directory and an empty configuration file:
mkdir /etc/ceph/
touch /etc/ceph/ceph.conf
2. Generate a unique FSID for the cluster:
[root@local-node-1 ~]# uuidgen
7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
3. Create a keyring for the cluster and generate a secret key for the Monitor service:
[root@local-node-1 ~]# ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring
4. Create an administrator keyring, generate a client.admin user, and add the user to the keyring:
[root@local-node-1 ~]# ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring
[root@local-node-1 ~]# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
creating /var/lib/ceph/bootstrap-osd/ceph.keyring
[root@local-node-1 ~]# ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
[root@local-node-1 ~]# ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
[root@local-node-1 ~]# monmaptool --create --add local-node-1 10.0.0.1 --fsid 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
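If you want to double-check the map before using it, monmaptool can print it back:
monmaptool --print /tmp/monmap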
Create the Monitor's default data directory on the monitor host, named in the {cluster-name}-{hostname} form:
mkdir /var/lib/ceph/mon/ceph-local-node-1
[root@local-node-1 ~]# ceph-mon --mkfs -i local-node-1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
[root@local-node-1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5 # the generated FSID
mon initial members = local-node-1 # hostname
mon host = 10.0.0.1 # corresponding IP
public network = 10.0.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
Fix the ownership of the Ceph directories and start the Monitor service:
chown -R ceph:ceph /var/lib/ceph
systemctl start ceph-mon@local-node-1.service
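To have the monitor come back automatically after a reboot, the same unit can also be enabled (standard systemd usage):
systemctl enable ceph-mon@local-node-1.service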
[root@local-node-1 ~]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 1 daemons, quorum local-node-1
mgr: no daemons active
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[root@local-node-1 ~]# netstat -lntp|grep ceph-mon
tcp 0 0 10.0.0.1:6789 0.0.0.0:* LISTEN 1799/ceph-mon
Once the ceph-mon service is configured, the ceph-mgr service needs to be configured as well. First create an authentication key for the mgr daemon:
ceph auth get-or-create mgr.$name mon 'allow profile mgr' osd 'allow *' mds 'allow *'
For example:
[root@local-node-1 ~]# ceph auth get-or-create mgr.ceph-mgr mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr]
key = AQBC56VcK2PALhAArjY0icXMK6/Hs0xZm/smPA==
Create the mgr data directory, named {cluster-name}-{mgr-id}, and place the key generated above into its keyring file:
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr
[root@local-node-1 ~]# cat /var/lib/ceph/mgr/ceph-ceph-mgr/keyring
[mgr.ceph-mgr]
key = AQBC56VcK2PALhAArjY0icXMK6/Hs0xZm/smPA==
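One way to produce the keyring file shown above is to export the key that was just created with ceph auth get and fix its ownership (a sketch, assuming the mgr.ceph-mgr entity from the previous step):
ceph auth get mgr.ceph-mgr -o /var/lib/ceph/mgr/ceph-ceph-mgr/keyring
chown -R ceph:ceph /var/lib/ceph/mgr/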
Start the ceph-mgr daemon:
ceph-mgr -i $name
For example:
[root@local-node-1 ~]# ceph-mgr -i ceph-mgr
Then check that the mgr shows up as mgr: ceph-mgr(active) in the cluster status:
[root@local-node-1 ~]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 1 daemons, quorum local-node-1
mgr: ceph-mgr(active) # if the state shows 'starting', wait a moment
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
[root@local-node-1 ~]# netstat -lntp | grep ceph
tcp 0 0 10.0.0.1:6789 0.0.0.0:* LISTEN 1799/ceph-mon
tcp 0 0 10.0.0.1:6800 0.0.0.0:* LISTEN 133336/ceph-mgr
[root@local-node-1 ~]# ceph mgr module ls
{
"enabled_modules": [
"balancer",
"iostat",
"restful",
"status"
],
"disabled_modules": [
{
"name": "dashboard",
"can_run": true,
"error_string": ""
},
{
"name": "hello",
"can_run": true,
"error_string": ""
},
{
"name": "influx",
"can_run": false,
"error_string": "influxdb python module not found"
},
{
"name": "localpool",
"can_run": true,
"error_string": ""
},
{
"name": "prometheus",
"can_run": true,
"error_string": ""
},
{
"name": "selftest",
"can_run": true,
"error_string": ""
},
{
"name": "smart",
"can_run": true,
"error_string": ""
},
{
"name": "telegraf",
"can_run": true,
"error_string": ""
},
{
"name": "telemetry",
"can_run": true,
"error_string": ""
},
{
"name": "zabbix",
"can_run": true,
"error_string": ""
}
]
}
[root@local-node-1 ~]# ceph mgr module enable dashboard
[root@local-node-1 ~]# ceph mgr module ls
{
"enabled_modules": [
"balancer",
"dashboard",
"iostat",
"restful",
"status"
],
...
# disable a module
ceph mgr module disable dashboard
Use ceph mgr services to see which service endpoints the enabled modules expose:
[root@local-node-1 ~]# ceph mgr services
{}
Modules can also be enabled automatically at startup by listing them in the configuration file:
[mon]
mgr initial modules = dashboard balancer
Normally, a ceph-mgr instance should be configured on every host that runs a ceph-mon daemon, to achieve the same level of availability.
By default, whichever ceph-mgr instance comes up first is made active by the Monitors, and the others become standbys. No quorum is required among ceph-mgr daemons.
If the active daemon fails to send a beacon to the monitors for longer than mon mgr beacon grace (30 seconds by default), it is replaced by a standby.
To pre-empt this failover manually, you can use ceph mgr fail.
Help for the mgr-related commands is available with:
ceph tell mgr help
Official documentation
Once the mon has been initialized and is running normally, OSDs should be added. The cluster cannot reach the active + clean state until there are enough OSDs to hold the configured number of object replicas (for example, osd pool default size = 3 requires at least three OSDs). After bootstrapping the monitor the cluster has a default CRUSH map, but that map does not yet assign any Ceph OSD daemons to Ceph nodes.
Ceph provides the ceph-volume utility, which can initialize the logical volumes, disks or partitions that Ceph will use. ceph-volume creates OSD IDs by incrementing an index, and it also adds each new OSD to the CRUSH map under its host. Run ceph-volume -h for CLI details. ceph-volume automates many of the manual deployment steps; without it, those steps would have to be done by hand. To create the first three OSDs with the short-form procedure, run the following on every node that should host OSDs:
==There are two back-end choices when creating OSDs, filestore and bluestore. bluestore is the default in the community release and was designed to address filestore's performance shortcomings; the detailed differences between the two are covered in the article on Ceph internals.==
ceph-volume lvm create --data /dev/sdb
ceph-volume lvm create --data /dev/sdc
ceph-volume lvm create --data /dev/sdd
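The three commands above create bluestore OSDs, which, as noted earlier, is the default. If filestore were required instead, ceph-volume accepts a --filestore flag together with a journal device; a rough sketch (the journal partition /dev/sde1 is only an illustrative placeholder):
ceph-volume lvm create --filestore --data /dev/sdb --journal /dev/sde1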
[root@local-node-1 ~]# ceph-volume lvm list
====== osd.1 =======
[block] /dev/ceph-fad16202-18c0-4444-9640-946173373925/osd-block-43a082d5-79c4-4d3f-880e-ecc7eaef6a83
type block
osd id 1
cluster fsid 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
cluster name ceph
osd fsid 43a082d5-79c4-4d3f-880e-ecc7eaef6a83
encrypted 0
cephx lockbox secret
block uuid W68QgI-8eHM-bSEr-I9Gs-dQx8-tdf9-lHRbqa
block device /dev/ceph-fad16202-18c0-4444-9640-946173373925/osd-block-43a082d5-79c4-4d3f-880e-ecc7eaef6a83
vdo 0
crush device class None
devices /dev/sdc
====== osd.0 =======
[block] /dev/ceph-6c675287-4a42-43f0-8cef-69b0150c3b06/osd-block-f829a5f0-0a11-4ae7-983a-ecd01718a81a
type block
osd id 0
cluster fsid 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
cluster name ceph
osd fsid f829a5f0-0a11-4ae7-983a-ecd01718a81a
encrypted 0
cephx lockbox secret
block uuid E0YDG4-lm1W-WbqE-yRHy-hqGL-H0af-eZzKjr
block device /dev/ceph-6c675287-4a42-43f0-8cef-69b0150c3b06/osd-block-f829a5f0-0a11-4ae7-983a-ecd01718a81a
vdo 0
crush device class None
devices /dev/sdb
====== osd.2 =======
[block] /dev/ceph-256d0c82-3d7b-4672-a241-99c9c614809d/osd-block-75c04fb3-90e8-40af-9fb4-1c94b22664be
type block
osd id 2
cluster fsid 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
cluster name ceph
osd fsid 75c04fb3-90e8-40af-9fb4-1c94b22664be
encrypted 0
cephx lockbox secret
block uuid fNFmrI-Y1dZ-4cHd-UCVi-ajLD-Uim2-wkcx3y
block device /dev/ceph-256d0c82-3d7b-4672-a241-99c9c614809d/osd-block-75c04fb3-90e8-40af-9fb4-1c94b22664be
vdo 0
crush device class None
devices /dev/sdd
This approach gives more fine-grained control over the parameters and splits the work into two steps: preparing the disk and activating the OSD.
Prepare the OSD:
ceph-volume lvm prepare --data {data-path}
For example:
ceph-volume lvm prepare --data /dev/hdd1
Activate the OSD:
ceph-volume lvm activate {ID} {FSID}
For example:
ceph-volume lvm activate 0 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
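The {ID} and {FSID} arguments for activation are the 'osd id' and 'osd fsid' fields shown per OSD in the listing used earlier:
ceph-volume lvm list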
[root@local-node-1 ~]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 1 daemons, quorum local-node-1
mgr: ceph-mgr(active)
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 27 GiB / 30 GiB avail
pgs:
[root@local-node-1 ~]# netstat -lntp|grep ceph
tcp 0 0 10.0.0.1:6805 0.0.0.0:* LISTEN 1132/ceph-osd
tcp 0 0 10.0.0.1:6806 0.0.0.0:* LISTEN 1132/ceph-osd
tcp 0 0 10.0.0.1:6807 0.0.0.0:* LISTEN 1132/ceph-osd
tcp 0 0 10.0.0.1:6808 0.0.0.0:* LISTEN 1126/ceph-osd
tcp 0 0 10.0.0.1:6809 0.0.0.0:* LISTEN 1126/ceph-osd
tcp 0 0 10.0.0.1:6810 0.0.0.0:* LISTEN 1126/ceph-osd
tcp 0 0 10.0.0.1:6811 0.0.0.0:* LISTEN 1126/ceph-osd
tcp 0 0 10.0.0.1:6812 0.0.0.0:* LISTEN 1941/ceph-mgr
tcp 0 0 10.0.0.1:6789 0.0.0.0:* LISTEN 1093/ceph-mon
tcp 0 0 10.0.0.1:6800 0.0.0.0:* LISTEN 1128/ceph-osd
tcp 0 0 10.0.0.1:6801 0.0.0.0:* LISTEN 1128/ceph-osd
tcp 0 0 10.0.0.1:6802 0.0.0.0:* LISTEN 1128/ceph-osd
tcp 0 0 10.0.0.1:6803 0.0.0.0:* LISTEN 1128/ceph-osd
tcp 0 0 10.0.0.1:6804 0.0.0.0:* LISTEN 1132/ceph-osd
For details, see the official documentation: http://docs.ceph.com/docs/master/install/manual-deployment/#filestore
The steps above deployed the basic components on a single machine. To build a highly available cluster, the other two nodes, local-node-2 and local-node-3, have to be added. First extend the configuration on local-node-1:
[root@local-node-1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
public network = 10.0.0.0/24
mon initial members = local-node-1,local-node-2,local-node-3
mon host = 10.0.0.1,10.0.0.2,10.0.0.3
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true
[mds.local-node-1]
host = local-node-1
Distribute the configuration and keys to the other nodes:
scp /etc/ceph/* 10.0.0.2:/etc/ceph/
scp /etc/ceph/* 10.0.0.3:/etc/ceph/
On the new nodes, create the Ceph working directories and the monitor data directory:
mkdir -p /var/lib/ceph/{bootstrap-mds,bootstrap-mgr,bootstrap-osd,bootstrap-rbd,bootstrap-rgw,mds,mgr,mon,osd}
chown -R ceph:ceph /var/lib/ceph
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-local-node-2 # the directory name uses this node's hostname as the ID
[root@local-node-2 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
public network = 10.0.0.0/24
mon initial members = local-node-1,local-node-2,local-node-3
mon host = 10.0.0.1,10.0.0.2,10.0.0.3
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true
[mon.local-node-2]
mon_addr = 10.0.0.2:6789
host = local-node-2
Fetch the mon keyring and the current monmap from the cluster, then initialize and start the monitor on local-node-2 (local-node-3 is added in the same way):
ceph auth get mon. -o /tmp/monkeyring
ceph mon getmap -o /tmp/monmap
sudo -u ceph ceph-mon --mkfs -i local-node-2 --monmap /tmp/monmap --keyring /tmp/monkeyring
systemctl start ceph-mon@local-node-2
[root@local-node-3]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 3 daemons, quorum local-node-1,local-node-2,local-node-3
mgr: ceph-mgr(active)
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 27 GiB / 30 GiB avail
pgs:
[root@local-node-3]# ceph mon stat
e3: 3 mons at {local-node-1=10.0.0.1:6789/0,local-node-2=10.0.0.2:6789/0,local-node-3=10.0.0.3:6789/0}, election epoch 28, leader 0 local-node-1, quorum 0,1,2 local-node-1,local-node-2,local-node-3
To add OSDs on the new nodes, copy the bootstrap-osd keyring to them and run ceph-volume there:
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.0.0.2:/var/lib/ceph/bootstrap-osd/
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.0.0.3:/var/lib/ceph/bootstrap-osd/
ceph-volume lvm create --data /dev/sdb
ceph-volume lvm create --data /dev/sdc
ceph-volume lvm create --data /dev/sdd
[root@local-node-1 ~]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 3 daemons, quorum local-node-1,local-node-2,local-node-3
mgr: ceph-mgr(active)
osd: 9 osds: 9 up, 9 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 9.1 GiB used, 81 GiB / 90 GiB avail
pgs:
==The MDS service only needs to be deployed if you intend to use CephFS.==
Create the MDS data directory:
mkdir -p /var/lib/ceph/mds/{cluster-name}-{id} # set the id to the local hostname here
For example:
[root@local-node-1 ~]# sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-local-node-1
Create a keyring for the MDS daemon:
ceph-authtool --create-keyring /var/lib/ceph/mds/{cluster-name}-{id}/keyring --gen-key -n mds.{id}
For example:
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-local-node-1/keyring --gen-key -n mds.local-node-1
Import the key and set its capabilities:
ceph auth add mds.{id} osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/{cluster}-{id}/keyring
For example:
ceph auth add mds.local-node-1 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-local-node-1/keyring
[root@local-node-1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
public network = 10.0.0.0/24
mon initial members = local-node-1
mon host = 10.0.0.1
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mds.local-node-1] # add this section
host = local-node-1
Start the MDS daemon manually:
[root@local-node-1 ~]# ceph-mds --cluster ceph -i local-node-1 -m local-node-1:6789
If it is started as root, watch out for file-permission problems; it is better to run the service through systemd as the ceph user:
chown -R ceph:ceph /var/lib/ceph/mds/
systemctl start ceph-mds@local-node-1
systemctl enable ceph-mds@local-node-1
[root@local-node-1 ~]# ps -ef|grep ceph-mds
ceph 2729 1 0 17:32 ? 00:00:00 /usr/bin/ceph-mds -f --cluster ceph --id local-node-1 --setuser ceph --setgroup ceph
[root@local-node-1 ~]# netstat -lntp|grep ceph-mds
tcp 0 0 10.0.0.1:6813 0.0.0.0:* LISTEN 2729/ceph-mds
7. Check the status of the Ceph cluster:
[root@local-node-1 ~]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 1 daemons, quorum local-node-1
mgr: ceph-mgr(active)
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 27 GiB / 30 GiB avail
pgs:
[root@local-node-1 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.02939 root default
-3 0.02939 host local-node-1
0 hdd 0.00980 osd.0 up 1.00000 1.00000
1 hdd 0.00980 osd.1 up 1.00000 1.00000
2 hdd 0.00980 osd.2 up 1.00000 1.00000
CephFS requires at least two RADOS pools, one for data and one for metadata. When configuring these pools, two points are worth considering: the metadata pool deserves a higher level of protection, because damage to it can make the whole filesystem inaccessible, and it benefits from low-latency storage such as SSDs, because metadata latency is directly visible to clients.
Create the two pools with the following commands:
ceph osd pool create cephfs_data <pg_num>
ceph osd pool create cephfs_metadata <pg_num>
For example:
[root@local-node-1 ~]# ceph osd pool create cephfs_data 64
[root@local-node-1 ~]# ceph osd pool create cephfs_metadata 64
[root@local-node-1 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1
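For reference, the value 64 follows the usual sizing rule of thumb from the Ceph documentation rather than anything mandatory: aim for roughly (number of OSDs × 100) / replica count placement groups in total, rounded to a power of two. With 9 OSDs and size 3 that is (9 × 100) / 3 = 300 PGs across all pools, so 64 per pool is on the conservative side.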
Check the filesystem status:
[root@local-node-1 ~]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@local-node-1 ~]# ceph mds stat
cephfs-1/1/1 up {0=local-node-1=up:active}
If more than one Ceph filesystem has been created in the cluster, you can use ceph fs set-default to choose the default one to mount, so that clients do not have to specify which filesystem they want.
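For example, assuming the cephfs filesystem created above:
ceph fs set-default cephfs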
==Mounting with ceph-fuse==
Here CephFS is mounted on node-4 (10.0.0.4). First install the ceph-fuse client:
yum install ceph-fuse -y --disablerepo=epel # hosts that are not part of the ceph cluster need the EPEL repo enabled to install the dependencies
If the client to be mounted is not a member of the ceph cluster, copy the cluster's key and configuration file into its /etc/ceph directory:
[root@node-4 ~]# mkdir /etc/ceph
[root@local-node-2 ~]# scp /etc/ceph/ceph.conf 10.0.0.4:/etc/ceph/
[root@local-node-2 ~]# scp /etc/ceph/ceph.client.admin.keyring 10.0.0.4:/etc/ceph/
Mount CephFS:
ceph-fuse -m 10.0.0.2:6789 /mnt/cephfs
Check the mount:
# df -h |grep cephfs
ceph-fuse 26G 0 26G 0% /mnt/cephfs
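To make the ceph-fuse mount persistent across reboots, the Ceph documentation describes an fstab entry of roughly this shape (a sketch; the client id and mount point are assumptions to adapt):
none  /mnt/cephfs  fuse.ceph  ceph.id=admin,ceph.conf=/etc/ceph/ceph.conf,_netdev,defaults  0 0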
Testing shows that the filesystem can be mounted on any node of the ceph cluster and the files are shared:
# the specified mon must be healthy; if it is not in the active or standby state the mount will fail
ceph-fuse -m 10.0.0.3:6789 /mnt/cephfs
[root@local-node-2 cephfs]# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_WARN
1/3 mons down, quorum local-node-2,local-node-3
services:
mon: 3 daemons, quorum local-node-2,local-node-3, out of quorum: local-node-1
mgr: ceph-mgr(active)
mds: cephfs-1/1/1 up {0=local-node-1=up:active}
osd: 9 osds: 9 up, 9 in
data:
pools: 2 pools, 128 pgs
objects: 24 objects, 11 KiB
usage: 9.1 GiB used, 81 GiB / 90 GiB avail
pgs: 128 active+clean
==Mounting with the kernel driver==
Using the kernel driver puts requirements on the kernel version. With Ceph's newest CRUSH TUNABLES profile (jewel), the official recommendation is a 4.14 or 4.9 kernel; kernels older than 4.5 will fail to mount with errors such as:
local-node-1 kernel: libceph: mon0 10.0.0.2:6789 feature set mismatch, my 107b84a842aca < server's 40107b84a842aca, missing 400000000000000
local-node-1 kernel: libceph: mon0 10.0.0.2:6789 missing required protocol features
More details on kernel version support can be found in the official documentation.
In that case ceph-fuse is recommended. Alternatively, you can switch to an older set of CRUSH tunables with the command below (the default profile is 'default', which currently corresponds to jewel):
ceph osd crush tunables hammer
# ceph osd crush show-tunables
{
"choose_local_tries": 0,
"choose_local_fallback_tries": 0,
"choose_total_tries": 50,
"chooseleaf_descend_once": 1,
"chooseleaf_vary_r": 1,
"chooseleaf_stable": 0,
"straw_calc_version": 1,
"allowed_bucket_algs": 54,
"profile": "hammer",
"optimal_tunables": 0,
"legacy_tunables": 0,
"minimum_required_version": "hammer",
"require_feature_tunables": 1,
"require_feature_tunables2": 1,
"has_v2_rules": 0,
"require_feature_tunables3": 1,
"has_v3_rules": 0,
"has_v4_buckets": 1,
"require_feature_tunables5": 0,
"has_v5_rules": 0
}
The hammer profile is supported by kernels 4.1 and later.
Mount using either of the following two methods:
# Method 1:
[root@local-node-1 ~]# mount -t ceph 10.0.0.2:6789:/ /mnt -o name=admin,secret=AQDo1aVcQ+Z0BRAAENyooUgFgokkjw9hBUOseg==
# Method 2:
[root@local-node-1 ~]# mount -t ceph 10.0.0.2:6789:/ /mnt -o name=admin,secretfile=/tmp/keyring
# the secretfile must contain only the key itself, with no other parameters
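A kernel mount can likewise be made persistent via /etc/fstab, along these lines (a sketch; the secretfile path is an assumption):
10.0.0.2:6789:/  /mnt  ceph  name=admin,secretfile=/tmp/keyring,noatime,_netdev  0 2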
If the mount fails, check whether the mon and mds services are healthy.
To remove a CephFS, first check the MDS status and stop or fail the active MDS daemon:
# ceph mds stat
cephfs-1/1/1 up {0=local-node-1=up:creating}
ceph mds fail local-node-1
# or use
systemctl stop ceph-mds@local-node-1
# list the current CephFS filesystems
ceph fs ls
# remove the CephFS
ceph fs rm cephfs --yes-i-really-mean-it
# ceph fs ls
No filesystems enabled
Then delete the pools that backed the filesystem (pool deletion requires mon allow pool delete = true, which was set in ceph.conf above):
# ceph osd lspools
3 cephfs_data
4 cephfs_metadata
ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it
# ceph -s
cluster:
id: 7bd25f8d-b76f-4ff9-89ec-186287bbeaa5
health: HEALTH_OK
services:
mon: 1 daemons, quorum local-node-1
mgr: ceph-mgr(active)
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.0 GiB used, 27 GiB / 30 GiB avail
pgs: