Ceph Best Practices

Base environment: CentOS 7.2. Cluster nodes (/etc/hosts entries):

192.168.200.126 ceph1
192.168.200.127 ceph2
192.168.200.129 ceph3


Disable the firewall and SELinux

# setenforce 0

# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# systemctl stop firewalld

# systemctl disable firewalld

Ceph yum repository:
[root@ceph1 ~]# cat /etc/yum.repos.d/ceph.repo
[Ceph-mimic]
name=Ceph x86_64 packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
enabled=1
gpgcheck=0


ceph-deploy yum repository:
[root@ceph1 ~]# cat /etc/yum.repos.d/ceph-deploy.repo
[ceph-deploy]
name=ceph-deploy
baseurl=https://download.ceph.com/rpm-mimic/el7/noarch/
enabled=1
gpgcheck=0


Install NTP on all nodes, then synchronize the time
# yum install -y ntp
# ntpdate pool.ntp.org
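
A one-shot ntpdate only corrects the clock once; to keep the nodes in sync afterwards (and avoid the mon clock-skew warning that shows up later), it also helps to enable the ntpd service on every node, for example:
# systemctl enable ntpd
# systemctl start ntpd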


Set up passwordless SSH across the cluster
[root@ceph1 ~]# ssh-keygen
[root@ceph1 ~]# ssh-copy-id ceph1
[root@ceph1 ~]# ssh-copy-id ceph2
[root@ceph1 ~]# ssh-copy-id ceph3


Synchronize configuration files

[root@ceph1 ~]# scp /etc/hosts ceph2:/etc/hosts
[root@ceph1 ~]# scp /etc/hosts ceph3:/etc/hosts
[root@ceph1 ~]# scp /etc/yum.repos.d/ceph-deploy.repo ceph2:/etc/yum.repos.d/
[root@ceph1 ~]# scp /etc/yum.repos.d/ceph-deploy.repo ceph3:/etc/yum.repos.d/
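
The later "yum install -y ceph" on ceph2 and ceph3 also needs the Ceph package repository, so ceph.repo presumably has to be copied to those nodes as well (this step is not shown above), e.g.:
[root@ceph1 ~]# scp /etc/yum.repos.d/ceph.repo ceph2:/etc/yum.repos.d/
[root@ceph1 ~]# scp /etc/yum.repos.d/ceph.repo ceph3:/etc/yum.repos.d/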


Deploy Ceph
[root@ceph1 ~]# mkdir /etc/ceph
[root@ceph1 ~]# yum install -y ceph-deploy python-pip
[root@ceph1 ~]# cd /etc/ceph
[root@ceph1 ceph]# ceph-deploy new ceph1 ceph2 ceph3
[root@ceph1 ceph]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
[root@ceph1 ceph]# vi ceph.conf
[global]
fsid = d5dec480-a9df-4833-b740-de3a0ae4c755
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.200.126,192.168.200.127,192.168.200.129
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.200.0/24
cluster network = 192.168.200.0/24


Install the Ceph packages on all nodes:
yum install -y ceph


On ceph1, bootstrap the initial monitors and gather the keys

[root@ceph1 ceph]# ceph-deploy mon create-initial

Distribute the keys to the other nodes

[root@ceph1 ceph]# ceph-deploy admin ceph{1..3}


Configure the OSDs
[root@ceph1 ceph]# ceph-deploy osd create --data /dev/sdb ceph1
[root@ceph1 ceph]# ceph-deploy osd create --data /dev/sdb ceph2
[root@ceph1 ceph]# ceph-deploy osd create --data /dev/sdb ceph3

[root@ceph1 ceph]# ceph -s
  cluster:
    id:     d5dec480-a9df-4833-b740-de3a0ae4c755
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: no daemons active
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
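
The HEALTH_WARN above only means that no mgr daemon is running yet. The next status output shows an active mgr on ceph1 with standbys on ceph2 and ceph3, so the mgr daemons are presumably deployed at this point, e.g.:
[root@ceph1 ceph]# ceph-deploy mgr create ceph1 ceph2 ceph3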
 


If the following warning appears, the clocks of the cluster hosts have drifted apart; synchronizing the time again clears it:
    health: HEALTH_WARN
            clock skew detected on mon.ceph2
[root@ceph2 ~]# ntpdate pool.ntp.org
[root@ceph1 ceph]# systemctl restart ceph-mon.target


[root@ceph1 ceph]# ceph -s
  cluster:
    id:     d5dec480-a9df-4833-b740-de3a0ae4c755
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3
    mgr: ceph1(active), standbys: ceph3, ceph2
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:


Enable the dashboard (web management)
[root@ceph1 ceph]# vi /etc/ceph/ceph.conf
# add:
[mgr]
mgr_modules = dashboard
[root@ceph1 ceph]# ceph mgr module enable dashboard
[root@ceph1 ceph]# ceph-deploy mgr create ceph1                   

Generate and install a self-signed certificate
[root@ceph1 ceph]# ceph dashboard create-self-signed-cert
Generate the key and certificate; this produces two files, dashboard.crt and dashboard.key
[root@ceph1 ceph]# openssl req -new -nodes -x509   -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650   -keyout dashboard.key -out dashboard.crt -extensions v3_ca
Configure the service address and port; the default port is 8443, changed here to 7000
[root@ceph1 ceph]# ceph config set mgr mgr/dashboard/server_addr 192.168.200.126
[root@ceph1 ceph]# ceph config set mgr mgr/dashboard/server_port 7000
[root@ceph1 ceph]# ceph dashboard set-login-credentials admin admin
[root@ceph1 ceph]# systemctl restart [email protected]
[root@ceph1 ceph]# ceph mgr services
{
    "dashboard": "https://192.168.200.126:7000/"
}

Push the Ceph configuration file to the other cluster nodes
[root@ceph1 ceph]# ceph-deploy --overwrite-conf config push ceph2
[root@ceph1 ceph]# ceph-deploy --overwrite-conf config push ceph3



https://192.168.200.126:7000/#/login




Using block storage

[root@ceph3 ceph]# ceph osd pool create rbd 128  
[root@ceph3 ceph]# ceph osd pool get rbd pg_num
pg_num: 128
[root@ceph3 ceph]# ceph auth add client.rbd mon 'allow r' osd 'allow rwx pool=rbd'
[root@ceph3 ceph]# ceph auth export client.rbd -o ceph.client.rbd.keyring

[root@ceph3 ceph]# rbd create rbd1 --size 1024 --name client.rbd
[root@ceph3 ceph]# rbd ls -p rbd --name client.rbd
rbd1
[root@ceph3 ceph]# rbd --image rbd1 info --name client.rbd  
rbd image 'rbd1':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        id: 85d36b8b4567
        block_name_prefix: rbd_data.85d36b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Sun Nov 17 04:33:17 2019
        
Placement groups (PGs) determine how objects in a pool are distributed across OSDs. Each disk is one OSD; with three sdb disks here (fewer than 5 OSDs), a pg_num of 128 is used.
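
A rough sanity check (a rule of thumb, not stated in the original): pg_num ≈ (number of OSDs × 100) / replica count, rounded up to a power of two. Here (3 × 100) / 3 = 100, which rounds up to 128, matching the value used when the pool was created.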

The CentOS 7 kernel RBD client does not support some of these image features (object-map, fast-diff, deep-flatten), so they are disabled, together with exclusive-lock, before mapping the image:
[root@ceph3 ceph]# rbd feature disable rbd1 exclusive-lock object-map deep-flatten fast-diff --name client.rbd
[root@ceph3 ceph]# rbd map --image rbd1 --name client.rbd
/dev/rbd0
[root@ceph3 ceph]# rbd showmapped --name client.rbd
id pool image snap device    
0  rbd  rbd1  -    /dev/rbd0
[root@ceph3 ceph]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=256    agcount=8, agsize=32752 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0        finobt=0
data     =                       bsize=4096   blocks=262016, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=768, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@ceph3 ceph]# mount /dev/rbd0 /mnt/
[root@ceph3 ceph]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 467M     0  467M   0% /dev
tmpfs                    479M     0  479M   0% /dev/shm
tmpfs                    479M   13M  466M   3% /run
tmpfs                    479M     0  479M   0% /sys/fs/cgroup
/dev/mapper/centos-root   50G  1.9G   49G   4% /
/dev/mapper/centos-home   28G   33M   28G   1% /home
/dev/sda1                497M  139M  359M  28% /boot
tmpfs                    479M   24K  479M   1% /var/lib/ceph/osd/ceph-2
tmpfs                     96M     0   96M   0% /run/user/0
/dev/rbd0               1021M   33M  989M   4% /mnt
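
The mapping above does not survive a reboot. If persistence is wanted, one option (a sketch, assuming the exported ceph.client.rbd.keyring sits in /etc/ceph and that the rbdmap service from ceph-common is available) is to register the image in /etc/ceph/rbdmap and enable the service:
[root@ceph3 ceph]# echo "rbd/rbd1 id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring" >> /etc/ceph/rbdmap
[root@ceph3 ceph]# systemctl enable rbdmap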


Delete the storage pool
[root@ceph3 ceph]# umount /dev/rbd0

[root@ceph3 ceph]# rbd unmap /dev/rbd/rbd/rbd1
[root@ceph3 ceph]# ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
pool 'rbd' removed
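
If the delete is refused with an error saying pool deletion is disabled, the monitors' mon_allow_pool_delete option has to be turned on first (pool deletion is disallowed by default); one way to do that, shown here as an assumption rather than a step from the original run, is:
[root@ceph3 ceph]# ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'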

