Ceph Course Summary

 

Chapter 1: Preparation Before Installing Ceph

# Create the ceph user on all nodes

useradd ceph; echo 123456 | passwd --stdin ceph

# Grant the ceph user passwordless sudo on all nodes

echo "ceph ALL = (root) NOPASSWD: ALL" | sudo tee -a /etc/sudoers.d/ceph

sudo chmod 0440 /etc/sudoers.d/ceph
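
A quick syntax check of the new sudoers fragment avoids locking yourself out of sudo; visudo -c validates the file without applying anything:

sudo visudo -c -f /etc/sudoers.d/ceph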

# Configure ceph.repo on the ceph-deploy node

cat /etc/yum.repos.d/ceph.repo

[Ceph]

name=Ceph packages for $basearch

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64

enabled=1

gpgcheck=1

type=rpm-md

gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc

priority=1

[Ceph-noarch]

name=Ceph noarch packages

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch

enabled=1

gpgcheck=1

type=rpm-md

gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc

priority=1

[ceph-source]

name=Ceph source packages

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS

enabled=1

gpgcheck=1

type=rpm-md

gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc

priority=1

# Install ceph-deploy on the ceph-deploy node

sudo yum update

sudo yum install ceph-deploy

# Generate an SSH key pair on the ceph-deploy node

ssh-keygen

Copy the public key to .ssh/authorized_keys on each ceph node:

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

ssh-copy-id [email protected]

# Create ~/.ssh/config so logins do not need an explicit username

cat ~/.ssh/config

Host ceph01.wang.local

Hostname ceph01.wang.local

User ceph

Host ceph02.wang.local

Hostname ceph02.wang.local

User ceph

Host ceph03.wang.local

Hostname ceph03.wang.local

User ceph

Host ceph04.wang.local

Hostname ceph04.wang.local

User ceph

Host ceph05.wang.local

Hostname ceph05.wang.local

User ceph

Host ceph06.wang.local

Hostname ceph06.wang.local

User ceph

Host ceph07.wang.local

Hostname ceph07.wang.local

User ceph

Host ceph08.wang.local

Hostname ceph08.wang.local

User ceph

Host ceph09.wang.local

Hostname ceph09.wang.local

User ceph

Host block01.wang.local

Hostname block01.wang.local

User ceph

# Restart the time service (chronyd) on every node

for i in ceph0{1..9}.wang.local;do ssh root@${i} systemctl restart chronyd;done
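
Ceph mons are sensitive to clock skew, so it is worth spot-checking that chrony is actually synchronized on each node, for example:

for i in ceph0{1..9}.wang.local;do ssh root@${i} chronyc tracking | grep 'Leap status';done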

# Copy the ceph.repo yum source file to every node

for i in ceph0{1..9}.wang.local;do sudo scp /etc/yum.repos.d/ceph.repo root@${i}:/etc/yum.repos.d/;done

# Batch-install ceph on all nodes

for i in ceph0{1..9}.wang.local;do ssh ${i} sudo yum -y install ceph;done
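
A quick sanity check that every node ended up with the same ceph release:

for i in ceph0{1..9}.wang.local;do ssh ${i} ceph --version;done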

Chapter 2: Creating a Ceph Cluster

2.1 Create the cluster

# On the ceph-deploy node, create a directory to hold the cluster configuration

mkdir my-cluster

cd my-cluster

Create the ceph cluster configuration, specifying the initial mon node:

ceph-deploy new ceph01.wang.local

# Edit the cluster configuration file

Add the public network setting under [global]:

public network = 192.168.1.0/24
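
For reference, a minimal my-cluster/ceph.conf after this edit looks roughly like the sketch below; the fsid and mon address are generated by ceph-deploy new and are shown as placeholders here:

[global]

fsid = <generated by ceph-deploy new>

mon_initial_members = ceph01

mon_host = <ceph01 address in 192.168.1.0/24>

public network = 192.168.1.0/24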

Initialize the mon configuration:

ceph-deploy mon create-initial

Once you complete the process, your local directory should have the following keyrings:

ceph.client.admin.keyring

ceph.bootstrap-mgr.keyring

ceph.bootstrap-osd.keyring

ceph.bootstrap-mds.keyring

ceph.bootstrap-rgw.keyring

ceph.bootstrap-rbd.keyring

# Starting with luminous, ceph requires a mgr daemon

ceph-deploy admin ceph01.wang.local

ceph-deploy mgr create ceph01.wang.local #required only for luminous+ builds, i.e. >= 12.x
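
Optionally, the admin keyring can be pushed to every node so that ceph CLI commands work anywhere without specifying keyring paths; the host list is assumed from Chapter 1:

ceph-deploy admin ceph0{1..9}.wang.local

for i in ceph0{1..9}.wang.local;do ssh ${i} sudo chmod +r /etc/ceph/ceph.client.admin.keyring;done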

# Create osd daemons

ceph-deploy osd create ceph01.wang.local:vdb ceph02.wang.local:vdb ceph03.wang.local:vdb

# Check the cluster status

ssh ceph01 sudo ceph -s

ssh ceph01 sudo ceph health

ssh ceph01 sudo ceph -w

ssh block01 sudo ceph osd tree

2.2 Add nodes to the cluster

# Add mon nodes

ceph-deploy mon add ceph02.wang.local ceph03.wang.local

# Add extra mgr nodes

ceph-deploy mgr create ceph02.wang.local ceph03.wang.local

# Add osd daemons

ceph-deploy osd create ceph0{4..6}.wang.local:sd{b,c}

# Remove osd daemons

ceph osd out {6,7}

ssh ceph04 sudo systemctl stop ceph-osd@{6,7}

for i in {6,7};do ssh block01 sudo ceph osd purge ${i} --yes-i-really-mean-it;done;
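
One caveat on the sequence above: after ceph osd out, let the cluster rebalance before stopping the daemons; keep checking until all PGs report active+clean, for example:

ssh block01 sudo ceph -s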

# Replace an osd daemon's disks

ssh block01 sudo ceph osd out {6,7}

ssh ceph04 sudo systemctl stop ceph-osd@{6,7}

#destroy (not purge) keeps the OSD ids 6 and 7 so they can be reused by --osd-id below

for i in {6,7};do ssh block01 sudo ceph osd destroy ${i} --yes-i-really-mean-it;done;

# Use parted to remove the old partitions

parted /dev/sdb rm 1

parted /dev/sdb rm 2

parted /dev/sdc rm 1

parted /dev/sdc rm 2

ssh ceph04 sudo ceph-disk zap /dev/sd{b,c}

ceph-deploy disk zap ceph04:/dev/sdb # wipe the disk partitions (alternative to ceph-disk zap)

ssh ceph04 sudo ceph-disk prepare --osd-id 6 /dev/sdb

ssh ceph04 sudo ceph-disk prepare --osd-id 7 /dev/sdc

ssh ceph04 sudo ceph-disk activate /dev/sdb1

ssh ceph04 sudo ceph-disk activate /dev/sdc1

Chapter 3: Pools and Volumes

# Create a storage pool

ceph osd pool create first_pool 30

ceph osd pool set first_pool pg_num 180

ceph osd pool set first_pool pgp_num 180

ceph osd pool ls
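
The PG counts above are course values; a common rule of thumb is (number of OSDs x 100) / replica count, rounded up to the next power of two. For example, with 6 OSDs and 3 replicas:

echo $((6 * 100 / 3)) #prints 200, so round up to 256 PGs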

# Create or modify a user

ceph auth get-or-create client.cinder mon 'allow r' osd 'allow rw pool=first_pool' -o client.cinder.keyring

ceph auth caps client.cinder mon 'allow r' osd 'allow rwx pool=first_pool'

ceph auth list

ceph auth get client.cinder
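
The new identity can be exercised directly to confirm its caps work, assuming the keyring and ceph.conf are installed on the client as in the next step:

ceph -n client.cinder --keyring=/etc/ceph/client.cinder.keyring -s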

# Client preparation

Edit the ceph.conf file and add:

[client.cinder]

keyring = /etc/ceph/client.cinder.keyring

Copy the keyring to the client:

sudo scp client.cinder.keyring block01:/etc/ceph/
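
The client also needs the cluster ceph.conf so it can find the mons; assuming the deploy node keeps it in /etc/ceph:

sudo scp /etc/ceph/ceph.conf block01:/etc/ceph/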

# Create and manage volumes from the ceph client

rbd -n client.cinder -p first_pool create --size 20G cephdisk03 --image-format 2 --image-feature layering

rbd -n client.cinder -p first_pool list

# Map the volume to a device

rbd -n client.cinder -p first_pool map cephdisk03

rbd showmapped

# Format and mount

mkfs.xfs -f /dev/rbd0

mkdir /mnt/rbd

mount /dev/rbd0 /mnt/rbd

df -hT
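
To make the mapping and mount survive a reboot, the rbdmap service shipped with ceph-common can be used; a minimal sketch, reusing the client.cinder keyring from above:

echo "first_pool/cephdisk03 id=cinder,keyring=/etc/ceph/client.cinder.keyring" | sudo tee -a /etc/ceph/rbdmap

sudo systemctl enable rbdmap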

# Unmount and unmap

umount /mnt/rbd

rbd -p first_pool unmap /dev/rbd0

# Delete objects and pools

rados rm test-object-1 --pool=mytest

ceph osd pool rm mytest mytest --yes-i-really-really-mean-it
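
Note that since luminous the mons refuse pool deletion by default; if the command above is rejected, mon_allow_pool_delete can be enabled at runtime first:

ceph tell mon.\* injectargs '--mon_allow_pool_delete=true'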

Chapter 4: Ceph mgr Management

ceph mgr module enable dashboard

systemctl restart [email protected]

systemctl restart [email protected]

systemctl restart [email protected]

netstat -lntup | grep 7000
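
If nothing answers on port 7000 (the luminous dashboard default), the mgr bind address can be pinned via config-key and the dashboard probed from another node; the IP below is a placeholder for the active mgr node's address:

ceph config-key set mgr/dashboard/server_addr <mgr node IP>

curl http://ceph01.wang.local:7000/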
