Ceph CephFS Storage

CephFS kernel-mode mount

[root@node1 my-cluster]# ceph-deploy --overwrite-conf mds create node1
[root@node1 my-cluster]# ceph-deploy --overwrite-conf mds create node2
[root@node1 my-cluster]# ceph-deploy --overwrite-conf mds create node3
[root@node1 my-cluster]# ceph -s
  cluster:
    id:     3f5560c6-3af3-4983-89ec-924e8eaa9e06
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 2h)
    mgr: node1(active, since 3d), standbys: node2, node3
    mds:  3 up:standby
    osd: 3 osds: 3 up (since 2h), 3 in (since 3d)
    rgw: 1 daemon active (node1)

  task status:

  data:
    pools:   5 pools, 192 pgs
    objects: 266 objects, 221 MiB
    usage:   3.7 GiB used, 56 GiB / 60 GiB avail
    pgs:     192 active+clean

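All three MDS daemons report up:standby here because no CephFS filesystem exists yet; an MDS only becomes active once it is assigned a rank in a filesystem. If you want just the MDS summary rather than the full cluster status, ceph mds stat prints the same information as the mds: line above:

[root@node1 my-cluster]# ceph mds stat
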
[root@node1 my-cluster]# ceph osd pool create cephfs_data 16
[root@node1 my-cluster]# ceph osd pool create cephfs_metadata 16
[root@node1 my-cluster]# ceph osd lspools
·····
6 cephfs_data
7 cephfs_metadata

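CephFS needs a separate data pool and metadata pool, which is what the two pools above provide. 16 placement groups per pool is enough for this small lab cluster; the value chosen at creation time can be checked afterwards (the output below simply reflects the 16 PGs set above):

[root@node1 my-cluster]# ceph osd pool get cephfs_data pg_num
pg_num: 16
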
[root@node1 my-cluster]# ceph fs new  cephfs-demo cephfs_metadata cephfs_data
new fs with metadata pool 7 and data pool 6
[root@node1 my-cluster]# ceph fs ls
name: cephfs-demo, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@node1 my-cluster]# ceph -s
  cluster:
    id:     3f5560c6-3af3-4983-89ec-924e8eaa9e06
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 4h)
    mgr: node1(active, since 3d), standbys: node2, node3
    mds: cephfs-demo:1 {0=node1=up:active} 2 up:standby
    osd: 3 osds: 3 up (since 4h), 3 in (since 3d)
    rgw: 1 daemon active (node1)

  task status:
    scrub status:
        mds.node1: idle

  data:
    pools:   7 pools, 224 pgs
    objects: 288 objects, 221 MiB
    usage:   3.7 GiB used, 56 GiB / 60 GiB avail
    pgs:     224 active+clean

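The kernel mount below authenticates as client.admin, which is fine on a test cluster. For real clients it is cleaner to create a dedicated CephFS user whose access is limited to the filesystem; a minimal sketch, assuming a client named client.cephfs (the name is only an example) with read/write access to the root of cephfs-demo:

[root@node1 my-cluster]# ceph fs authorize cephfs-demo client.cephfs / rw
[root@node1 my-cluster]# ceph auth get client.cephfs -o /etc/ceph/ceph.client.cephfs.keyring

That keyring (with name=cephfs for the kernel mount, or ceph-fuse -n client.cephfs) would then replace client.admin in the mounts that follow.
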
[root@node1 my-cluster]# mount.ceph 192.168.6.160:6789:/ /mnt/cephfs/ -o name=admin
[root@node1 my-cluster]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 475M     0  475M    0% /dev
tmpfs                    487M     0  487M    0% /dev/shm
tmpfs                    487M  7.8M  479M    2% /run
tmpfs                    487M     0  487M    0% /sys/fs/cgroup
/dev/mapper/centos-root   47G  2.1G   45G    5% /
/dev/sda1               1014M  168M  847M   17% /boot
tmpfs                     98M     0   98M    0% /run/user/0
tmpfs                    487M   52K  487M    1% /var/lib/ceph/osd/ceph-0
/dev/rbd0                 15G   41M   14G    1% /mnt/rbd
192.168.6.160:6789:/      18G     0   18G    0% /mnt/cephfs
[root@node1 my-cluster]# ceph df
RAW STORAGE:
    CLASS     SIZE       AVAIL      USED        RAW USED     %RAW USED
    hdd       60 GiB     56 GiB     671 MiB      3.7 GiB          6.09
    TOTAL     60 GiB     56 GiB     671 MiB      3.7 GiB          6.09

POOLS:
    POOL                    ID     PGS     STORED      OBJECTS     USED        %USED     MAX AVAIL
    ceph-demo                1      64     216 MiB          79     652 MiB      1.18        18 GiB
    .rgw.root                2      32     1.2 KiB           4     768 KiB         0        18 GiB
    default.rgw.control      3      32         0 B           8         0 B         0        18 GiB
    default.rgw.meta         4      32         0 B           0         0 B         0        18 GiB
    default.rgw.log          5      32         0 B         175         0 B         0        18 GiB
    cephfs_data              6      16         0 B           0         0 B         0        18 GiB
    cephfs_metadata          7      16     2.5 KiB          22     1.5 MiB         0        18 GiB
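
The mount.ceph call above succeeds without an explicit secret because the admin keyring already sits under /etc/ceph/ on this node and recent mount helpers can look the key up from it via the name= option. To make the kernel mount survive a reboot, it can be added to /etc/fstab; a minimal sketch, assuming the key is exported to /etc/ceph/admin.secret (the file name is arbitrary):

[root@node1 my-cluster]# ceph auth get-key client.admin > /etc/ceph/admin.secret
[root@node1 my-cluster]# echo '192.168.6.160:6789:/ /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev 0 0' >> /etc/fstab

mount -a (or the next boot) will then pick the entry up.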

CephFS user-mode mount (ceph-fuse)

[root@node1 ~]# mkdir -pv /mnt/ceph-fuse
[root@node1 ~]# yum install ceph-fuse -y

[root@node1 ~]# ceph-fuse -n client.admin  -m 192.168.6.160:6789,192.168.6.161:6789,192.168.6.162:6789 /mnt/ceph-fuse/
2020-12-21 13:30:14.104 7f8c9a2f6f80 -1 init, newargv = 0x5610072b6ae0 newargc=9
ceph-fuse[573530]: starting ceph client
ceph-fuse[573530]: starting fuse

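ceph-fuse reads /etc/ceph/ceph.conf and the matching keyring for the user passed with -n, so nothing beyond the monitor list is needed on this node. For reference, -r restricts the client to a subdirectory of the filesystem; a sketch, assuming a directory /data already exists in cephfs-demo (the path is hypothetical; the df output below corresponds to the root mount performed above):

[root@node1 ~]# ceph-fuse -n client.admin -m 192.168.6.160:6789 -r /data /mnt/ceph-fuse/
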
[root@node1 ~]# df -hT
Filesystem              Type            Size  Used Avail Use% Mounted on
devtmpfs                devtmpfs        475M     0  475M    0% /dev
tmpfs                   tmpfs           487M     0  487M    0% /dev/shm
tmpfs                   tmpfs           487M  7.8M  479M    2% /run
tmpfs                   tmpfs           487M     0  487M    0% /sys/fs/cgroup
/dev/mapper/centos-root xfs              47G  2.2G   45G    5% /
/dev/sda1               xfs            1014M  168M  847M   17% /boot
tmpfs                   tmpfs            98M     0   98M    0% /run/user/0
tmpfs                   tmpfs           487M   52K  487M    1% /var/lib/ceph/osd/ceph-0
/dev/rbd0               ext4             15G   41M   14G    1% /mnt/rbd
192.168.6.160:6789:/    ceph             18G     0   18G    0% /mnt/cephfs
ceph-fuse               fuse.ceph-fuse   18G     0   18G    0% /mnt/ceph-fuse
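
A quick way to confirm that both clients see the same filesystem is to write through one mount and read through the other; both can be detached with a plain umount:

[root@node1 ~]# echo hello > /mnt/cephfs/test.txt
[root@node1 ~]# cat /mnt/ceph-fuse/test.txt
hello
[root@node1 ~]# umount /mnt/ceph-fuse
[root@node1 ~]# umount /mnt/cephfs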