Ceph RBD 備份和恢復

RBD 備份和恢復

# 完整備份 export(先建立快照,再將快照匯出為本地檔案)
[root@node1 ~]# rbd snap create pool_demo/rbd-demo.img@template
[root@node1 ~]# rbd export pool_demo/rbd-demo.img@template /root/rbd-test.img
Exporting image: 100% complete...done.
[root@node1 ~]# du -sh rbd-test.img
84M     rbd-test.img

# 完整恢復 import(將匯出的檔案匯入為新的 RBD 映像)
[root@node1 ~]# rbd import /root/rbd-test.img pool_demo/rbd-test-new.img
Importing image: 100% complete...done.
[root@node1 ~]# rbd -p pool_demo list
demo.img
rbd-demo.img
rbd-test-new.img
vm1-clone.img
[root@node1 ~]# rbd info pool_demo/rbd-test-new.img
rbd image 'rbd-test-new.img':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 883b63c34129
        block_name_prefix: rbd_data.883b63c34129
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Thu Dec 24 13:58:45 2020
        access_timestamp: Thu Dec 24 13:58:45 2020
        modify_timestamp: Thu Dec 24 13:58:45 2020

# 增量備份 export-diff
[root@node1 ~]# rbd snap create pool_demo/rbd-demo.img@v2
[root@node1 ~]# rbd export-diff pool_demo/rbd-demo.img@v2 rbd-demo.img@v2
Exporting image: 100% complete...done.
[root@node1 ~]# ls
anaconda-ks.cfg  ceph-deploy-ceph.log  history.txt  my-cluster  rbd-demo.img@v2  rbd-test.img
[root@node1 ~]# du -sh rbd-*
8.6M    rbd-demo.img@v2
84M     rbd-test.img

# 增量恢復 import-diff(將差異檔案套用到目標映像上)
[root@node1 ~]# rbd import-diff rbd-demo.img@v2 pool_demo/rbd-test-new.img
Importing image diff: 100% complete...done.
[root@node1 ~]# rbd -p pool_demo list
demo.img
rbd-demo.img
rbd-test-new.img
vm1-clone.img