Pacemaker + Corosync + DRBD Quick Configuration Guide

1. Deploy the Distributed Block Device DRBD

1.1 Initialize the Base Environment
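
This guide assumes two CentOS 7 nodes, node1 (192.168.10.30) and node2 (192.168.10.40), each with a spare block device /dev/sdb1 reserved for DRBD. A minimal sketch of the base initialization, run on both nodes (firewalld and SELinux are simply disabled here for brevity; harden as appropriate for your environment):

[root@node1 ~]# hostnamectl set-hostname node1        # use node2 on the other server
[root@node1 ~]# cat >> /etc/hosts <<EOF
192.168.10.30 node1
192.168.10.40 node2
EOF
[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
[root@node1 ~]# setenforce 0
[root@node1 ~]# sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config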

1.2 Deploy the DRBD Environment

Run the following on both the primary and standby servers.

# Update the system kernel and reboot the server
[root@node1 ~]# yum install kernel-devel kernel -y
[root@node1 ~]# reboot
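
# After the reboot, confirm that the running kernel matches the installed kernel-devel: the KDIR path used for the DRBD module build below must point at the running kernel's source tree (the exact version string depends on your update)
[root@node1 ~]# uname -r
[root@node1 ~]# ls /usr/src/kernels/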

# Install DRBD
[root@node1 software]# tar zxf drbd-9.0.18-1.tar.gz
[root@node1 drbd-9.0.18-1]# make KDIR=/usr/src/kernels/3.10.0-957.21.3.el7.x86_64/
[root@node1 drbd-9.0.18-1]# make install
[root@node1 software]# yum install drbd90-utils-9.6.0-1.el7.elrepo.x86_64.rpm -y
[root@node1 software]# yum install drbd90-utils-sysvinit-9.6.0-1.el7.elrepo.x86_64.rpm -y
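
Before moving on, it is worth verifying that the freshly built kernel module loads (a quick sanity check, assuming the build and the utils installation above succeeded):

[root@node1 software]# modprobe drbd
[root@node1 software]# modinfo drbd | grep ^version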

1.3 Create the DRBD Resource File

[root@node1 software]# vim /etc/drbd.d/data.res
resource data {
  on node1 {                                  # on <hostname>
    device    /dev/drbd1;                     # the DRBD device node exposed to the system (used by mkfs/mount below)
    disk      /dev/sdb1;                      # the backing disk that stores the replicated data
    address   192.168.10.30:7789;
    meta-disk internal;
  }
  on node2 {
    device    /dev/drbd1;
    disk      /dev/sdb1;
    address   192.168.10.40:7789;
    meta-disk internal;
  }
}

1.4 Modify the DRBD Global and Common Configuration File

[root@node1 ~]# vim /etc/drbd.d/global_common.conf
global {
    usage-count yes;
}
common {
    handlers {
    }
    startup {
    }
    options {
        # automatically promote the device to Primary when it is mounted, and demote it to Secondary when it is unmounted
        auto-promote yes;
    }

    disk {
    }

    net {
        # use protocol C to ensure synchronous (real-time) replication
        protocol C;
    }
}
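
The resource file and the global/common configuration must be identical on node1 and node2. One simple way, assuming root SSH access between the nodes, is to copy them over from node1:

[root@node1 ~]# scp /etc/drbd.d/data.res /etc/drbd.d/global_common.conf root@node2:/etc/drbd.d/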

1.5 Start the DRBD Service

[root@node1 ~]# systemctl start drbd

1.6 Create the DRBD Metadata

# If the disk used as the DRBD backing partition already contains a filesystem (for example, mkfs.xfs /dev/sdb1 was run on it earlier), creating the DRBD metadata will fail; in that case destroy the old filesystem signature first:
[root@node1 ~]# dd if=/dev/zero of=/dev/sdb1 bs=1M count=100

# Create the metadata
[root@node1 ~]# drbdadm create-md data
  --==  Thank you for participating in the global usage survey  ==--
The server's response is:
initializing activity log
initializing bitmap (320 KB) to all zero
Writing meta data...
New drbd meta data block successfully created.

# Check the DRBD status at this point: node1 and node2 are both Secondary, and the data is Inconsistent (not yet synchronized)
[root@node1 ~]# drbdadm status data
data role:Secondary
  disk:Inconsistent
  node2 role:Secondary
    peer-disk:Inconsistent

1.7 Set the DRBD Primary Node and Create the Filesystem

# A primary node has to be promoted manually the first time; afterwards, auto-promote switches the primary/secondary roles automatically on mount/umount
[root@node1 ~]# drbdadm primary --force data
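
# The initial full synchronization from node1 to node2 now starts; its progress can be watched, for example, with:
[root@node1 ~]# drbdadm status data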

# Create the filesystem
[root@node1 ~]# mkfs.xfs /dev/drbd1
meta-data=/dev/drbd1             isize=512    agcount=4, agsize=655210 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=2620839, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none

# Mount it on a local directory
[root@node1 ~]# mkdir /mydata
[root@node1 ~]# mount /dev/drbd1 /mydata/
[root@node1 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root  8.0G  1.5G  6.6G  18% /
devtmpfs                 475M     0  475M   0% /dev
tmpfs                    487M     0  487M   0% /dev/shm
tmpfs                    487M  7.6M  479M   2% /run
tmpfs                    487M     0  487M   0% /sys/fs/cgroup
/dev/sda1               1014M  156M  859M  16% /boot
tmpfs                     98M     0   98M   0% /run/user/0
/dev/drbd1                10G   33M   10G   1% /mydata
[root@node1 ~]# drbdadm status data
data role:Primary
  disk:UpToDate
  node2 role:Secondary
    peer-disk:UpToDate

1.8 Primary/Standby Failover Test

# After writing files under /mydata/, unmount the disk on the primary and mount it on the standby, then check on the standby that the files have been replicated
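# For example, first create a few test files on the primary (hypothetical names matching the listing on node2 below)
[root@node1 ~]# touch /mydata/a /mydata/b /mydata/c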
[root@node1 ~]# umount /mydata/
[root@node1 ~]# drbdadm secondary data
[root@node1 ~]# drbdadm status data
data role:Secondary
  disk:UpToDate
  node2 role:Secondary
    peer-disk:UpToDate

[root@node2 ~]# mkdir /mydata
[root@node2 ~]# mount /dev/drbd1 /mydata/
[root@node2 ~]# drbdadm status data
data role:Primary
  disk:UpToDate
  node1 role:Secondary
    peer-disk:UpToDate

[root@node2 ~]# ls /mydata/
a  b  c

2. Deploy the High-Availability Components Pacemaker + Corosync

2.1 Install the Components

# Add the yum repository for the crmsh management tool on both the primary and standby nodes
[root@node1 ~]# vim /etc/yum.repos.d/crmsh.repo
[network_ha-clustering_Stable]
name=Stable High Availability/Clustering packages (CentOS_CentOS-7)
type=rpm-md
baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/
gpgcheck=1
gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/repodata/repomd.xml.key
enabled=1

# Install the crmsh management tool together with pacemaker and corosync
[root@node1 ~]# yum install crmsh pacemaker corosync

2.2 Configure Corosync

[root@node1 ~]# cd /etc/corosync/
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
totem {
    version: 2

    crypto_cipher: aes256
    crypto_hash: sha1

    interface {
        ringnumber: 0
        bindnetaddr: 192.168.10.30
        mcastaddr: 239.255.1.1
        mcastport: 5405
        ttl: 1
    }
}

logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}

quorum {
    provider: corosync_votequorum
}

nodelist {
    node {
        ring0_addr: node1
        nodeid: 1
    }
    node {
        ring0_addr: node2
        nodeid: 2
    }
}

# Generate the corosync authentication key and copy it, together with the configuration, to node2
[root@node1 corosync]# corosync-keygen
[root@node1 corosync]# scp authkey root@node2:/etc/corosync/
[root@node1 corosync]# scp corosync.conf root@node2:/etc/corosync/

# Start corosync and pacemaker (on both nodes)
[root@node1 ~]# systemctl start corosync
[root@node1 ~]# systemctl start pacemaker
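
Before looking at the cluster itself, the corosync ring can be checked directly, for example, to confirm that both nodes have joined the membership:

[root@node1 ~]# corosync-cfgtool -s
[root@node1 ~]# corosync-cmapctl | grep members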

# Check the cluster status
[root@node1 corosync]# crm_mon
Stack: corosync
Current DC: node1 (version 1.1.19-8.el7_6.4-c3c624ea3d) - partition with quorum
Last updated: Fri Jul  5 21:48:22 2019
Last change: Fri Jul  5 21:45:52 2019 by hacluster via crmd on node1

2 nodes configured
0 resources configured

Online: [ node1 node2 ]

No active resources

2.3 Disable the STONITH Device

[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# show
node 1: node1
node 2: node2
property cib-bootstrap-options: \
    have-watchdog=false \
    dc-version=1.1.19-8.el7_6.4-c3c624ea3d \
    cluster-infrastructure=corosync
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# property start-failure-is-fatal=false
crm(live)configure# property default-action-timeout=180s
crm(live)configure# rsc_defaults resource-stickiness=100
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# show
node 1: node1
node 2: node2
property cib-bootstrap-options: \
    have-watchdog=false \
    dc-version=1.1.19-8.el7_6.4-c3c624ea3d \
    cluster-infrastructure=corosync \
    stonith-enabled=false \
    no-quorum-policy=ignore \
    start-failure-is-fatal=false \
    default-action-timeout=180s
rsc_defaults rsc-options: \
    resource-stickiness=100

2.4 Add the Virtual IP Resource

crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=192.168.10.50 op monitor interval=30s
crm(live)configure# commit
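
After the commit, the virtual IP should be started on one of the nodes; from a second terminal this can be checked, for example, with:

[root@node1 ~]# crm status
[root@node1 ~]# ip addr show | grep 192.168.10.50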

2.5 Add the DRBD Filesystem Resource

Enable the drbd service to start at boot on both nodes: the cluster only takes over mounting the DRBD device, it does not start DRBD itself, so DRBD must already be running on both nodes in the Secondary role (auto-promote then promotes whichever node mounts the device). A minimal example is shown below.
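
A minimal example (run on both nodes):

[root@node1 ~]# systemctl enable drbd
[root@node2 ~]# systemctl enable drbd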

crm(live)configure# primitive drbd ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/mydata fstype=xfs
crm(live)configure# commit

2.6 Group the VIP and DRBD Resources and Start the VIP Before DRBD

Use the group command to group the two resources; members of a group start in the order listed (the VIP first, then the DRBD filesystem) and stop in reverse order.

crm(live)configure# group vip_drbd vip drbd
crm(live)configure# commit
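
With the group committed, the cluster starts the VIP first and then mounts the DRBD filesystem on the same node. A simple way to exercise a failover (a sketch, using the node names from above) is to put the active node into standby, watch the resources move to the other node, and then bring it back online:

[root@node1 ~]# crm node standby node1
[root@node1 ~]# crm_mon -1
[root@node1 ~]# crm node online node1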