3 Nodes + DRBD + GNBD (DMMP) + GFS on RHEL5
[Configuration GUI]
– On first access, a new ‘cluster.conf’ file is created
1. Add nodes
– Node1
– Node2
– Node3
2. Add fence devices
– GNBD
GNBD1 : Node1
GNBD2 : Node2
3. Assign fence devices to each node (see the cluster.conf sketch after this list)
– Node1
Fence Device : Level1(GNBD1) / Level2(GNBD2)
– Node2
Fence Device : Level1(GNBD2) / Level2(GNBD1)
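For reference, a minimal sketch of the fencing part of /etc/cluster/cluster.conf that the GUI writes for this layout. fence_gnbd is the stock GNBD fence agent, but the attribute names (servers, the method numbering) are assumptions to verify against your Cluster Suite release:
<clusternode name="Node1" nodeid="1">
    <fence>
        <method name="1"><device name="GNBD1"/></method>  <!-- Level1 -->
        <method name="2"><device name="GNBD2"/></method>  <!-- Level2 -->
    </fence>
</clusternode>
...
<fencedevices>
    <fencedevice agent="fence_gnbd" name="GNBD1" servers="Node1"/>
    <fencedevice agent="fence_gnbd" name="GNBD2" servers="Node2"/>
</fencedevices>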
[Node1 & Node2]
– Install DRBD v8
– Install the GNBD server
– Install GFS (see the package sketch after this list)
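The package names below are an assumption for CentOS 5 / RHEL 5 with the Cluster Storage channel; adjust them to whatever your repository actually ships:
# yum -y install cman rgmanager gfs-utils kmod-gfs gnbd kmod-gnbd
# yum -y install drbd82 kmod-drbd82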
# modprobe drbd
# vi /etc/drbd.conf
resource RSC {
protocol C;
startup {
wfc-timeout 0; ## Infinite!
degr-wfc-timeout 60; ## 1 minute.
}
disk {
on-io-error detach;
}
net {
# timeout 60;
# connect-int 10;
# ping-int 10;
# max-buffers 2048;
# max-epoch-size 2048;
allow-two-primaries;
}
syncer {
rate 60M;
# group 0;
al-extents 257;
}
on Node1 {
device /dev/drbd0;
disk /dev/sdb2;
address 210.220.224.151:7789;
meta-disk /dev/sdb1[0];
}
on Node2 {
device /dev/drbd0;
disk /dev/sdb2;
address 210.220.224.152:7789;
meta-disk /dev/sdb1[0];
}
}
# service drbd start
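A brand-new resource has no meta-data and no Primary yet, and both servers must end up Primary so that each one can export /dev/drbd0 over GNBD. A sketch of the first-time bring-up (create-md belongs before the very first ‘service drbd start’):
# drbdadm create-md RSC              ## both nodes, once: write the meta-data to /dev/sdb1
# drbdsetup /dev/drbd0 primary -o    ## one node only: force Primary and start the initial sync
# drbdadm primary RSC                ## on the peer after the sync; allow-two-primaries permits this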
# vi /etc/init.d/cluster
#!/bin/sh
# chkconfig: - 50 50
# description: init file for Cluster daemon
. /etc/init.d/functions
case "$1" in
start)
service cman start
service gfs start
service rgmanager start
;;
stop)
service rgmanager stop
sleep 1
service gfs stop
service cman stop
;;
*)
echo $"Usage: $0 {start|stop}"
exit 1
;;
esac
exit 0
# chmod +x /etc/init.d/cluster
# service cluster start
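To bring the wrapper up at boot as well, register it with chkconfig (the runlevel defaults come from the ‘chkconfig: - 50 50’ header):
# chkconfig --add cluster
# chkconfig cluster on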
# gnbd_serv -v
# gnbd_export -v -e gnbd0 -d /dev/drbd0 -u 1 (GNBDSVR1)
# gnbd_export -v -e gnbd1 -d /dev/drbd0 -u 1 (GNBDSVR2)
The uid value (-u 1) must be identical on both servers because they export the same underlying device, while the export names must differ, as with gnbd0 and gnbd1 above.
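Each server can list what it is currently exporting; assuming your gnbd_export build ships the -l listing option:
# gnbd_export -l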
[Node3 & Node4]
– Install the GNBD server and module
– Install GFS
– Install device-mapper-multipath
# vi /etc/init.d/cluster
(create the same /etc/init.d/cluster script shown for Node1 & Node2 above)
# chmod +x /etc/init.d/cluster
# service cluster start
# yum -y install device-mapper-multipath
# modprobe gnbd
# gnbd_import -v -i Node1
# gnbd_import -v -i Node2
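Assuming your gnbd_import build ships the -l listing option, the imports can be verified before configuring multipath:
# gnbd_import -l    ## should list gnbd0 (from Node1) and gnbd1 (from Node2)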
# vi /etc/multipath.conf
blacklist {
devnode "sda"    # local system disk, never multipathed
}
defaults {
udev_dir /dev
polling_interval 10
selector "round-robin 0"
path_grouping_policy multibus
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
prio_callout "/bin/true"
path_checker readsector0
rr_min_io 100
rr_weight priorities
failback immediate
no_path_retry fail
user_friendly_names yes
}
devices {
device {
vendor "GNBD"    # matches the vendor/product strings GNBD devices report
product "GNBD"
path_grouping_policy multibus
getuid_callout "/sbin/gnbd_import -q -U /block/%n"
path_checker directio
}
}
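If multipathd was already running while the file was edited, the maps can be flushed and rebuilt; this is safe here because the mpath device is not mounted yet:
# multipath -F     ## flush all unused multipath maps
# multipath -v2    ## rebuild them from the new configuration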
# service multipathd start
# ls /dev/mpath/mpath0
# multipath -ll
mpath0 (1) dm-0 GNBD,GNBD
[size=1.5G][features=0][hwhandler=0]
\_ round-robin 0 [prio=0][active]
 \_ #:#:#:# gnbd0 252:0 [active][ready]
 \_ #:#:#:# gnbd1 252:1 [active][ready]
# gfs_mkfs -p lock_dlm -t new_cluster:GFS -j 3 /dev/mpath/mpath0
# mkdir /GFS
# mount /dev/mpath/mpath0 /GFS
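To survive a reboot the mount can go into /etc/fstab; the gfs init service started by the wrapper script mounts gfs entries at boot. A sketch, assuming the user-friendly name mpath0 stays stable:
/dev/mpath/mpath0   /GFS   gfs   defaults   0 0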
When [Node1] is powered off..
# cman_tool nodes
Node Sts Inc Joined Name
1 X 92 Node1
2 M 92 2007-09-19 10:06:55 Node2
3 M 92 2007-09-19 10:06:55 Node3
4 M 4 2007-09-19 10:06:55 Node4
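clustat from rgmanager shows the same membership in a friendlier form:
# clustat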
# multipath -ll
mpath0 (1) dm-0 GNBD,GNBD
[size=1.5G][features=0][hwhandler=0]
\_ round-robin 0 [prio=0][active]
 \_ #:#:#:# gnbd1 252:1 [active][ready]
 \_ #:#:#:# gnbd0 252:0 [failed][faulty]
# gnbd_monitor
device # timeout state
1 60 normal
0 60 restartable
With the GNBD-exported resource imported on the clients, formatted as a GFS filesystem, and placed under multipath, powering down GNBD server Node1 did not interrupt the clients: their resources stayed in service throughout.
After powering Node1 back on..
>>Node1
# service drbd start
# drbdsetup /dev/drbd0 primary -o
# service cluster start
# gnbd_serv -v
# gnbd_export -v -e gnbd0 -d /dev/drbd0 -u 1
>> Clients
# gnbd_import -a //the recheck happens automatically after a while; use this to trigger it by hand.
# service multipathd restart
# multipath -v3
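When the import and the multipathd restart have settled, both paths should be back in service; the expected end state (not captured output):
# multipath -ll    ## gnbd0 and gnbd1 both [active][ready] again
# gnbd_monitor     ## device 0 back from restartable to normal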