Configuring GFS and GNBD on Red Hat ES4


Author: Seo Jin-woo

- Building the base environment

Install Red Hat ES4 on three servers.

* Hostname information

n001        192.168.123.111        gfs client1
n002        192.168.123.112        gfs client2
n003        192.168.123.113        gfs, gnbd server
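The cluster.conf file and the gnbd tools below refer to the nodes by these short names, so every node must be able to resolve them. A minimal /etc/hosts sketch, assuming no DNS entries exist for these hosts:

192.168.123.111        n001
192.168.123.112        n002
192.168.123.113        n003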

First, install a 2.6.9-11 series kernel on all three servers.
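The cman-kernel, dlm-kernel, gnbd-kernel, and GFS-kernel packages below are built against specific kernel revisions, so it is worth confirming the running kernel on every node first (a minimal check):

uname -r        # expect a 2.6.9-11 series kernel on all three nodes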

Then install the GFS and GNBD packages on all three servers.

* Installing the base cluster packages

[root@n003 gfs+rhcs]# rpm -Uvh magma-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh magma-devel-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh ccs-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh ccs-devel-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh gulm-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh gulm-devel-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh magma-plugins-1.0.0-0.i386.rpm

[root@n003 gfs+rhcs]# rpm -Uvh cman-kernel-2.6.9-36.0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh cman-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh dlm-kernel-2.6.9-34.0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh dlm-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh dlm-devel-1.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh perl-Net-Telnet-3.03-3.noarch.rpm

[root@n003 gfs+rhcs]# rpm -Uvh fence-1.32.1-0.i686.rpm

[root@n003 gfs]# rpm -Uvh gnbd-kernel-2.6.9-8.27.i686.rpm

[root@n003 gfs]# rpm -Uvh gnbd-1.0.0-0.i686.rpm

* Installing the cluster file system packages

[root@n003 gfs]# rpm -Uvh GFS-kernel-2.6.9-35.5.i686.rpm

[root@n003 gfs]# rpm -Uvh GFS-6.1.0-0.i386.rpm

[root@n003 rhn-packages]# rpm -Uvh device-mapper-1.01.01-1.RHEL4.i386.rpm

[root@n003 rhn-packages]# rpm -Uvh lvm2-2.01.08-1.0.RHEL4.i386.rpm

[root@n003 gfs]# rpm -Uvh lvm2-cluster-2.01.09-5.0.RHEL4.i386.rpm

[root@n003 gfs+rhcs]# rpm -Uvh iddev-2.0.0-0.i686.rpm

[root@n003 gfs+rhcs]# rpm -Uvh iddev-devel-2.0.0-0.i686.rpm

These are the basic packages needed to use GFS and GNBD. To use them together with Red Hat Cluster Suite, a few more packages are available (they install the same way, as shown after the list):

rgmanager-1.9.34-1.i386.rpm   ;; cluster resource manager

system-config-cluster-1.0.12-1.0.noarch.rpm ;; CCS configuration tool
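A sketch of the install, using the exact filenames listed above:

[root@n003 gfs+rhcs]# rpm -Uvh rgmanager-1.9.34-1.i386.rpm
[root@n003 gfs+rhcs]# rpm -Uvh system-config-cluster-1.0.12-1.0.noarch.rpm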

First, set up a basic cluster.conf file.

[root@n003 gfs+rhcs]# cd /etc

[root@n003 etc]# ensh mkdir cluster

### executing in n001

### executing in n002

### executing in n003

[root@n003 cluster]# vi cluster.conf

————————————————————————————–

<?xml version="1.0"?>
<cluster name="cluster" config_version="1">
<cman>
</cman>
<clusternodes>
<clusternode name="n001" votes="1">
        <fence>
                <method name="single">
                        <device name="human" ipaddr="n001"/>
                </method>
        </fence>
</clusternode>
<clusternode name="n002" votes="1">
        <fence>
                <method name="single">
                        <device name="human" ipaddr="n002"/>
                </method>
        </fence>
</clusternode>
<clusternode name="n003" votes="1">
        <fence>
                <method name="single">
                        <device name="human" ipaddr="n003"/>
                </method>
        </fence>
</clusternode>
</clusternodes>
<fence_devices>
        <device name="human" agent="fence_manual"/>
</fence_devices>
</cluster>

————————————————————————————–

[root@n003 cluster]# ensync cluster.conf
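ensh and ensync here appear to be site-local helpers that run a command on, and copy a file to, every node. If you do not have them, a plain ssh/scp loop does the same job (a sketch, assuming passwordless root ssh between the nodes):

for n in n001 n002; do
    ssh $n mkdir -p /etc/cluster
    scp /etc/cluster/cluster.conf $n:/etc/cluster/
done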

Load the kernel modules and start the cluster services (the same sequence runs on every node):

[root@n003 cluster]# depmod -a
[root@n003 cluster]# modprobe dm-mod          # device-mapper
[root@n003 cluster]# modprobe gfs             # GFS file system module
[root@n003 cluster]# modprobe lock_dlm        # DLM locking module
[root@n003 cluster]# ccsd                     # cluster configuration daemon
[root@n003 cluster]# cman_tool join           # join the cluster
[root@n003 cluster]# fence_tool join          # join the fence domain
[root@n003 cluster]# clvmd                    # clustered LVM daemon
[root@n003 cluster]# vgchange -aly            # activate LVM volumes

With this, GFS is ready to use.

Let's run a few tests to check that it is working properly.

[root@n003 cluster]# ccs_test connect

Connect successful.

Connection descriptor = 1

[root@n003 cluster]# ccs_test get node '//cluster/@name'

Get successful.

Value = <cluster>

[root@n003 cluster]# cat /proc/cluster/nodes

Node  Votes Exp Sts  Name

   1    1    3   M   n003

   2    1    3   M   n001

   3    1    3   M   n002

[root@n003 cluster]# cat /proc/cluster/dlm_stats

DLM stats (HZ=1000)

Lock operations:          0

Unlock operations:        0

Convert operations:       0

Completion ASTs:          0

Blocking ASTs:            0

Lockqueue        num  waittime   ave

[root@n003 cluster]# cat /proc/cluster/services

Service          Name                              GID LID State     Code

Fence Domain:    "default"                           1   2 run       -

[1 2 3]

DLM Lock Space:  "clvmd"                             2   3 run       -

[1 2 3]

[root@n003 cluster]# cman_tool status

Protocol version: 5.0.1

Config version: 1

Cluster name: cluster

Cluster ID: 13364

Cluster Member: Yes

Membership state: Cluster-Member

Nodes: 3

Expected_votes: 3

Total_votes: 3

Quorum: 2  

Active subsystems: 3

Node name: n003

Node addresses: 192.168.123.113  

This confirms that everything is working correctly.

To shut a server down, the following procedure is required:

[root@n003 cluster]# umount /mnt              # unmount all GFS file systems first
[root@n003 cluster]# vgchange -aln            # deactivate the LVM volumes
[root@n003 cluster]# killall clvmd            # stop the clustered LVM daemon
[root@n003 cluster]# fence_tool leave         # leave the fence domain
[root@n003 cluster]# cman_tool leave          # leave the cluster
[root@n003 cluster]# killall ccsd             # stop the configuration daemon

When installed from the Red Hat RPMs, these services are automatically registered with chkconfig, which can conflict with this manual setup, so disable all of the related daemons:

# chkconfig ccsd off

# chkconfig clvmd off

# chkconfig cman off

# chkconfig fenced off

# chkconfig gfs off

# chkconfig lock_gulmd off

Preparing the GNBD environment

Prepare the disk device to be exported via gnbd, using fdisk to create the partition (see the sketch below).
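For example (a sketch only; the device name depends on your hardware, and repartitioning destroys existing data):

fdisk /dev/hda        # create a new partition: n, then w to write the table
partprobe             # make the kernel re-read the partition table
fdisk -l /dev/hda     # confirm the new partition is visible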

GNBD server setup

[root@n003 cluster]# modprobe gnbd

[root@n003 cluster]# /sbin/gnbd_serv -v

[root@n003 cluster]# gnbd_export -v -e export1 -d /dev/hda6

Check that the export was registered correctly.

[root@n003 cluster]# gnbd_export -v -l

Server[1] : export1

————————–

      file : /dev/hda6

   sectors : 19551042

  readonly : no

    cached : no

   timeout : 60

GNBD client setup

[root@n001 ~]# modprobe gnbd

[root@n001 ~]# gnbd_import -v -i n003

Check that the import succeeded.

[root@n001 ~]# gnbd_import -v -l

Device name : export1

———————-

    Minor # : 0

  Proc name : /dev/gnbd0

     Server : n003

       Port : 14567

      State : Close Connected Clear

   Readonly : No

    Sectors : 19551042

[root@n001 ~]# ll /dev/gnbd/export1

brw-r--r--  1 root root 251, 0 Oct  7 13:46 /dev/gnbd/export1

The /dev/gnbd/export1 device can now be used like a local block device.

Now go to the GNBD server and create the GFS file system (-j 3 creates one journal for each node that will mount it).

[root@n003 cluster]# gfs_mkfs -p lock_dlm -t cluster:export1 -j 3 /dev/hda6

This will destroy any data on /dev/hda6.

Are you sure you want to proceed? [y/n] y

Now mount it on the server and on each client.

[root@n003 cluster]# mount -t gfs /dev/hda6 /mnt

[root@n001 ~]# mount -t gfs /dev/gnbd/export1 /mnt

[root@n002 ~]# mount -t gfs /dev/gnbd/export1 /mnt

[root@n001 ~]# df -Th | grep /mnt

/dev/gnbd/export1    gfs  9.0G   20K  9.0G   1% /mnt

[root@n002 ~]# df -Th | grep /mnt

/dev/gnbd/export1    gfs  9.0G   20K  9.0G   1% /mnt

[root@n003 cluster]# df -Th | grep /mnt

/dev/hda6            gfs 9.0G   20K  9.0G   1% /mnt

[root@n003 cluster]# cat /proc/cluster/services

Service          Name                              GID LID State     Code

Fence Domain:    "default"                           1   2 run       -

[1 2 3]

DLM Lock Space:  "clvmd"                             2   3 run       -

[1 2 3]

DLM Lock Space:  "export1"                           4   5 run       -

[1 2 3]

GFS Mount Group: "export1"                           5   6 run       -

[1 2 3]

User:            "gnbd"                              3   4 run       -

[1]

Now let's build the GFS+GNBD cluster file system on top of LVM.

Using LVM makes it very convenient to expand the GNBD storage (see the expansion sketch after the df output below).

First, create two LVM-type (8e) partitions on the GNBD server.

[root@n003 ~]# fdisk /dev/hda

   Device Boot      Start         End      Blocks   Id  System

/dev/hda1   *           1          13      104391   83  Linux

/dev/hda2              14         395     3068415   83  Linux

/dev/hda3             396        1670    10241437+  83  Linux

/dev/hda4            1671        9729    64733917+   5  Extended

/dev/hda5            1671        1801     1052226   82  Linux swap

/dev/hda6            1802        3018     9775521   83  Linux

/dev/hda7            3019        4235     9775521   8e  Linux LVM

/dev/hda8            4236        5452     9775521   8e  Linux LVM

[root@n003 ~]# partprobe

[root@n003 ~]# pvcreate /dev/hda7 /dev/hda8

  Physical volume "/dev/hda7" successfully created

  Physical volume "/dev/hda8" successfully created

[root@n003 ~]# vgcreate vg0 /dev/hda7 /dev/hda8

  Volume group "vg0" successfully created

[root@n003 ~]# lvcreate -L 15G -n lvmdata vg0

[root@n003 ~]# lvcreate -L 10G -n lvm0 vg0

[root@n003 ~]# gnbd_export -v -e export2 -d /dev/vg0/lvm0

gnbd_export: created GNBD export2 serving file /dev/vg0/lvm0

[root@n002 data]# gnbd_import -i n003

gnbd_import: created gnbd device export2

gnbd_recvd: gnbd_recvd started

[root@n002 data]# ls /dev/gnbd/

export1  export2  

[root@n002 data]# gnbd_import -l

Device name : export1

———————-

    Minor # : 0

  Proc name : /dev/gnbd0

     Server : n003

       Port : 14567

      State : Open Connected Clear

   Readonly : No

    Sectors : 19551042

Device name : export2

———————-

    Minor # : 1

  Proc name : /dev/gnbd1

     Server : n003

       Port : 14567

      State : Close Connected Clear

   Readonly : No

    Sectors : 20971520


[root@n003 ~]# gfs_mkfs -p lock_dlm -t cluster:export2 -j 3 /dev/vg0/lvm0

[root@n003 ~]# mount -t gfs /dev/vg0/lvm0 /lvmdata

[root@n002 ~]# mount -t gfs /dev/gnbd/export2 /lvmdata/

[root@n001 ~]# mount -t gfs /dev/gnbd/export2 /lvmdata/

[root@n003 ~]# df -Th

Filesystem           Type    Size  Used Avail Use% Mounted on

/dev/hda2            ext3    2.9G  231M  2.6G   9% /

/dev/hda1            ext3     99M   11M   83M  12% /boot

none                 tmpfs    249M     0  249M   0% /dev/shm

/dev/hda3            ext3    9.7G  2.3G  6.9G  26% /usr

/dev/hda6             gfs    9.0G   81M  8.9G   1% /data

/dev/mapper/vg0-lvm0  gfs    9.7G   20K  9.7G   1% /lvmdata
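This LVM layout is what makes expansion convenient: grow the logical volume out of the free space in vg0, then grow the mounted GFS file system. A sketch, assuming vg0 still has free extents and that gfs_grow from the GFS package is available; GNBD clients may need to re-import the device to see the new size:

[root@n003 ~]# lvextend -L +2G /dev/vg0/lvm0
[root@n003 ~]# gfs_grow /lvmdata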

##################################################################

* GFS start script *

modprobe dm-mod
modprobe gfs
modprobe lock_dlm
ccsd
sleep 2
cman_tool join
sleep 2
fence_tool join
sleep 2
clvmd
modprobe gnbd
gnbd_import -v -i n003                 # import the exports from the gnbd server
mount -t gfs /dev/gnbd/export1 /data

* GFS stop script *

umount /data
sleep 2
gnbd_import -r export1                 # remove the imported gnbd device
killall clvmd
fence_tool leave
sleep 2
cman_tool leave
killall ccsd
rmmod gnbd
rmmod lock_dlm
rmmod gfs
rmmod dm-mod

### gfs init script ########################################################################

#!/bin/bash
#
# description: mount/unmount gfs filesystems configured in /etc/fstab
#
### BEGIN INIT INFO
# Provides:
### END INIT INFO

. /etc/init.d/functions

[ -f /etc/sysconfig/cluster ] && . /etc/sysconfig/cluster

#
# This script's behavior is modeled closely after the netfs script.
#
GFSFSTAB=$(LC_ALL=C awk '!/^#/ && $3 == "gfs" { print $2 }' /etc/fstab)
GFSMTAB=$(LC_ALL=C awk '!/^#/ && $3 ~ /^gfs/ && $2 != "/" { print $2 }' /proc/mounts)

# See how we were called.
case "$1" in
  start)
        if [ -n "$GFSFSTAB" ]
        then
                #load_module lock_harness MODULE_LOCK_HARNESS
                #load_module crc32        MODULE_CRC32
                modprobe lock_dlm
                modprobe gfs
                action $"Mounting GFS filesystems: " mount -a -t gfs
        fi
        touch /var/lock/subsys/gfs
        ;;
  stop)
        if [ -n "$GFSMTAB" ]
        then
                sig=
                retry=6
                remaining=`LC_ALL=C awk '!/^#/ && $3 ~ /^gfs/ && $2 != "/" {print $2}' /proc/mounts`
                while [ -n "$remaining" -a "$retry" -gt 0 ]
                do
                        action $"Unmounting GFS filesystems: " umount -a -t gfs
                        if [ $retry -eq 1 ]        # last attempt: fall back to a lazy unmount
                        then
                                action $"Unmounting GFS filesystems (lazy): " umount -l -a -t gfs
                                break
                        fi
                        sleep 2
                        remaining=`LC_ALL=C awk '!/^#/ && $3 ~ /^gfs/ && $2 != "/" {print $2}' /proc/mounts`
                        [ -z "$remaining" ] && break
                        /sbin/fuser -k -m $sig $remaining >/dev/null
                        sleep 10
                        retry=$(($retry - 1))
                        sig=-9
                done
        fi
        modprobe -r gfs
        modprobe -r lock_dlm
        rm -f /var/lock/subsys/gfs
        ;;
  status)
        if [ -f /proc/mounts ]
        then
                [ -n "$GFSFSTAB" ] && {
                     echo $"Configured GFS mountpoints: "
                     for fs in $GFSFSTAB; do echo $fs ; done
                }
                [ -n "$GFSMTAB" ] && {
                      echo $"Active GFS mountpoints: "
                      for fs in $GFSMTAB; do echo $fs ; done
                }
        else
                echo "/proc filesystem unavailable"
        fi
        ;;
  restart)
        $0 stop
        $0 start
        ;;
  reload)
        $0 start
        ;;
  *)
        echo $"Usage: $0 {start|stop|restart|reload|status}"
        exit 1
esac

exit 0

—————————————————————————————–

#### /etc/fstab entry #######################################################################

/dev/sda1               /gfs                    gfs     defaults        0 0
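On the GNBD clients the imported device would be used instead (a sketch, assuming the export1 device from above; the gnbd import has to happen before this mount, e.g. from the start script shown earlier):

/dev/gnbd/export1       /gfs                    gfs     defaults        0 0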

#### nfs + ha + gfs #########################################################################

- NFS server

/etc/exports

———————————————————————————————-

/gfs   *(rw,sync,no_root_squash,fsid=1)

/etc/ha.d/ha.cf

———————————————————————————————-

debugfile /var/log/ha-debug

logfile /var/log/ha-log

logfacility     local0

keepalive 2

deadtime 5

hopfudge 1

udpport 1001

udp     eth0

auto_failback off

node    xeon01

node    xeon02

ping 192.168.123.254

#respawn hacluster /usr/lib/heartbeat/ccm

respawn hacluster /usr/lib/heartbeat/ipfail

apiauth ipfail uid=hacluster

/etc/ha.d/haresources

————————————————————————————————–

xeon01 192.168.123.118 nfslock nfs

xeon02 192.168.123.119 nfslock nfs

- NFS client

———————————————————————————————-

mount -t nfs -o proto=tcp,soft,intr,timeo=3  192.168.123.119:/gfs  /gfs

