Setup HA-Storage with GlusterFS on Fedora 11


homepage: http://www.gluster.com/community/documentation/index.php/GlusterFS
wiki: http://en.wikipedia.org/wiki/GlusterFS
other howto: http://blogama.org/node/78/
nice presentation: http://ftp.gluster.com/pub/gluster/glusterfs/talks/Z/GlusterFS.pdf

1 DESCRIPTION

Install GlusterFS on Fedora 11 and configure a mirrored volume to get HA storage.
This howto uses client-side replication; server-side storage replication is also
available (see the sketch below).
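For reference, server-side replication would pair the bricks inside glusterfsd.vol
instead: each server connects to its peer and replicates locally, so clients mount
one plain volume. A rough, untested sketch only ("posix" is the volume defined in
section 4.1 below; "remote" and "replicate" are placeholder names I made up):
---
volume remote
  type protocol/client
  option transport-type tcp
  option remote-host 192.168.123.12   # the peer node
  option remote-subvolume brick
end-volume

volume replicate
  type cluster/replicate              # same translator as cluster/afr
  subvolumes posix remote
end-volume
---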

2 NODE NAMES

xen1 - 192.168.123.11 - i386 - 256MB RAM - 4GB HD as /
xen2 - 192.168.123.12 - i386 - 256MB RAM - 4GB HD as /
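Optionally make the node names resolvable on both machines (not strictly needed,
the volfiles below use plain IP addresses):

cat >> /etc/hosts <<EOF
192.168.123.11 xen1
192.168.123.12 xen2
EOF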


3 INSTALL CURRENT GLUSTERFS

yum install wget rpm-build libibverbs-devel db4-devel fuse-devel bison flex gcc make
wget http://ftp.gluster.com/pub/gluster/glusterfs/2.0/LATEST/Fedora/glusterfs-2.0.7-1.fc10.src.rpm
rpm -ihv glusterfs-2.0.7-1.fc10.src.rpm
rpmbuild -bb /root/rpmbuild/SPECS/glusterfs.spec
rpm -Uhv /root/rpmbuild/RPMS/i386/glusterfs-*.rpm
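Both nodes need the same packages; to check what landed after the install:

rpm -qa | grep glusterfs
glusterfs --version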

4 CONFIGURE GLUSTERFS SERVER (ON BOTH NODES)

4.1 create volume

mkdir /mnt/glusterfs
mkdir /data/
mkdir /data/export
mkdir /data/export-ns
vi /etc/glusterfs/glusterfsd.vol
---
volume posix
  type storage/posix
  option directory /data/export
end-volume

volume locks
  type features/locks
  subvolumes posix
end-volume

volume brick
  type performance/io-threads
  option thread-count 8
  subvolumes locks
end-volume

volume posix-ns
  type storage/posix
  option directory /data/export-ns
end-volume

volume locks-ns
  type features/locks
  subvolumes posix-ns
end-volume

volume brick-ns
  type performance/io-threads
  option thread-count 8
  subvolumes locks-ns
end-volume

volume server
  type protocol/server
  option transport-type tcp
  option auth.addr.brick.allow *
  option auth.addr.brick-ns.allow *
  subvolumes brick brick-ns
end-volume
---
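The wildcard in auth.addr.*.allow lets any host connect. On anything but an
isolated lab network you may want to restrict it to your subnet, e.g. (my addition,
adjust the pattern to your network):

option auth.addr.brick.allow 192.168.123.*
option auth.addr.brick-ns.allow 192.168.123.*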

4.2 start server

/etc/init.d/glusterfsd start
ps -ef | grep -i gluster
cat /var/log/glusterfs/-etc-glusterfs-glusterfsd.vol.log
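So the server survives the reboots used later, enable it at boot (Fedora 11 still
uses SysV init scripts):

chkconfig glusterfsd on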

5 CONFIGURE GLUSTERFS CLIENT (ON BOTH NODES)

vi /etc/glusterfs/glusterfs.vol
---
### Add client feature and attach to remote subvolume of server1
volume brick1
 type protocol/client
 option transport-type tcp/client
 option remote-host 192.168.123.11   # IP address of the remote brick
 option remote-subvolume brick        # name of the remote volume
end-volume

### Add client feature and attach to remote subvolume of server2
volume brick2
 type protocol/client
 option transport-type tcp/client
 option remote-host 192.168.123.12   # IP address of the remote brick
 option remote-subvolume brick        # name of the remote volume
end-volume

### The file index on server1
volume brick1-ns
 type protocol/client
 option transport-type tcp/client
 option remote-host 192.168.123.11    # IP address of the remote brick
 option remote-subvolume brick-ns        # name of the remote volume
end-volume

### The file index on server2
volume brick2-ns
 type protocol/client
 option transport-type tcp/client
 option remote-host 192.168.123.12      # IP address of the remote brick
 option remote-subvolume brick-ns        # name of the remote volume
end-volume

#The replicated volume with data
volume afr1
 type cluster/afr
 subvolumes brick1 brick2
end-volume

#The replicated volume with indexes
volume afr-ns
 type cluster/afr
 subvolumes brick1-ns brick2-ns
end-volume

#The unification of all afr volumes (used for > 2 servers)
volume unify
  type cluster/unify
  option scheduler rr # round robin
  option namespace afr-ns
  subvolumes afr1
end-volume
---
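The unify layer only pays off with more than two servers: each extra pair of bricks
gets its own afr volume, which is then appended to the unify subvolumes. A sketch,
assuming brick3/brick4 client volumes declared like brick1/brick2 above:
---
volume afr2
 type cluster/afr
 subvolumes brick3 brick4
end-volume

volume unify
  type cluster/unify
  option scheduler rr
  option namespace afr-ns
  subvolumes afr1 afr2
end-volume
---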

5.1 reboot to get a clean state

init 6

6 TEST GLUSTERFS

6.1 try to mount glusterfs

glusterfs -f /etc/glusterfs/glusterfs.vol /mnt/glusterfs
cat /var/log/glusterfs/mnt-glusterfs.log
df -ht fuse.glusterfs

6.2 configure auto mount

echo '/etc/glusterfs/glusterfs.vol /mnt/glusterfs glusterfs rw 0 0' >> /etc/fstab
init 6
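After the reboot, check that the volume mounted by itself:

df -ht fuse.glusterfs
cat /var/log/glusterfs/mnt-glusterfs.log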

6.3 test if it is replicating

xen1> mkdir -p /mnt/glusterfs/xen1
     cp -a /root/rpmbuild/RPMS/i386/glusterfs-* /mnt/glusterfs/xen1/
xen2> mkdir -p /mnt/glusterfs/xen2
      cp -a /root/rpmbuild/RPMS/i386/glusterfs-* /mnt/glusterfs/xen2/
xen1> md5sum /mnt/glusterfs/xen?/*
      ls -li /mnt/glusterfs/xen?/* /data/export-ns/xen?/* /data/export/xen?/*
xen2> md5sum /mnt/glusterfs/xen?/*
      ls -li /mnt/glusterfs/xen?/* /data/export-ns/xen?/* /data/export/xen?/*
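To confirm both bricks really hold full copies, you can also checksum the backend
directories directly (read-only check; never modify /data/export behind GlusterFS's
back):

xen1> md5sum /data/export/xen?/*
xen2> md5sum /data/export/xen?/*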

6.4 test while booting one node

xen1> dd if=/dev/zero of=/mnt/glusterfs/xen1.img bs=1M count=512
      md5sum /mnt/glusterfs/xen1.img
      init 0
xen2> ls -l /mnt/glusterfs/*
      md5sum /mnt/glusterfs/xen1.img
      rm -f /mnt/glusterfs/xen1/*
xen1> # power-on
      ls -l /mnt/glusterfs
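Note: AFR in this release heals files lazily when they are accessed, not in the
background. After xen1 is back up, force a self-heal by reading a byte of every
file on the mount:

find /mnt/glusterfs -type f -exec head -c1 {} \; > /dev/null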


6.5 notes

I had trouble launching Xen VMs whose disk images were stored on a GlusterFS volume.
What fixed it for me was adding --disable-direct-io-mode to the mount command above,
so now I am using:

/usr/sbin/glusterfs --disable-direct-io-mode --volfile=/etc/glusterfs/glusterfs.vol /mnt/glusterfs
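If you mount via /etc/fstab as in 6.2, the mount helper should accept the same
thing as a mount option (my assumption for this version; check man mount.glusterfs):

/etc/glusterfs/glusterfs.vol /mnt/glusterfs glusterfs rw,direct-io-mode=disable 0 0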