[Linux-cluster] Can't mount gfs

Pavel Kuzin pk at nodex.ru
Thu Nov 26 13:36:10 UTC 2009


I'm trying to upgrade my old 1.04 installation to 2.03.11.
I've installed the new versions of the software.
I'm using a customised kernel.
The cluster comes up fine.
Creating a test filesystem works.
But when I try to mount the new filesystem, an error occurs.

Can anybody tell me what I'm doing wrong?

Thank you!

node3:/usr/src# mount -t gfs -v /dev/sdb1 /mnt
/sbin/mount.gfs: mount /dev/sdb1 /mnt
/sbin/mount.gfs: parse_opts: opts = "rw"
/sbin/mount.gfs:   clear flag 1 for "rw", flags = 0
/sbin/mount.gfs: parse_opts: flags = 0
/sbin/mount.gfs: parse_opts: extra = ""
/sbin/mount.gfs: parse_opts: hostdata = ""
/sbin/mount.gfs: parse_opts: lockproto = ""
/sbin/mount.gfs: parse_opts: locktable = ""
/sbin/mount.gfs: message to gfs_controld: asking to join mountgroup:
/sbin/mount.gfs: write "join /mnt gfs lock_dlm TEST:NEW rw /dev/sdb1"
/sbin/mount.gfs: message from gfs_controld: response to join request:
/sbin/mount.gfs: lock_dlm_join: read "0"
/sbin/mount.gfs: message from gfs_controld: mount options:
/sbin/mount.gfs: lock_dlm_join: read "hostdata=jid=0:id=1441795:first=1"
/sbin/mount.gfs: lock_dlm_join: hostdata: "hostdata=jid=0:id=1441795:first=1"
/sbin/mount.gfs: lock_dlm_join: extra_plus: "hostdata=jid=0:id=1441795:first=1"
/sbin/mount.gfs: mount(2) failed error -1 errno 19
/sbin/mount.gfs: lock_dlm_mount_result: write "mount_result /mnt gfs -1"
/sbin/mount.gfs: message to gfs_controld: asking to leave mountgroup:
/sbin/mount.gfs: lock_dlm_leave: write "leave /mnt gfs 19"
/sbin/mount.gfs: message from gfs_controld: response to leave request:
/sbin/mount.gfs: lock_dlm_leave: read "0"
/sbin/mount.gfs: error mounting /dev/sdb1 on /mnt: No such device
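(A side note on the error itself: errno 19 is ENODEV, "No such device", which is what mount(2) returns when the kernel does not know the requested filesystem type. Since this is a customised kernel, here is a sketch of what I would check first, assuming the GFS kernel module is simply named gfs and the kernel tree is the one the config path below points at (that path is a guess):

node3:/usr/src# grep gfs /proc/filesystems           # is gfs registered with the running kernel?
node3:/usr/src# modprobe gfs; dmesg | tail           # if not, try loading it and see what the kernel reports
node3:/usr/src# grep -i GFS /usr/src/linux/.config   # was GFS enabled when the custom kernel was built?

If gfs never appears in /proc/filesystems, mount will keep failing with exactly this ENODEV.)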

# uname -a
Linux node3.cl.nodex.ru 2.6.30.9 #1 SMP Thu Nov 26 12:18:02 MSK 2009 i686 GNU/Linux

node3:/usr/src# cman_tool status
Version: 6.2.0
Config Version: 1
Cluster Name: TEST
Cluster Id: 1198
Cluster Member: Yes
Cluster Generation: 28
Membership state: Cluster-Member
Nodes: 1
Expected votes: 1
Total votes: 1
Node votes: 1
Quorum: 1
Active subsystems: 8
Flags: 2node Dirty
Ports Bound: 0 11
Node name: node3
Node ID: 3
Multicast addresses: 239.0.210.1
Node addresses: 10.210.10.12
node3:/usr/src#


node3:/usr/src# mkfs -t gfs -p lock_dlm -t TEST:NEW -j 1 /dev/sdb1
This will destroy any data on /dev/sdb1.
   It appears to contain a LVM2_member raid.

Are you sure you want to proceed? [y/n] y

Device:                    /dev/sdb1
Blocksize:                 4096
Filesystem Size:           24382128
Journals:                  1
Resource Groups:           374
Locking Protocol:          lock_dlm
Lock Table:                TEST:NEW

Syncing...
All Done
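(One more thing I am not sure about: mkfs warned that /dev/sdb1 looked like an LVM2_member. Assuming that signature is just leftover from the old 1.04 setup, a couple of checks to make sure nothing still claims the partition:

node3:/usr/src# pvs | grep sdb1      # does LVM still report a physical volume on the partition?
node3:/usr/src# dmsetup ls           # any device-mapper mappings still layered on top of it?)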



node3:/usr/src# cat /etc/cluster/cluster.conf
<?xml version="1.0"?>
<cluster name="TEST" config_version="1">
<cman two_node="1" expected_votes="1" deadnode_timer="1">
     <multicast addr="239.0.210.1"/>
</cman>
<clusternodes>
     <clusternode name="node3" votes="1" nodeid="3">
         <multicast addr="239.0.210.1" interface="eth0"/>
         <fence>
                 <method name="single">
                     <device name="pdu2" port="6"/>
                 </method>
         </fence>
     </clusternode>
     <clusternode name="node4" votes="1" nodeid="4">
         <multicast addr="239.0.210.1" interface="eth0"/>
         <fence>
                 <method name="single">
                     <device name="pdu2" port="5"/>
                 </method>
         </fence>
     </clusternode>
</clusternodes>
<fence_daemon post_fail_delay="0" post_join_delay="3"/>
<fencedevices>
         <fencedevice name="pdu2" agent="fence_apc" ipaddr="pdu2" login="cluster" passwd="xxxxxx"/>
</fencedevices>
</cluster>



-- 
Pavel D. Kuzin
pk at nodex.ru
Nodex LTD.
Saint-Petersburg, Russia



