[Linux-cluster] NFSCookbook w/ Redhat 5.0 Cluster

dennis at demarco.com
Wed Jul 11 22:02:55 UTC 2007

I'm playing around with the NFS Cookbook on a test cluster, and I'm seeing
some really odd behavior. I can't seem to get the services to 'stick' to
the cluster node with the lowest priority value (which, in an ordered
failover domain, should be the most preferred member).

When a cluster node starts a service, it seems to relocate other services
for no apparent reason. Have I done something wrong?
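
A quick way to watch where the services land, using the stock rgmanager
tools (service and member names as in the config below):

[root@node01 ~]# clustat                                   # show members and current service owners
[root@node01 ~]# clusvcadm -r nfs1 -m node01.internal.lan  # relocate nfs1 back to its preferred member

(Running clusvcadm -r without -m lets rgmanager pick the target according
to the domain ordering.)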

Thanks,
Dennis

Below is my config file:

<?xml version="1.0"?>
<cluster alias="cluster1" config_version="113" name="cluster1">
    <fence_daemon clean_start="0" post_fail_delay="0" post_join_delay="12"/>
    <clusternodes>
        <clusternode name="node03.internal.lan" nodeid="1" votes="1">
            <fence>
                <method name="1">
                    <device domain="node03" name="xen-fence"/>
                </method>
            </fence>
        </clusternode>
        <clusternode name="node01.internal.lan" nodeid="2" votes="1">
            <fence>
                <method name="1">
                    <device domain="node01" name="xen-fence"/>
                </method>
            </fence>
        </clusternode>
        <clusternode name="node02.internal.lan" nodeid="3" votes="1">
            <fence>
                <method name="1">
                    <device domain="node2" name="xen-fence"/>
                </method>
            </fence>
        </clusternode>
    </clusternodes>
    <cman/>
    <fencedevices>
        <fencedevice agent="fence_xvm" name="xen-fence"/>
    </fencedevices>
    <rm>
        <failoverdomains>
            <failoverdomain name="grid1" ordered="1" restricted="1">
                <failoverdomainnode name="node03.internal.lan" priority="3"/>
                <failoverdomainnode name="node01.internal.lan" priority="1"/>
                <failoverdomainnode name="node02.internal.lan" priority="2"/>
            </failoverdomain>
            <failoverdomain name="grid2" ordered="1" restricted="1">
                <failoverdomainnode name="node03.internal.lan" priority="2"/>
                <failoverdomainnode name="node01.internal.lan" priority="3"/>
                <failoverdomainnode name="node02.internal.lan" priority="1"/>
            </failoverdomain>
            <failoverdomain name="grid3" ordered="1" restricted="1">
                <failoverdomainnode name="node03.internal.lan" priority="1"/>
                <failoverdomainnode name="node01.internal.lan" priority="2"/>
                <failoverdomainnode name="node02.internal.lan" priority="3"/>
            </failoverdomain>
        </failoverdomains>
        <resources>
            <ip address="192.168.1.23" monitor_link="1"/>
            <ip address="192.168.1.24" monitor_link="1"/>
            <ip address="192.168.1.25" monitor_link="1"/>
            <nfsexport name="nfsexport1"/>
            <nfsexport name="nfsexport2"/>
            <nfsexport name="nfsexport3"/>
            <nfsclient allow_recover="0" name="nfsclient1" target="*"/>
            <nfsclient allow_recover="0" name="nfsclient2" target="*"/>
            <nfsclient allow_recover="0" name="nfsclient3" target="*"/>
            <clusterfs device="/dev/vg0/gfslv2" force_unmount="0" fsid="59408" fstype="gfs" mountpoint="/gfsdata" name="gfs" options="acl"/>
        </resources>
        <service autostart="1" domain="grid1" exclusive="0" name="nfs1" recovery="relocate">
            <clusterfs ref="gfs">
                <nfsexport ref="nfsexport1">
                    <nfsclient ref="nfsclient1"/>
                </nfsexport>
            </clusterfs>
            <ip ref="192.168.1.23"/>
        </service>
        <service autostart="1" domain="grid2" exclusive="0" name="nfs2" recovery="relocate">
            <clusterfs ref="gfs">
                <nfsexport ref="nfsexport2">
                    <nfsclient ref="nfsclient2"/>
                </nfsexport>
            </clusterfs>
            <ip ref="192.168.1.24"/>
        </service>
        <service autostart="1" domain="grid3" exclusive="0" name="nfs3" recovery="relocate">
            <clusterfs ref="gfs">
                <nfsexport ref="nfsexport3">
                    <nfsclient ref="nfsclient3"/>
                </nfsexport>
            </clusterfs>
            <ip ref="192.168.1.25"/>
        </service>
    </rm>
</cluster>
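
If I understand the ordered-domain semantics right, priority 1 marks the
most preferred member, so nfs1 should run on node01, nfs2 on node02, and
nfs3 on node03 -- and an ordered domain will also pull a service back
whenever a higher-priority member rejoins. If that failback is what is
shuffling the services around, the nofailback attribute that later
rgmanager releases accept on a failover domain might be the missing piece.
A sketch for grid1, assuming a version that supports it:

<!-- nofailback="1" leaves a running service where it is when a
     higher-priority member rejoins the domain (assumes an rgmanager
     release that supports this attribute) -->
<failoverdomain name="grid1" ordered="1" restricted="1" nofailback="1">
    <failoverdomainnode name="node03.internal.lan" priority="3"/>
    <failoverdomainnode name="node01.internal.lan" priority="1"/>
    <failoverdomainnode name="node02.internal.lan" priority="2"/>
</failoverdomain>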
