
[Linux-cluster] Some problems using fence_vmware_ng under ESXi 4



Hi all,

I have installed two RHEL 5.4 virtual guests, el5prodnode01 and el5prodnode02, on an ESXi 4 host, and I need to use fence_vmware_ng as the fence device. Everything works fine except when the ESXi host starts or is rebooted. I have configured the ESXi host to start only el5prodnode01 automatically when the host boots, but every time el5prodnode01 comes up it tries to power on el5prodnode02. Why? Is this the normal behaviour of the fence_vmware_ng agent? How can I disable this behaviour, or is it a malfunction?
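For what it's worth, the agent can also be run by hand to see which actions it issues against a guest. A minimal sketch, assuming the same ESXi host and credentials as in the cluster.conf below (option names may differ slightly depending on the fence-agents version shipped with your release):

    # list the guests visible to the agent on the ESXi host
    fence_vmware_ng -a 172.25.50.11 -l root -p rootpass -o list

    # query the power state of the second guest
    fence_vmware_ng -a 172.25.50.11 -l root -p rootpass -n el5prodnode02 -o status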

My cluster.conf is:

<?xml version="1.0"?>
<cluster alias="VirtualRHELCluster" config_version="4" name="VirtCluster">
        <fence_daemon post_fail_delay="0" post_join_delay="3"/>
        <quorumd interval="1" tko="10" votes="1" label="prodqdisk" log_level="4">
                <heuristic program="ping 172.25.50.11 -c1 -t1" score="1" interval="2" tko="3"/>
        </quorumd>
        <clusternodes>
                <clusternode name="node01.hpulabs.org" nodeid="1" votes="1">
                        <multicast addr="239.192.25.11" interface="eth2"/>
                        <fence>
                                <method name="1">
                                        <device name="vmware_fence_node1"/>
                                </method>
                        </fence>
                </clusternode>
                <clusternode name="node02.hpulabs.org" nodeid="2" votes="1">
                        <multicast addr="239.192.25.11" interface="eth2"/>
                        <fence>
                                <method name="1">
                                        <device name="vmware_fence_node2"/>
                                </method>
                        </fence>
                </clusternode>
        </clusternodes>
        <cman expected_votes="3" two_node="0">
                <multicast addr="239.192.25.11"/>
        </cman>
        <fencedevices>
                <fencedevice agent="fence_vmware_ng" name="vmware_fence_node1" ipaddr="172.25.50.11" login="root" passwd="rootpass" port="el5prodnode01"/>
                <fencedevice agent="fence_vmware_ng" name="vmware_fence_node2" ipaddr="172.25.50.11" login="root" passwd="rootpass" port="el5prodnode02"/>
                <fencedevice agent="fence_manual" name="manual-fence"/>
        </fencedevices>
        <rm log_facility="local4" log_level="7">
                <failoverdomains>
                        <failoverdomain name="PriCluster1" ordered="1" restricted="1">
<failoverdomainnode name="node01.hpulabs.org" priority="1"/> <failoverdomainnode name="node02.hpulabs.org" priority="2"/>
                        </failoverdomain>
                        <failoverdomain name="PriCluster2" ordered="1" restricted="1">
<failoverdomainnode name="node02.hpulabs.org" priority="1"/> <failoverdomainnode name="node01.hpulabs.org" priority="2"/>
                        </failoverdomain>
                        <failoverdomain name="FirstNode" restricted="1">
<failoverdomainnode name="node01.hpulabs.org" priority="1"/>
                        </failoverdomain>
                        <failoverdomain name="SecondNode" restricted="1">
<failoverdomainnode name="node02.hpulabs.org" priority="1"/>
                        </failoverdomain>
                </failoverdomains>
                <resources>
                        <fs device="/dev/clustervol/infravol" force_fsck="0" force_unmount="1" fstype="ext3" mountpoint="/data/services/infra" name="infradata" options="rw"/>
                        <fs device="/dev/clustervol/mirrorvol" force_fsck="0" force_unmount="1" fstype="ext3" mountpoint="/data/services/www" name="mirrordata" options="rw"/>
                        <script file="/data/config/etc/init.d/postfix-cluster" name="postfix-cluster"/>
                </resources>
        </rm>
</cluster>
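After the host comes back up and el5prodnode01 has joined the cluster, I can also look at what the cluster itself saw. A rough check, assuming the standard RHEL 5 cman/fenced tools (output format may vary by release):

    # cluster membership and quorum as seen from node01
    cman_tool status
    cman_tool nodes

    # dump fenced's debug buffer, which records its fencing decisions,
    # including any made when the fence domain first forms after boot
    group_tool dump fence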

Many thanks for your help.

--
CL Martinez
carlopmart {at} gmail {d0t} com

