[Linux-cluster] Re: fence_manual missing /tmp/fence_manual.fifo
denis
denisb+gmane at gmail.com
Fri Mar 28 09:25:00 UTC 2008
Maciej Bogucki wrote:
>> Are you certain you want to continue? [yN] y
>> can't open /tmp/fence_manual.fifo: No such file or directory
> Hello,
>
> Do you have the latest version of the fence package?
> What does your cluster.conf look like?
Hi,
I have installed the patched cman package from Lon H. to fix the broken
rgmanager communications bug:
https://bugzilla.redhat.com/show_bug.cgi?id=327721
Version : 2.0.73 Vendor: Red Hat, Inc.
Release : 1.6.el5.test.bz327721 Build Date: Mon 26 Nov 2007
07:22:55 PM CET
Install Date: Thu 27 Mar 2008 03:28:20 PM CET Build Host:
hs20-bc1-6.build.redhat.com
Group : System Environment/Base Source RPM:
cman-2.0.73-1.6.el5.test.bz327721.src.rpm
Size : 1164641 License: GPL
Signature : (none)
Packager : Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>
URL : http://sources.redhat.com/cluster/
Summary : cman - The Cluster Manager
cluster.conf @ pastebin: http://pastebin.com/m5f8787a2
cluster.conf :
<?xml version="1.0"?>
<cluster alias="cluster_customer" config_version="31"
name="cluster_customer">
<rm log_level="4"/>
<fence_daemon clean_start="0" post_fail_delay="0"
post_join_delay="3"/>
<clusternodes>
<clusternode name="node1.domain.com" nodeid="1" votes="1">
<fence>
<method name="1">
<device blade="1" name="ArenaMM"/>
</method>
<method name="2">
<device name="LastResortNode1"/>
</method>
</fence>
</clusternode>
<clusternode name="node2.domain.com" nodeid="2" votes="1">
<fence>
<method name="1">
<device blade="2" name="ArenaMM"/>
</method>
<method name="2">
<device name="LastResortNode2"/>
</method>
</fence>
</clusternode>
</clusternodes>
<cman expected_votes="1" two_node="1"/>
<fencedevices>
<fencedevice agent="fence_bladecenter"
ipaddr="10.20.1.7" login="*****" name="ArenaMM" debuglog="/tmp/fencelog"
passwd="******"/>
<fencedevice agent="fence_manual" name="LastResortNode1"
nodename="node1.domain.com"/>
<fencedevice agent="fence_manual" name="LastResortNode2"
nodename="node2.domain.com"/>
</fencedevices>
<rm>
<failoverdomains>
<failoverdomain name="failover_proxy"
ordered="1" restricted="1">
<failoverdomainnode
name="node1.domain.com" priority="10"/>
<failoverdomainnode
name="node2.domain.com" priority="20"/>
</failoverdomain>
<failoverdomain name="failover_database"
ordered="1" restricted="1">
<failoverdomainnode
name="node1.domain.com" priority="20"/>
<failoverdomainnode
name="node2.domain.com" priority="10"/>
</failoverdomain>
</failoverdomains>
<resources>
<ip address="192.168.0.101" monitor_link="1"/>
<ip address="192.168.0.102" monitor_link="1"/>
<script file="/etc/init.d/haproxy" name="HAProxy"/>
<mysql config_file="/etc/my.cnf"
listen_address="192.168.0.102" name="MySQL" shutdown_wait="60"/>
<fs device="/dev/mapper/mysql" force_fsck="1"
force_unmount="1" fsid="8426" fstype="ext3" mountpoint="/var/lib/mysql"
name="disk_mysql" self_fence="0"/>
</resources>
<service autostart="1" domain="failover_proxy"
exclusive="0" name="customer_proxy" recovery="restart">
<ip ref="192.168.0.101">
<script ref="HAProxy"/>
</ip>
</service>
<service autostart="1" domain="failover_database"
exclusive="0" name="customer_database" recovery="restart">
<ip ref="192.168.0.102">
<fs ref="disk_mysql">
<mysql ref="MySQL"/>
</fs>
</ip>
</service>
</rm>
<totem consensus="4800" join="60" token="10000"
token_retransmits_before_loss_const="20"/>
</cluster>
Regards
--
Denis
More information about the Linux-cluster
mailing list