[Cluster-devel] conga ./conga.spec.in.in luci/site/luci/Extens ...
rmccabe at sourceware.org
Wed Sep 17 06:29:56 UTC 2008
CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: rmccabe at sourceware.org 2008-09-17 06:29:55
Modified files:
. : conga.spec.in.in
luci/site/luci/Extensions: RicciQueries.py cluster_adapters.py
Log message:
Fix bz459623
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.101&r2=1.45.2.102
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.10&r2=1.1.4.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.46&r2=1.120.2.47
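For context: the patch has three parts. The conga.spec.in.in hunk adds the changelog entry; the first RicciQueries.py hunk drops a stray trailing quote that corrupted the start_node batch XML; and the new setClusterConfSync helper, together with the cluster_adapters.py changes, pushes the updated cluster.conf to the existing nodes synchronously before any new node is asked to join. For readability, the single-line batch string built by setClusterConfSync (second diff below) expands to roughly the following XML, with whitespace added here for illustration only and the %s placeholders as in the patch:

  <module name="cluster">
    <request API_version="1.0">
      <function_call name="set_cluster.conf">
        <var type="boolean" name="propagate" mutable="false" value="%s"/>
        <var type="xml" mutable="false" name="cluster.conf">%s</var>
      </function_call>
    </request>
  </module>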
--- conga/conga.spec.in.in 2008/08/27 14:59:17 1.45.2.101
+++ conga/conga.spec.in.in 2008/09/17 06:29:54 1.45.2.102
@@ -315,6 +315,7 @@
* Wed Aug 27 2008 Ryan McCabe <rmccabe at redhat.com> 0.12.1-4
- Fix bz459562 (charset configuration fix for luci)
- Fix bz459469 (An unknown device type was given: "gnbd.")
+- Fix bz459623 (Conga error adding new node to existing cluster)
* Thu Aug 07 2008 Ryan McCabe <rmccabe at redhat.com> 0.12.1-3
- More fixes for bz429350
--- conga/luci/site/luci/Extensions/RicciQueries.py 2008/03/12 15:13:12 1.1.4.10
+++ conga/luci/site/luci/Extensions/RicciQueries.py 2008/09/17 06:29:54 1.1.4.11
@@ -6,12 +6,10 @@
# Free Software Foundation.
from xml.dom import minidom
-from ricci_communicator import RicciCommunicator
+from ricci_communicator import RicciCommunicator, batch_status
from LuciSyslog import get_logger
from conga_constants import LUCI_DEBUG_MODE
-
-
luci_log = get_logger()
def addClusterNodeBatch(cluster_name,
@@ -115,7 +113,7 @@
batch.append('<module name="cluster">')
batch.append('<request API_version="1.0">')
batch.append('<function_call name="start_node">')
- batch.append('<var mutable="false" name="enable_services" type="boolean" value="true"/>"')
+ batch.append('<var mutable="false" name="enable_services" type="boolean" value="true"/>')
batch.append('</function_call>')
batch.append('</request>')
batch.append('</module>')
@@ -342,6 +340,30 @@
ricci_xml = rc.batch_run(batch_str)
return batchAttemptResult(ricci_xml)
+def setClusterConfSync(rc, clusterconf, propagate=True):
+ if propagate is True:
+ propg = 'true'
+ else:
+ propg = 'false'
+
+ conf = str(clusterconf).replace('<?xml version="1.0"?>', '')
+ conf = conf.replace('<?xml version="1.0" ?>', '')
+ conf = conf.replace('<? xml version="1.0"?>', '')
+ conf = conf.replace('<? xml version="1.0" ?>', '')
+
+ batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="%s"/><var type="xml" mutable="false" name="cluster.conf">%s</var></function_call></request></module>' % (propg, conf)
+
+ ricci_xml = rc.batch_run(batch_str, async=False)
+ if not ricci_xml:
+ return False
+ batch_xml = ricci_xml.getElementsByTagName('batch')
+ if not batch_xml:
+ return None
+ (num, total) = batch_status(batch_xml[0])
+ if num == total:
+ return True
+ return False
+
def getNodeLogs(rc):
from time import time, ctime
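A note on the replace() calls in setClusterConfSync above: an XML declaration is only legal at the very start of a document, so if the serialized cluster.conf kept its <?xml ...?> prolog, embedding it inside the <var type="xml"> element would make the whole batch document malformed. A minimal sketch of the effect, standard library only and runnable under the Python 2 luci uses; the sample <cluster> element is purely illustrative:

  from xml.dom import minidom

  conf = '<?xml version="1.0"?><cluster name="c1" config_version="2"/>'
  # Strip the declaration before embedding, as the helper does.
  conf = conf.replace('<?xml version="1.0"?>', '')
  batch = '<module name="cluster"><request API_version="1.0">' \
      '<function_call name="set_cluster.conf">' \
      '<var type="xml" mutable="false" name="cluster.conf">%s</var>' \
      '</function_call></request></module>' % conf
  # With the declaration stripped this parses cleanly; left in place,
  # minidom raises an ExpatError about a misplaced XML declaration.
  minidom.parseString(batch)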
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2008/07/17 16:36:56 1.120.2.46
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2008/09/17 06:29:54 1.120.2.47
@@ -378,8 +378,8 @@
if cluster_os is None:
if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('Unable to determine cluster OS for %s' % clustername)
- return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running' ] })
+ luci_log.debug_verbose('Unable to determine the operating system version for %s' % clustername)
+ return (False, { 'errors': [ 'Unable to determine the version of the cluster software this cluster is running' ] })
shared_storage = False
try:
@@ -547,41 +547,22 @@
try:
skeys = system_list.keys()
skeys.sort()
+ batch_node = rq.addClusterNodeBatch(clustername,
+ True,
+ True,
+ shared_storage,
+ False,
+ download_pkgs,
+ model.GULM_ptr is not None,
+ reboot_nodes)
+ batch_node_xml = minidom.parseString('<batch>%s</batch>' % batch_node)
+ batch_node_xml = batch_node_xml.getElementsByTagName('batch')[0]
+ if not batch_node_xml:
+ raise Exception, 'batch is blank'
+
for x in skeys:
i = system_list[x]
-
- try:
- batch_node = rq.addClusterNodeBatch(clustername,
- True,
- True,
- shared_storage,
- False,
- download_pkgs,
- model.GULM_ptr is not None,
- reboot_nodes)
- if not batch_node:
- raise Exception, 'batch is blank'
- system_list[x]['batch'] = batch_node
- except Exception, e:
- cur_system['errors'] = True
- incomplete = True
-
- try:
- if not cur_system['prev_auth']:
- rc.unauth()
- del cur_system['trusted']
- except Exception, e:
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VACN12: %s: %r %s' \
- % (cur_host, e, str(e)))
-
- errors.append('Unable to initiate cluster join for node "%s"' \
- % cur_host)
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VACN13: %s: %r %s' \
- % (cur_host, e, str(e)))
- continue
-
+ system_list[x]['batch'] = batch_node_xml
next_node_id += 1
new_node = ClusterNode()
new_node.attr_hash['name'] = str(i['host'])
@@ -600,36 +581,20 @@
if not conf_str:
raise Exception, 'Unable to save the new cluster model'
- batch_number, result = rq.setClusterConf(cluster_ricci, conf_str)
- if not batch_number or not result:
- raise Exception, 'batch or result is None'
+ # Propagate the new cluster.conf to the existing nodes
+ # before having any of the new nodes join. If this fails,
+ # abort the whole process.
+ result = rq.setClusterConfSync(cluster_ricci, conf_str)
+ if result != True:
+ errors.append('Unable to update the cluster configuration on existing cluster nodes')
+ request.SESSION.set('add_node', add_cluster)
+ return (False, { 'errors': errors, 'messages': messages })
except Exception, e:
incomplete = True
errors.append('Unable to generate the new cluster configuration')
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('VACN14: %s' % e)
- # Propagate the new cluster.conf to the existing nodes
- # before having any of the new nodes join. If this fails,
- # abort the whole process.
- try:
- while True:
- batch_ret = cluster_ricci.batch_status(batch_number)
- code = batch_ret[0]
- if code is True:
- break
- if code == -1:
- errors.append(batch_ret[1])
- raise Exception, str(batch_ret[1])
- if code is False:
- time.sleep(0.5)
- except Exception, e:
- incomplete = True
- errors.append('Unable to update the cluster node list for %s' \
- % clustername)
- if LUCI_DEBUG_MODE is True:
- luci_log.debug_verbose('VACN15: %r %s' % (e, str(e)))
-
if incomplete or len(errors) > 0:
request.SESSION.set('add_node', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
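One detail at the new call site above: checking result != True covers all three return values of setClusterConfSync, namely True (every batch step completed on the target node), False (no reply, or the batch did not finish), and None (the reply carried no <batch> element). A condensed usage sketch, assuming, as the diff's call sites suggest, that RicciQueries is importable as rq and that cluster_ricci is an authenticated RicciCommunicator for an existing cluster member; conf_str is the serialized cluster.conf from the updated model (Python 2, matching the tree):

  import RicciQueries as rq

  errors = []
  # cluster_ricci and conf_str are assumed to be set up as in the
  # surrounding cluster_adapters.py code.
  result = rq.setClusterConfSync(cluster_ricci, conf_str)
  if result is not True:
      # Stop before asking any new node to join, as the hunk above does.
      errors.append('Unable to update the cluster configuration '
          'on existing cluster nodes')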