
[Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...



CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	RHEL4
Changes by:	rmccabe@sourceware.org	2007-08-09 21:35:22

Added files:
	luci/site/luci/Extensions: LuciClusterActions.py 
	                           LuciClusterInfo.py LuciDB.py 
	                           LuciZope.py LuciZopeAsync.py 
	                           LuciZopeClusterPortal.py 
	                           LuciZopeExternal.py LuciZopePerm.py 
	                           ResourceHandler.py RicciQueries.py 

Log message:
	Merge in fixes from the RHEL5 branch, pass 4

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.10.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.6.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.4.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeAsync.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.1.4.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeExternal.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.3.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopePerm.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ResourceHandler.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.2.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=NONE&r2=1.7.2.1

/cvs/cluster/conga/luci/site/luci/Extensions/LuciClusterActions.py,v  -->  standard output
revision 1.4.2.1
--- conga/luci/site/luci/Extensions/LuciClusterActions.py
+++ -	2007-08-09 21:35:22.549359000 +0000
@@ -0,0 +1,725 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciSyslog import get_logger
+import RicciQueries as rq
+
+from ricci_communicator import RicciCommunicator
+
+from LuciDB import set_node_flag, getRicciAgent, \
+	getClusterNode, getStorageNode, NodeBusy, \
+	setNodeStatus, resolve_nodename, \
+	delCluster, delClusterSystem, \
+	CLUSTER_NODE_NEED_AUTH
+
+from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
+	NODE_DELETE, NODE_FORCE_DELETE, CLUSTER_DELETE, CLUSTERLIST, \
+	NODE_FENCE, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
+	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
+
+luci_log = get_logger()
+
+#
+# Cluster service tasks
+#
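+# Each handler below queues a batch job through a node's ricci agent and
+# records a flag in the luci database (via set_node_flag) so the job's
+# progress can be tracked. On failure the handlers return
+# (False, { 'errors': [...] }); on success they return None.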
+
+def RestartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.restartService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while restarting cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_RESTART,
+			'Restarting service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def StartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.startService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'An error occurred while starting cluster service "%s"' % svcname ] })
+
+	try:
+		if nodename is not None:
+			status_msg = 'Starting cluster service "%s" on node "%s"' \
+				% (svcname, nodename)
+		else:
+			status_msg = 'Starting cluster service "%s"' % svcname
+
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START, status_msg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+def StopCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	batch_number, result = rq.stopService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: stop %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while stopping cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_STOP,
+			'Stopping cluster service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def DeleteCluSvc(self, rc, fvars, model):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	if svcname is None or cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc0: svcname: %s, cluname %s' \
+				% (svcname, cluname))
+		return (False, { 'errors': [ 'Both a cluster service name and the cluster name must be given' ] })
+
+	try:
+		model.deleteService(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc1: Unable to find a service named %s for cluster %s: %r %s' % (svcname, cluname, e, str(e)))
+		return (False, { 'errors': [ 'Error removing cluster service "%s"' % svcname ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			SERVICE_DELETE, 'Removing service "%s"' % svcname)
+	if ret[0] is False:
+		return ret
+
+def MigrateCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	if svcname is None or cluname is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: svc: %s, clu: %s, nn: %s' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'A cluster service name, the cluster name, and the target node name must be given' ] })
+
+	batch_number, result = rq.migrateService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'Error migrating cluster service "%s" to node "%s"' % (svcname, nodename) ]})
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START,
+			'Migrating service "%s" to node "%s"' % (svcname, nodename))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+#
+# Cluster resource-related tasks
+#
+
+def DeleteResource(self, rc, model, resname):
+	errstr = 'An error occurred while attempting to delete this cluster resource'
+	if resname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource0: no res name')
+		return (False, { 'errors': [ '%s: no resource name was given' % errstr ]})
+
+	try:
+		model.deleteResource(resname)
+	except KeyError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource1: no res %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: no resource named "%s" was found' % (errstr, resname) ]})
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteResource2: err: %s: %r %s' \
+				% (resname, e, str(e)))
+		return (False, { 'errors': [ '%s: unable to delete resource "%s"' % (errstr, resname) ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			RESOURCE_REMOVE, 'Removing cluster resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def AddResource(self, rc, model, res):
+	resname = None
+	try:
+		resname = res.getName()
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('AddResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to add new resource "%s"' % resname
+		else:
+			errstr = 'Unable to add this new resource'
+		return (False, { 'errors': [ errstr ] })
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
+			'Creating new cluster resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def EditResource(self, rc, model, res):
+	resname = None
+	try:
+		resname = res.getName()
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EditResource0: %r %s' % (e, str(e)))
+		if resname is not None:
+			errstr = 'Unable to edit cluster resource "%s"' % resname
+		else:
+			errstr = 'Unable to edit this cluster resource'
+		return (False, { 'errors': [ errstr ] })
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
+				'Configuring cluster resource "%s"' % resname)
+
+	if ret[0] is False:
+		return ret
+
+#
+# Cluster node membership-related tasks
+#
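+# These functions queue join/leave/delete batch jobs through ricci and
+# return True when the job was queued successfully, or None on error.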
+
+def NodeJoinCluster(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeJoinCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_JOIN_CLUSTER,
+			'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
+				% (e, str(e)))
+		return None
+	return True
+
+def NodeLeaveCluster(	self,
+						rc,
+						clustername,
+						nodename_resolved,
+						stop_cluster=False):
+	reported_cluname = None
+	try:
+		cluster_info = rc.cluster_info()
+		reported_cluname = cluster_info[0] or cluster_info[1]
+		if not reported_cluname:
+			raise Exception, 'not a cluster member'
+		if reported_cluname.lower() != clustername.lower():
+			raise Exception, 'cluster mismatch: expected %s, got %s' \
+								% (clustername, reported_cluname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC0: no cluster name: %r %s' % (e, str(e)))
+		return None
+
+	if NodeBusy(self, clustername, nodename_resolved, rc) is not False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NLC1: %s is busy, can\'t leave cluster yet.' \
+				% nodename_resolved)
+		return None
+
+	batch_number, result = rq.nodeLeaveCluster(rc, cluster_shutdown=stop_cluster)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC2: %s: batch_number or result is None' \
+				% nodename_resolved)
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_LEAVE_CLUSTER,
+			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NLC3: failed to set flags: %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+	return True
+
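+# Force-remove a node: the node is deleted from cluster.conf and the new
+# configuration is propagated through a ricci agent on a different node,
+# so the node being removed does not need to be reachable.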
+def NodeForceDeleteFromCluster(self, model, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ])
+
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC0: no agent to delete node %s "%s"' \
+				% (nodename_resolved, clustername))
+		return None
+
+	try:
+		model.deleteNodeByName(nodename.lower())
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC1: deleteNode %s: %r %s' \
+				% (nodename, e, str(e)))
+		return None
+
+	try:
+		model.setModified(True)
+		str_buf = str(model.exportModelAsString())
+		if not str_buf:
+			raise Exception, 'model string is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC2: exportModelAsString: %r %s' \
+				% (e, str(e)))
+		return None
+
+	batch_number, result = rq.setClusterConf(rc, str_buf)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC3: batch number is None')
+		return None
+
+	try:
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC4: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FORCE_DELETE,
+			'Forcing the deletion of node "%s"' % nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NFDFC5: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def NodeDeleteFromCluster(	self,
+							rc,
+							model,
+							clustername,
+							nodename,
+							nodename_resolved,
+							delete_cluster=False):
+
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propagate it. We will need two ricci agents for this task,
+	# unless we are deleting the cluster itself.
+
+	if delete_cluster is False:
+		# Make sure we can find a second node before we hose anything.
+		rc2 = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved ], exclude_busy=True)
+
+		if rc2 is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND0: unable to find ricci agent to delete %s from %s' % (nodename_resolved, clustername))
+			return None
+
+	# First, delete cluster.conf from the node to be deleted.
+	# Next, have the node leave the cluster.
+
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND5: batch_number and/or result is None')
+		return None
+
+	# Unless we're deleting the whole cluster, it is not worth
+	# flagging this node in the DB, as we are going to delete it
+	# anyway. Now we need to delete the node from the model and
+	# send out the new cluster.conf.
+
+	if delete_cluster is True:
+		try:
+			set_node_flag(self, clustername, rc.hostname(),
+				str(batch_number), CLUSTER_DELETE,
+				'Deleting cluster "%s": Deleting node "%s"' \
+					% (clustername, nodename_resolved))
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
+					% (e, str(e)))
+	else:
+		try:
+			model.deleteNodeByName(nodename.lower())
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6: deleteNode %s: %r %s' \
+					% (nodename, e, str(e)))
+			return None
+
+		try:
+			model.setModified(True)
+			str_buf = str(model.exportModelAsString())
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
+					% (e, str(e)))
+			return None
+
+		# propagate the new cluster.conf via the second node
+		batch_number, result = rq.setClusterConf(rc2, str_buf)
+		if batch_number is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND8: batch number is None')
+			return None
+
+	try:
+		ret = delClusterSystem(self, clustername, nodename_resolved)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND9: error deleting %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+
+	if delete_cluster:
+		return True
+
+	try:
+		set_node_flag(self, clustername, rc2.hostname(),
+			str(batch_number), NODE_DELETE,
+			'Deleting node "%s"' % nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster management-related tasks.
+#
+
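+# Start the cluster by asking each node to join it. Returns the number of
+# nodes that could not be contacted or failed to join, or None if the model
+# is missing or has no nodes.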
+def ClusterStart(self, model):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart: RC %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+			errors += 1
+			continue
+
+		if NodeJoinCluster(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart1: nodeJoin %s' \
+					% nodename_resolved)
+			errors += 1
+
+	return errors
+
+def ClusterStop(self, model, delete=False):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
+					% (delete is True, str(nodename_resolved), e, str(e)))
+			errors += 1
+			continue
+
+		if delete is True:
+			ret = NodeDeleteFromCluster(self, rc, model, clustername,
+					nodename, nodename_resolved, delete_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
+				errors += 1
+		else:
+			ret = NodeLeaveCluster(self, rc, clustername,
+					nodename_resolved, stop_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+						% (nodename_resolved))
+				errors += 1
+	return errors
+
+def ClusterRestart(self, model):
+	snum_err = ClusterStop(self, model)
+	if snum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart0: ClusterStop: %d errs' \
+				% snum_err)
+
+	jnum_err = ClusterStart(self, model)
+	if jnum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart1: ClusterStart: %d errs' \
+				% jnum_err)
+	return snum_err + jnum_err
+
+def ClusterDelete(self, model):
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'no cluster name found'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete0: %r %s' % (e, str(e)))
+		return None
+
+	# Try to stop all the cluster nodes before deleting any.
+	num_errors = ClusterStop(self, model, delete=False)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete1: %s: %d errors' \
+				% (clustername, num_errors))
+		return None
+
+	# If the cluster is stopped, delete all of the nodes.
+	num_errors = ClusterStop(self, model, delete=True)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete2: %s: %d errors' \
+				% (clustername, num_errors))
+		return None
+
+	try:
+		ret = delCluster(self, clustername)
+		if ret is not None:
+			raise Exception, ret
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ClusterDelete3: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+	return CLUSTERLIST
+
+def NodeReboot(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeReboot(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_REBOOT,
+			'Node "%s" is being rebooted' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
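+# Fence a node through a ricci agent running on a different cluster node;
+# the node being fenced is excluded when choosing the agent.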
+def NodeFence(self, clustername, nodename, nodename_resolved):
+	rc = getRicciAgent(self, clustername,
+			exclude_names=[ nodename_resolved, nodename ], exclude_busy=True)
+	if rc is None:
+		rc = getRicciAgent(self, clustername,
+				exclude_names=[ nodename_resolved, nodename ])
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF0: no ricci to fence %s for cluster %s' \
+				% (nodename_resolved, clustername))
+		return None
+
+	batch_number, result = rq.nodeFence(rc, nodename_resolved)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF1: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FENCE,
+			'Node "%s" is being fenced by node "%s"' \
+				% (nodename_resolved, rc.hostname()))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF2: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
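+# Export the modified model as cluster.conf and propagate it through a
+# ricci agent, setting a node flag so the batch job can be tracked.
+# Returns a (success, results) tuple where results holds 'errors',
+# 'messages' and, on success, the 'batchid' of the queued job.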
+def propagateClusterConfAsync(	self,
+								model,
+								rc=None,
+								action=CLUSTER_CONFIG,
+								pmsg=None):
+	errors = list()
+	messages = list()
+
+	try:
+		clustername = model.getClusterName()
+		if not clustername:
+			raise Exception, 'cluster name from model.getClusterName() is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC0: getClusterName: %r %s' \
+				% (e, str(e)))
+		errors.append('Unable to determine cluster name')
+		return (False, { 'errors': errors, 'messages': messages })
+
+	if rc is None:
+		rc = getRicciAgent(self, clustername, exclude_busy=True)
+	if rc is None:
+		rc = getRicciAgent(self, clustername)
+
+	if rc is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC1: no ricci agent for the %s cluster' \
+				% clustername)
+		errors.append('Unable to contact a ricci agent for cluster "%s"' \
+			% clustername)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	try:
+		model.setModified(True)
+		conf_str = str(model.exportModelAsString()).strip()
+		if not conf_str:
+			raise Exception, 'The new cluster configuration is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC2: %r %s' % (e, str(e)))
+		errors.append(repr(e))
+		return (False, { 'errors': errors, 'messages': messages })
+
+	batch_id, result = rq.setClusterConf(rc, conf_str)
+	if batch_id is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC3: batchid or result is None')
+		errors.append('Unable to propagate a new cluster configuration for %s' \
+			% clustername)
+		return (False, { 'errors': errors, 'messages': messages })
+
+	if pmsg is None:
+		pmsg = 'Updating the cluster configuration for "%s"' % clustername
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			batch_id, action, pmsg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCC4: set_node_flag: %r %s' \
+				% (e, str(e)))
+
+	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
+
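+# Retrieve the system logs for a node through its ricci agent. Returns the
+# log text on success or a string describing the error on failure.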
+def GetSystemLogs(self, fvars):
+	nodename = fvars['nodename']
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL0: no node name')
+		return 'No system name was given'
+
+	clustername = fvars['clustername']
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+		if not rc:
+			raise Exception, 'no rc'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSL1: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename_resolved)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GSL2: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+
+		if clustername is not None:
+			try:
+				cnode = getClusterNode(self, nodename_resolved, clustername)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GNL5: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
+		return 'Luci is not authenticated to %s. Reauthenticate first.' \
+			% nodename
+
+	return rq.getNodeLogs(rc)
/cvs/cluster/conga/luci/site/luci/Extensions/LuciClusterInfo.py,v  -->  standard output
revision 1.10.2.1
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py
+++ -	2007-08-09 21:35:22.667793000 +0000
@@ -0,0 +1,1620 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from Products.Archetypes.utils import make_uuid
+from ClusterModel.ModelBuilder import ModelBuilder
+import RicciQueries as rq
+from ricci_communicator import RicciCommunicator
+from FenceHandler import FENCE_OPTS
+from LuciSyslog import get_logger
+from LuciDB import resolve_nodename
+from LuciZope import GetReqVars
+
+from conga_constants import CLUSTER_CONFIG, CLUSTER_DELETE, \
+	CLUSTER_PROCESS, CLUSTER_RESTART, CLUSTER_START, CLUSTER_STOP, \
+	NODE_FORCE_DELETE, FDOM, FENCEDEV, NODE, NODE_ACTIVE, \
+	NODE_ACTIVE_STR, NODE_DELETE, NODE_FENCE, NODE_INACTIVE, \
+	NODE_INACTIVE_STR, NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, \
+	NODE_PROCESS, NODE_REBOOT, NODE_UNKNOWN, NODE_UNKNOWN_STR, \
+	PROP_FENCE_TAB, PROP_GENERAL_TAB, PROP_GULM_TAB, PROP_MCAST_TAB, \
+	PROP_QDISK_TAB, RESOURCE, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE, SERVICE_DELETE, SERVICE_MIGRATE, SERVICE_RESTART, \
+	SERVICE_START, SERVICE_STOP, VM_CONFIG, \
+	LUCI_DEBUG_MODE, LUCI_CLUSTER_BASE_URL
+
+luci_log = get_logger()
+
+def getnodes(self, model):
+	try:
+		return map(lambda x: str(x.getName()), model.getNodes())
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getnodes0: %r %s' % (e, str(e)))
+	return []
+
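+# Build a dict describing a single cluster resource: its name, attributes,
+# type and tag name, plus the view, configure and delete URLs for it.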
+def getResourceInfo(model, name, baseurl, res=None):
+	if res is None:
+		try:
+			res = model.getResourceByName(name)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRI0: %s: %r %s' % (name, e, str(e)))
+			return {}
+
+	res_info = {}
+	res_name = res.getName().strip()
+
+	res_info['name'] = res_name
+	res_info['attrs'] = res.attr_hash
+	res_info['type'] = res.resource_type
+	res_info['tag_name'] = res.TAG_NAME
+
+	cluname = model.getClusterName()
+	res_info['cfgurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE_CONFIG)
+	res_info['url'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE)
+	res_info['delurl'] = '%s?clustername=%s&resourcename=%s&pagetype=%s' \
+		% (baseurl, cluname, res_name, RESOURCE_REMOVE)
+	return res_info
+
+def getResources(model, baseurl):
+	if model is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getResources0: model is none')
+		return []
+	return map(lambda x: getResourceInfo(model, None, baseurl, x), model.getResources())
+
+def getClusterStatusModel(model):
+	results = list()
+	vals = {}
+
+	try:
+		clustername = model.getClusterName()
+		clusteralias = model.getClusterAlias()
+		vals['type'] = 'cluster'
+		vals['alias'] = clusteralias
+		vals['name'] = clustername
+		vals['error'] = True
+		vals['votes'] = '[unknown]'
+		vals['quorate'] = '[unknown]'
+		vals['minQuorum'] = '[unknown]'
+		results.append(vals)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM0: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		nodelist = model.getNodes()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM1: %r %s' % (e, str(e)))
+		return None
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		try:
+			node_name = node.getName()
+			if not node_name:
+				raise Exception, 'cluster node name is unknown'
+		except:
+			node_name = '[unknown]'
+
+		node_val['name'] = node_name
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+
+		try:
+			votes = node.getVotes()
+			if not votes:
+				raise Exception, 'unknown number of votes'
+		except:
+			votes = '[unknown]'
+
+		node_val['votes'] = votes
+		results.append(node_val)
+	return results
+
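+# Get the cluster status, preferring a live query through the ricci agent.
+# If that fails, fall back to the information in the cluster model, and as
+# a last resort to the state stored in the luci database.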
+def getClusterStatus(self, request, rc, cluname=None):
+	try:
+		doc = rq.getClusterStatusBatch(rc) or None
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCS0: error: %r: %r %s' \
+				% (cluname, e, str(e)))
+		doc = None
+
+	if doc is None:
+		model = LuciExtractCluModel(self, request, cluname)
+		if model is not None:
+			try:
+				cinfo = getClusterStatusModel(model)
+				if not cinfo or len(cinfo) < 1:
+					raise Exception, 'cinfo is None'
+				return cinfo
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GCS1: %r: %r %s' \
+						% (cluname, e, str(e)))
+
+	if doc is None:
+		try:
+			from LuciDB import getClusterStatusDB
+			if cluname:
+				clustername = cluname
+			else:
+				fvars = GetReqVars(request, [ 'clustername' ])
+
+				clustername = fvars['clustername']
+				if clustername is None:
+					raise Exception, 'unable to determine cluster name'
+
+			cinfo = getClusterStatusDB(self, clustername)
+			if not cinfo or len(cinfo) < 1:
+				raise Exception, 'cinfo is None'
+			return cinfo
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCS2: cluster %r info from DB: %r %s' \
+					% (cluname, e, str(e)))
+		return []
+
+	results = list()
+	vals = {}
+	vals['type'] = 'cluster'
+
+	try:
+		vals['alias'] = doc.firstChild.getAttribute('alias')
+	except AttributeError, e:
+		vals['alias'] = doc.firstChild.getAttribute('name')
+
+	vals['votes'] = doc.firstChild.getAttribute('votes')
+	vals['name'] = doc.firstChild.getAttribute('name')
+	vals['minQuorum'] = doc.firstChild.getAttribute('minQuorum')
+	vals['quorate'] = doc.firstChild.getAttribute('quorate')
+	results.append(vals)
+
+	for node in doc.firstChild.childNodes:
+		if node.nodeName == 'node':
+			vals = {}
+			vals['type'] = 'node'
+			vals['clustered'] = node.getAttribute('clustered')
+			vals['name'] = node.getAttribute('name')
+			vals['online'] = node.getAttribute('online')
+			vals['uptime'] = node.getAttribute('uptime')
+			vals['votes'] = node.getAttribute('votes')
+			results.append(vals)
+		elif node.nodeName == 'service':
+			vals = {}
+			vals['type'] = 'service'
+			vals['name'] = node.getAttribute('name')
+			vals['nodename'] = node.getAttribute('nodename')
+			vals['running'] = node.getAttribute('running')
+			try:
+				vals['is_vm'] = node.getAttribute('vm').lower() == 'true'
+			except:
+				vals['is_vm'] = False
+			vals['failed'] = node.getAttribute('failed')
+			vals['autostart'] = node.getAttribute('autostart')
+			results.append(vals)
+	return results
+
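+# Build the list of service maps used on the services page: each service's
+# running state, the node it is running on, its enable/disable/restart URLs,
+# and per-node start (and, for VMs, migrate) links.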
+def getServicesInfo(self, status, model, req):
+	svc_map = {}
+	maplist = list()
+	fvars = GetReqVars(req, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	cluname = fvars['clustername']
+	if cluname is None:
+		cluname = model.getClusterName()
+
+	nodes = model.getNodes()
+	for item in status:
+		if item['type'] == 'service':
+			itemmap = {}
+			itemmap['name'] = item['name']
+
+			cur_node = None
+			if item['running'] == 'true':
+				cur_node = item['nodename']
+				itemmap['running'] = 'true'
+				itemmap['nodename'] = cur_node
+				itemmap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_STOP)
+				itemmap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_RESTART)
+			else:
+				itemmap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_START)
+
+			itemmap['autostart'] = item['autostart']
+
+			try:
+				svc = model.retrieveServiceByName(item['name'])
+				itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE)
+				itemmap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_DELETE)
+			except:
+				try:
+					svc = model.retrieveVMsByName(item['name'])
+					itemmap['is_vm'] = True
+					itemmap['cfgurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
+					itemmap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], VM_CONFIG)
+				except:
+					continue
+
+			starturls = list()
+			for node in nodes:
+				cur_nodename = node.getName()
+				if node.getName() != cur_node:
+					starturl = {}
+					starturl['nodename'] = cur_nodename
+					starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_START, cur_nodename)
+					starturls.append(starturl)
+
+					if itemmap.has_key('is_vm') and itemmap['is_vm'] is True:
+						migrate_url = { 'nodename': cur_nodename }
+						migrate_url['migrate'] = True
+						migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, item['name'], SERVICE_MIGRATE, cur_nodename)
+						starturls.append(migrate_url)
+
+			itemmap['links'] = starturls
+
+			dom = svc.getAttribute('domain')
+			if dom is not None:
+				itemmap['faildom'] = dom
+			else:
+				itemmap['faildom'] = 'No Failover Domain'
+			maplist.append(itemmap)
+
+	svc_map['services'] = maplist
+	return svc_map
+
+def recurse_resources(parent_uuid, child, resource_list, indent_ctr, parent=None):
+	# First, add the incoming child as a resource.
+	# Next, check for children of it.
+	# Recurse on each child, then return.
+
+	rc_map = {}
+	if parent is not None:
+		rc_map['parent'] = parent
+	rc_map['name'] = child.getName()
+
+	#Note: Final version needs all resource attrs
+	if child.isRefObject() is True:
+		rc_map['ref_object'] = True
+		rc_map['tag_name'] = child.getObj().TAG_NAME
+		rc_map['type'] = child.getObj().getResourceType()
+		rc_map['attrs'] = child.getObj().getAttributes()
+	else:
+		rc_map['tag_name'] = child.TAG_NAME
+		rc_map['type'] = child.getResourceType()
+		rc_map['attrs'] = child.getAttributes()
+
+	rc_map['indent_ctr'] = indent_ctr
+
+	rc_map['uuid'] = make_uuid('resource')
+	rc_map['parent_uuid'] = parent_uuid
+
+	resource_list.append(rc_map)
+	kids = child.getChildren()
+	child_depth = 0
+	new_indent_ctr = indent_ctr + 1
+	for kid in kids:
+		cdepth = recurse_resources(rc_map['uuid'], kid, resource_list, new_indent_ctr, child)
+		child_depth = max(cdepth, child_depth)
+
+	rc_map['max_depth'] = child_depth
+	return child_depth + 1
+
+
+def get_fdom_names(model):
+	return map(lambda x: x.getName(), model.getFailoverDomains())
+
+
+def getServiceInfo(self, status, model, req):
+	root_uuid = 'toplevel'
+
+	fvars = GetReqVars(req, [ 'clustername', 'servicename', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getServiceInfo0: no model: %r' % model)
+		return {}
+
+	#set up struct for service config page
+	hmap = {}
+
+	try:
+		cluname = fvars['clustername'] or model.getClusterName()
+		hmap['fdoms'] = get_fdom_names(model)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getServiceInfo1: %r %s' % (e, str(e)))
+		hmap['fdoms'] = list()
+
+	hmap['root_uuid'] = root_uuid
+	# uuids for the service page needed when new resources are created
+	hmap['uuid_list'] = map(lambda x: make_uuid('resource'), xrange(30))
+
+	servicename = fvars['servicename']
+	if servicename is None:
+		return hmap
+
+	if len(status) > 0:
+		nodenames = model.getNodeNames()
+
+	for item in status:
+		innermap = {}
+		if item['type'] == 'service':
+			if item['name'] == servicename:
+				hmap['name'] = servicename
+				hmap['autostart'] = item['autostart']
+
+				starturls = list()
+				if item['running'] == 'true':
+					hmap['running'] = 'true'
+					nodename = item['nodename']
+					innermap['current'] = 'Running on %s' % nodename
+
+					innermap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_STOP)
+					innermap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_RESTART)
+					innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
+
+					# In this case, determine where it can run...
+					for node in nodenames:
+						if node != nodename:
+							starturl = {}
+							starturl['nodename'] = node 
+							starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, node)
+							starturls.append(starturl)
+
+							if item.has_key('is_vm') and item['is_vm'] is True:
+								migrate_url = { 'nodename': node }
+								migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, node)
+								migrate_url['migrate'] = True
+								starturls.append(migrate_url)
+					innermap['links'] = starturls
+				else:
+					#Do not set ['running'] in this case...ZPT will detect it is missing
+					innermap['current'] = 'Stopped'
+					innermap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_START)
+					innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
+
+					starturls = list()
+					for node in nodenames:
+						starturl = {}
+
+						starturl['nodename'] = node
+						starturl['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_START, node)
+						starturls.append(starturl)
+
+						if item.has_key('is_vm') and item['is_vm'] is True:
+							migrate_url = { 'nodename': node }
+							migrate_url['url'] = '%s?clustername=%s&servicename=%s&pagetype=%s&nodename=%s' % (baseurl, cluname, servicename, SERVICE_MIGRATE, node)
+							migrate_url['migrate'] = True
+							starturls.append(migrate_url)
+					innermap['links'] = starturls
+				hmap['innermap'] = innermap
+
+	# Now build hashes for resources under service.
+	# first get service by name from model
+
+	svc = model.getService(servicename)
+	try:
+		hmap['domain'] = svc.getAttribute('domain')
+	except:
+		hmap['domain'] = None
+
+	try:
+		hmap['recovery'] = svc.getAttribute('recovery')
+	except:
+		hmap['recovery'] = None
+
+	try:
+		if int(svc.getAttribute('exclusive')):
+			hmap['exclusive'] = 'true'
+		else:
+			hmap['exclusive'] = 'false'
+	except:
+		hmap['exclusive'] = 'false'
+
+	resource_list = list()
+	if svc is not None:
+		indent_ctr = 0
+		children = svc.getChildren()
+		for child in children:
+			recurse_resources(root_uuid, child, resource_list, indent_ctr)
+
+	hmap['resource_list'] = resource_list
+	return hmap
+
+def getFdomInfo(self, model, request):
+	fhash = {}
+	fhash['members'] = {}
+
+	try:
+		fdom = model.getFailoverDomainByName(request['fdomname'])
+		fhash['name'] = fdom.getName()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFdomInfo0: %r %s' % (e, str(e)))
+		return fhash
+
+	ordered_attr = fdom.getAttribute('ordered')
+	if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
+		fhash['prioritized'] = '1'
+	else:
+		fhash['prioritized'] = '0'
+
+	restricted_attr = fdom.getAttribute('restricted')
+	if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
+		fhash['restricted'] = '1'
+	else:
+		fhash['restricted'] = '0'
+
+	nodes = fdom.getChildren()
+	for node in nodes:
+		try:
+			priority = node.getAttribute('priority')
+		except:
+			priority = '1'
+		fhash['members'][node.getName()] = { 'priority': priority }
+	return fhash
+
+def getFdomsInfo(self, model, request, clustatus):
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
+	if clustername is None:
+		return {}
+
+	slist = list()
+	nlist = list()
+	for item in clustatus:
+		if item['type'] == 'node':
+			nlist.append(item)
+		elif item['type'] == 'service':
+			slist.append(item)
+
+	fdomlist = list()
+	for fdom in model.getFailoverDomains():
+		fdom_map = {}
+		fdom_name = fdom.getName()
+		fdom_map['name'] = fdom_name
+		fdom_map['cfgurl'] = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+			% (baseurl, FDOM, clustername, fdom.getName())
+
+		ordered_attr = fdom.getAttribute('ordered')
+		if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
+			fdom_map['ordered'] = True
+		else:
+			fdom_map['ordered'] = False
+
+		restricted_attr = fdom.getAttribute('restricted')
+		if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
+			fdom_map['restricted'] = True
+		else:
+			fdom_map['restricted'] = False
+
+		nodelist = list()
+		for node in fdom.getChildren():
+			nodesmap = {}
+			ndname = node.getName()
+
+			for nitem in nlist:
+				if nitem['name'] == ndname:
+					nodesmap['nodename'] = ndname
+					nodesmap['nodecfgurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+						% (baseurl, clustername, ndname, NODE)
+					if nitem['clustered'] == 'true':
+						nodesmap['status'] = NODE_ACTIVE
+					elif nitem['online'] == 'false':
+						nodesmap['status'] = NODE_UNKNOWN
+					else:
+						nodesmap['status'] = NODE_INACTIVE
+					priority_attr =	node.getAttribute('priority')
+					if priority_attr is not None:
+						nodesmap['priority'] = '0'
+					nodelist.append(nodesmap)
+		fdom_map['nodeslist'] = nodelist
+
+		svclist = list()
+		tmp = model.getServices()
+		tmp.extend(model.getVMs())
+		for svc in tmp:
+			svcname = svc.getName()
+			for sitem in slist:
+				if sitem['name'] == svcname:
+					domain = svc.getAttribute('domain')
+					if domain == fdom_name:
+						svcmap = {}
+						svcmap['name'] = svcname
+						svcmap['status'] = sitem['running']
+						if svc.getTagName() == 'vm':
+							svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' % (baseurl, VM_CONFIG, clustername, svcname)
+						else:
+							svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' % (baseurl, SERVICE, clustername, svcname)
+						svcmap['location'] = sitem['nodename']
+						svclist.append(svcmap)
+		fdom_map['svclist'] = svclist
+		fdomlist.append(fdom_map)
+
+	return fdomlist
+
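+# Collect the cluster properties shown on the cluster configuration page:
+# general settings, fence daemon parameters, multicast or GULM lock server
+# settings, and quorum disk parameters and heuristics.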
+def getClusterInfo(self, model, req):
+	fvars = GetReqVars(req, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	cluname = fvars['clustername']
+	if cluname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCI0: unable to determine cluster name')
+		return {}
+
+	clumap = {}
+	if not model:
+		try:
+			model = getModelForCluster(self, cluname)
+			if not model:
+				raise Exception, 'model is none'
+			req.SESSION.set('model', model)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %r %s' % (cluname, e, str(e)))
+			return {}
+	else:
+		totem = model.getTotemPtr()
+		if totem:
+			clumap['totem'] = totem.getAttributes()
+
+	prop_baseurl = '%s?pagetype=%s&clustername=%s&' \
+		% (baseurl, CLUSTER_CONFIG, cluname)
+	basecluster_url = '%stab=%s' % (prop_baseurl, PROP_GENERAL_TAB)
+	# needed:
+	clumap['basecluster_url'] = basecluster_url
+	# name field
+	clumap['clustername'] = model.getClusterAlias()
+	# config version
+	cp = model.getClusterPtr()
+	clumap['config_version'] = cp.getConfigVersion()
+
+	# xvmd info
+	clumap['fence_xvmd'] = model.hasFenceXVM()
+
+	#-------------
+	#new cluster params - if rhel5
+	#-------------
+	gulm_ptr = model.getGULMPtr()
+	if not gulm_ptr:
+		#Fence Daemon Props
+		fencedaemon_url = '%stab=%s' % (prop_baseurl, PROP_FENCE_TAB)
+		clumap['fencedaemon_url'] = fencedaemon_url
+		fdp = model.getFenceDaemonPtr()
+		pjd = fdp.getAttribute('post_join_delay')
+		if pjd is None:
+			pjd = '6'
+		pfd = fdp.getAttribute('post_fail_delay')
+		if pfd is None:
+			pfd = '0'
+		#post join delay
+		clumap['pjd'] = pjd
+		#post fail delay
+		clumap['pfd'] = pfd
+
+		#-------------
+		#if multicast
+		multicast_url = '%stab=%s' % (prop_baseurl, PROP_MCAST_TAB)
+		clumap['multicast_url'] = multicast_url
+		#mcast addr
+		is_mcast = model.isMulticast()
+		if is_mcast:
+			clumap['mcast_addr'] = model.getMcastAddr()
+			clumap['is_mcast'] = 'True'
+		else:
+			clumap['is_mcast'] = 'False'
+			clumap['mcast_addr'] = '1.2.3.4'
+		clumap['gulm'] = False
+	else:
+		#-------------
+		# GULM params (RHEL4 only)
+		#-------------
+		lockserv_list = list()
+		clunodes = model.getNodes()
+		gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
+		lockserv_list = map(lambda x: (x, True), gulm_lockservs)
+		for node in clunodes:
+			n = node.getName()
+			if not n in gulm_lockservs:
+				lockserv_list.append((n, False))
+		clumap['gulm'] = True
+		clumap['gulm_url'] = '%stab=%s' % (prop_baseurl, PROP_GULM_TAB)
+		clumap['gulm_lockservers'] = lockserv_list
+
+	#-------------
+	# quorum disk params
+	#-------------
+	quorumd_url = '%stab=%s' % (prop_baseurl, PROP_QDISK_TAB)
+	clumap['quorumd_url'] = quorumd_url
+	is_quorumd = model.isQuorumd()
+	clumap['is_quorumd'] = is_quorumd
+	clumap['interval'] = ''
+	clumap['tko'] = ''
+	clumap['votes'] = ''
+	clumap['min_score'] = ''
+	clumap['device'] = ''
+	clumap['label'] = ''
+
+	# list struct for heuristics...
+	hlist = list()
+	if is_quorumd:
+		qdp = model.getQuorumdPtr()
+		interval = qdp.getAttribute('interval')
+		if interval is not None:
+			clumap['interval'] = interval
+
+		tko = qdp.getAttribute('tko')
+		if tko is not None:
+			clumap['tko'] = tko
+
+		votes = qdp.getAttribute('votes')
+		if votes is not None:
+			clumap['votes'] = votes
+
+		min_score = qdp.getAttribute('min_score')
+		if min_score is not None:
+			clumap['min_score'] = min_score
+
+		device = qdp.getAttribute('device')
+		if device is not None:
+			clumap['device'] = device
+
+		label = qdp.getAttribute('label')
+		if label is not None:
+			clumap['label'] = label
+
+		heuristic_kids = qdp.getChildren()
+		for kid in heuristic_kids:
+			hmap = {}
+			hprog = kid.getAttribute('program')
+			if hprog is None:
+				continue
+
+			hscore = kid.getAttribute('score')
+			hmap['hprog'] = hprog
+			if hscore is not None:
+				hmap['hscore'] = hscore
+			else:
+				hmap['hscore'] = ''
+
+			hinterval = kid.getAttribute('interval')
+			if hinterval is not None:
+				hmap['hinterval'] = hinterval
+			else:
+				hmap['hinterval'] = ''
+			hlist.append(hmap)
+	clumap['hlist'] = hlist
+
+	return clumap
+
+def getClustersInfo(self, status, req):
+	clu_map = {}
+
+	fvars = GetReqVars(req, [ 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	nodelist = list()
+	svclist = list()
+	clulist = list()
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		elif item['type'] == 'cluster':
+			clulist.append(item)
+		else:
+			continue
+
+	if len(clulist) < 1:
+		return {}
+	clu = clulist[0]
+	if clu.has_key('error'):
+		clu_map['error'] = clu['error']
+	if clu.has_key('errmsg'):
+		clu_map['errmsg'] = clu['errmsg']
+	clustername = clu['name']
+	if not clu['alias']:
+		clu_map['clusteralias'] = clustername
+	else:
+		clu_map['clusteralias'] = clu['alias']
+	clu_map['clustername'] = clustername
+	if clu['quorate'] == 'true':
+		clu_map['status'] = 'Quorate'
+		clu_map['running'] = 'true'
+	else:
+		clu_map['status'] = 'Not Quorate'
+		clu_map['running'] = 'false'
+	clu_map['votes'] = clu['votes']
+	clu_map['minquorum'] = clu['minQuorum']
+
+	clu_map['clucfg'] = '%s?pagetype=%s&clustername=%s' \
+		% (baseurl, CLUSTER_CONFIG, clustername)
+
+	clu_map['restart_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_RESTART)
+	clu_map['stop_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_STOP)
+	clu_map['start_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_START)
+	clu_map['delete_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+		% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_DELETE)
+
+	svc_dict_list = list()
+	for svc in svclist:
+		svc_dict = {}
+		svcname = svc['name']
+		svc_dict['name'] = svcname
+		svc_dict['servicename'] = svcname
+		svc_dict['nodename'] = svc['nodename']
+		svc_dict['srunning'] = svc['running']
+
+		if svc.has_key('is_vm') and svc['is_vm'] is True:
+			target_page = VM_CONFIG
+		else:
+			target_page = SERVICE
+
+		svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+			% (baseurl, target_page, clustername, svcname)
+		svc_dict['svcurl'] = svcurl
+		svc_dict_list.append(svc_dict)
+
+	clu_map['currentservices'] = svc_dict_list
+	node_dict_list = list()
+
+	for item in nodelist:
+		nmap = {}
+		name = item['name']
+		nmap['nodename'] = name
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+			% (baseurl, NODE, clustername, name)
+		nmap['configurl'] = cfgurl
+		if item['clustered'] == 'true':
+			nmap['status'] = NODE_ACTIVE
+		elif item['online'] == 'false':
+			nmap['status'] = NODE_UNKNOWN
+		else:
+			nmap['status'] = NODE_INACTIVE
+		node_dict_list.append(nmap)
+
+	clu_map['currentnodes'] = node_dict_list
+	return clu_map
+
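+# Build the info hash for a single node's page: its state, the action URLs
+# appropriate to that state, the services currently running on it, the
+# failover domains it belongs to and, when it is reachable, the status of
+# the cluster daemons on it.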
+def getNodeInfo(self, model, status, request):
+	infohash = {}
+	item = None
+	fvars = GetReqVars(request, [ 'URL', 'clustername', 'nodename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	nodestate = NODE_ACTIVE
+	svclist = list()
+	for thing in status:
+		if thing['type'] == 'service':
+			svclist.append(thing)
+
+	clustername = fvars['clustername']
+	nodename = fvars['nodename']
+	if clustername is None or nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo0: %r %r' \
+				% (clustername, nodename))
+		return {}
+
+	# extract correct node line from cluster status
+	found = False
+	for item in status:
+		if (item['type'] == 'node') and (item['name'] == nodename):
+			if item['online'] == 'false':
+				nodestate = NODE_UNKNOWN
+			elif item['clustered'] == 'true':
+				nodestate = NODE_ACTIVE
+			else:
+				nodestate = NODE_INACTIVE
+			found = True
+			break
+
+	if found is False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo1: Unable to find node "%s" in cluster status' % nodename)
+		nodestate = NODE_UNKNOWN
+
+	infohash['nodestate'] = nodestate
+	infohash['nodename'] = nodename
+
+	# set up drop down links
+	if nodestate == NODE_ACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	elif nodestate == NODE_INACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	else:
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, nodename, clustername)
+
+	# figure out current services running on this node
+	svc_dict_list = list()
+	for svc in svclist:
+		if svc['nodename'] == nodename:
+			svc_dict = {}
+			svcname = svc['name']
+			svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+				% (baseurl, SERVICE, clustername, svcname)
+			svc_dict['servicename'] = svcname
+			svc_dict['svcurl'] = svcurl
+			svc_dict_list.append(svc_dict)
+
+	infohash['currentservices'] = svc_dict_list
+
+	fdom_dict_list = list()
+	gulm_cluster = False
+	if model:
+		gulm_cluster = model.getGULMPtr() is not None
+		try:
+			infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
+		except:
+			infohash['gulm_lockserver'] = False
+
+		# next is faildoms
+		fdoms = model.getFailoverDomainsForNode(nodename)
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+				% (baseurl, FDOM, clustername, fdom.getName())
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
+	else:
+		infohash['gulm_lockserver'] = False
+
+	infohash['fdoms'] = fdom_dict_list
+
+	infohash['d_states'] = None
+	nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
+		try:
+			rc = RicciCommunicator(nodename_resolved)
+			if not rc:
+				raise Exception, 'connection failed'
+		except Exception, e:
+			rc = None
+			infohash['ricci_error'] = True
+			luci_log.info('Error connecting to %s: %s' \
+				% (nodename_resolved, str(e)))
+
+		if rc is not None:
+			# call service module on node and find out which daemons are running
+			dlist = list()
+			dlist.append('ccsd')
+			if not gulm_cluster:
+				dlist.append('cman')
+				dlist.append('fenced')
+			else:
+				dlist.append('lock_gulmd')
+			dlist.append('rgmanager')
+			states = rq.getDaemonStates(rc, dlist)
+			infohash['d_states'] = states
+	else:
+		infohash['ricci_error'] = True
+
+	infohash['logurl'] = '/luci/logs/?nodename=%s&clustername=%s' \
+		% (nodename_resolved, clustername)
+	return infohash
+
+def getNodesInfo(self, model, status, req):
+	resultlist = list()
+	nodelist = list()
+	svclist = list()
+
+	fvars = GetReqVars(req, [ 'URL', 'clustername' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername']
+
+	#Sort into lists...
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		else:
+			continue
+
+	if clustername is None:
+		try:
+			clustername = model.getClusterName().strip()
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNI0: no cluster name: %r %s' \
+					% (e, str(e)))
+			return {}
+
+	for item in nodelist:
+		nl_map = {}
+		name = item['name']
+		nl_map['nodename'] = name
+
+		try:
+			nl_map['gulm_lockserver'] = model.isNodeLockserver(name)
+		except:
+			nl_map['gulm_lockserver'] = False
+
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+			% (baseurl, NODE, clustername, name)
+		nl_map['configurl'] = cfgurl
+		nl_map['fenceurl'] = '%s#fence' % cfgurl
+		if item['clustered'] == 'true':
+			nl_map['status'] = NODE_ACTIVE
+			nl_map['status_str'] = NODE_ACTIVE_STR
+		elif item['online'] == 'false':
+			nl_map['status'] = NODE_UNKNOWN
+			nl_map['status_str'] = NODE_UNKNOWN_STR
+		else:
+			nl_map['status'] = NODE_INACTIVE
+			nl_map['status_str'] = NODE_INACTIVE_STR
+
+		nodename_resolved = resolve_nodename(self, clustername, name)
+
+		nl_map['logurl'] = '/luci/logs?nodename=%s&clustername=%s' \
+			% (nodename_resolved, clustername)
+
+		# set up URLs for dropdown menu...
+		if nl_map['status'] == NODE_ACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		elif nl_map['status'] == NODE_INACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		else:
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['force_delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FORCE_DELETE, name, clustername)
+
+		# figure out current services running on this node
+		svc_dict_list = list()
+		for svc in svclist:
+			if svc['nodename'] == name:
+				svc_dict = {}
+				svcname = svc['name']
+				svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+					% (baseurl, SERVICE, clustername, svcname)
+				svc_dict['servicename'] = svcname
+				svc_dict['svcurl'] = svcurl
+				svc_dict_list.append(svc_dict)
+
+		nl_map['currentservices'] = svc_dict_list
+
+		# next is faildoms
+		if model:
+			fdoms = model.getFailoverDomainsForNode(name)
+		else:
+			nl_map['ricci_error'] = True
+			fdoms = list()
+
+		fdom_dict_list = list()
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+				% (baseurl, FDOM, clustername, fdom.getName())
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
+
+		nl_map['fdoms'] = fdom_dict_list
+		resultlist.append(nl_map)
+
+	return resultlist
+
+def getFence(self, model, request):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFence0: model is None')
+		return {}
+
+	fvars = GetReqVars(request, [ 'URL', 'fencename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	fencename = fvars['fencename']
+
+	fence_map = {}
+	nodes = model.getNodes()
+	fencedevs = model.getFenceDevices()
+	clustername = model.getClusterName()
+
+	for fencedev in fencedevs:
+		if fencedev.getName().strip() == fencename:
+			fence_map = fencedev.getAttributes()
+			try:
+				fence_map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
+			except:
+				fence_map['unknown'] = True
+				fence_map['pretty_name'] = fencedev.getAgentType()
+
+			nodes_used = list()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels:
+					# These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids:
+						# These are actual devices in each level
+						if kid.getName().strip() == fencedev.getName().strip():
+							# See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fence_map['nodesused'] = nodes_used
+			return fence_map
+
+	return fence_map
+
+def getFDForInstance(fds, name):
+	for fd in fds:
+		if fd.getName().strip() == name:
+			return fd
+	raise Exception, 'no fence device named "%s" exists' % name
+
+def getFenceInfo(self, model, request):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo0: model is None')
+		return {}
+
+	fvars = GetReqVars(request, [ 'clustername', 'URL', 'nodename' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername'] or model.getClusterName()
+
+	nodename = fvars['nodename']
+	if nodename is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo1: no nodename')
+		return {}
+
+	fence_map = {}
+	level1 = list() #First level fence devices
+	level2 = list() #Second level fence devices
+	shared1 = list() #List of available sharable fence devs not used in level1
+	shared2 = list() #List of available sharable fence devs not used in level2
+	fence_map['level1'] = level1
+	fence_map['level2'] = level2
+	fence_map['shared1'] = shared1
+	fence_map['shared2'] = shared2
+
+	major_num = 1
+	minor_num = 100
+
+	# Here we need to get fences for a node - just the first two levels
+	# Each level has its own list of fence devs used in that level
+	# For each fence dev, a list of instance structs is appended
+	# In addition, for each level, a list of available but unused fence devs
+	# is returned.
+	try:
+		node = model.retrieveNodeByName(nodename)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo3: unable to find node name "%s" in current node list: %r %s' % (str(nodename), e, str(e)))
+		return {}
+
+	fds = model.getFenceDevices()
+
+	levels = node.getFenceLevels()
+	len_levels = len(levels)
+
+	if len_levels == 0:
+		return fence_map
+
+	if len_levels >= 1:
+		first_level = levels[0]
+		kids = first_level.getChildren()
+
+		# This is a marker for allowing multi instances
+		# beneath a fencedev
+		last_kid_fd = None
+
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				# Set to None in case last time thru loop
+				fd = None
+				continue
+
+			if fd is not None:
+				if fd.isShared() is False:
+					# Not a shared dev... build struct and add
+					fencedev = {}
+
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							# Don't duplicate name attr
+							continue
+						fencedev[kee] = kidattrs[kee]
+
+					# This fencedev struct is complete, and needs
+					# to be placed on the level1 Q. Because it is
+					# non-shared, we should set last_kid_fd to none.
+					last_kid_fd = None
+					level1.append(fencedev)
+				else:
+					# This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):
+						# just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+
+						for kee in kees:
+							if kee != 'name':
+								instance_struct[kee] = kidattrs[kee]
+
+						# Now just add this struct to last_kid_fd
+						# and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						continue
+					else:
+						# Shared, but not used above...so we need
+						# a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee != 'name':
+								instance_struct[kee] = kidattrs[kee]
+
+						inlist.append(instance_struct)
+						level1.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level1'] = level1
+
+		# level1 list is complete now, but it is still
+		# necessary to build shared1
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level1:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared1.append(shared_struct)
+		fence_map['shared1'] = shared1
+
+	# YUK: This next section violates the DRY rule, :-(
+	if len_levels >= 2:
+		second_level = levels[1]
+		kids = second_level.getChildren()
+		# This is a marker for allowing multi instances
+		# beneath a fencedev
+		last_kid_fd = None
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				fd = None #Set to None in case last time thru loop
+				continue
+			if fd is not None:
+				if fd.isShared() is False:	#Not a shared dev...build struct and add
+					fencedev = {}
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							continue #Don't duplicate name attr
+						fencedev[kee] = kidattrs[kee]
+					#This fencedev struct is complete, and needs to be placed on the
+					#level2 Q. Because it is non-shared, we should set last_kid_fd
+					#to none.
+					last_kid_fd = None
+					level2.append(fencedev)
+				else:	#This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):	#just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						#Now just add this struct to last_kid_fd and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						#last_kid_fd = fd
+						continue
+					else: #Shared, but not used above...so we need a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						inlist.append(instance_struct)
+						level2.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level2'] = level2
+
+		#level2 list is complete but like above, we need to build shared2
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level2:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared2.append(shared_struct)
+		fence_map['shared2'] = shared2
+
+	return fence_map
+
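For reference, the map getFenceInfo() returns is shaped as below; the keys mirror what the code above populates, but every value here is made up:

example_fence_map = {
	'level1': [ {
		'name': 'apc1',			# device attributes copied from the model
		'agent': 'fence_apc',
		'prettyname': 'APC Power Switch',	# hypothetical FENCE_OPTS label
		'isShared': True,
		'id': '1',
		'cfgurl': '/luci/cluster/index_html?clustername=c1&fencename=apc1&pagetype=...',
		'instance_list': [ { 'id': '100', 'port': '1' } ],
	} ],
	'level2': [],
	'shared1': [ { 'name': 'apc2', 'agent': 'fence_apc',
		'prettyname': 'APC Power Switch' } ],
	'shared2': [],
}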
+def getFencesInfo(self, model, request):
+	fences_map = {}
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFencesInfo0: model is None')
+		fences_map['fencedevs'] = list()
+		return fences_map
+
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	clustername = fvars['clustername'] or model.getClusterName()
+
+	fencedevs = list() #This is for the fencedev list page
+
+	#Get list of fence devices
+	fds = model.getFenceDevices()
+	for fd in fds:
+		#This section determines which nodes use the dev
+		#create fencedev hashmap
+		nodes_used = list()
+
+		if fd.isShared() is True:
+			fencedev = {}
+			attr_hash = fd.getAttributes()
+			kees = attr_hash.keys()
+
+			for kee in kees:
+				fencedev[kee] = attr_hash[kee] #copy attrs over
+			try:
+				fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
+			except:
+				fencedev['unknown'] = True
+				fencedev['pretty_name'] = fd.getAgentType()
+
+			fencedev['agent'] = fd.getAgentType()
+			#Add config url for this fencedev
+			fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+				% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+
+			nodes = model.getNodes()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels: #These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids: #These are actual devices in each level
+						if kid.getName().strip() == fd.getName().strip():
+							#See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fencedev['nodesused'] = nodes_used
+			fencedevs.append(fencedev)
+
+	fences_map['fencedevs'] = fencedevs
+	return fences_map
+
+def getVMInfo(self, model, request):
+	vm_map = {}
+	fvars = GetReqVars(request, [ 'clustername', 'servicename', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+	clustername = fvars['clustername']
+	if clustername is None:
+		clustername = model.getClusterName()
+
+	svcname = fvars['servicename']
+	if svcname is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getVMInfo0: no service name')
+		return vm_map
+
+	vm_map['formurl'] = '%s?clustername=%s&pagetype=29&servicename=%s' \
+		% (baseurl, clustername, svcname)
+
+	try:
+		vm = model.retrieveVMsByName(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('getVMInfo1: %s: %r %s' % (svcname, e, str(e)))
+		return vm_map
+
+	attrs = vm.getAttributes()
+	keys = attrs.keys()
+	for key in keys:
+		vm_map[key] = attrs[key]
+
+	return vm_map
+
+def getResourcesInfo(self, model, request):
+	fvars = GetReqVars(request, [ 'clustername', 'URL' ])
+
+	baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+	if fvars['clustername'] is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getResourcesInfo missing cluster name')
+		return []
+
+	return getResources(model, baseurl)
+
+def getClusterName(self, model):
+	return model.getClusterName()
+
+def getClusterAlias(self, model):
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCA0: no model')
+		return ''
+	alias = model.getClusterAlias()
+	if not alias:
+		return model.getClusterName()
+	return alias
+
+def getModelBuilder(self, rc, isVirtualized):
+	try:
+		cluster_conf_node = rq.getClusterConf(rc)
+		if not cluster_conf_node:
+			raise Exception, 'getClusterConf returned None'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		model = ModelBuilder(0, None, None, cluster_conf_node)
+		if not model:
+			raise Exception, 'ModelBuilder() returned None'
+		model.setIsVirtualized(isVirtualized)
+	except Exception, e:
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%s": %r %s' % (cluster_conf_node.toxml(), e, str(e)))
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GMB1: ModelBuilder failed')
+		return None
+
+	return model
+
+def getModelForCluster(self, clustername):
+	from LuciDB import getRicciAgent
+
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMFC0: no ricci agent for %s' % clustername)
+		return None
+
+	try:
+		model = getModelBuilder(None, rc, rc.dom0())
+		if not model:
+			raise Exception, 'model is none'
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r %s' % (clustername, e, str(e)))
+	return None
+
+def LuciExtractCluModel(self, request, cluster_name=None):
+	model = None
+	try:
+		model = request.SESSION.get('model')
+		model.getClusterName()
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM0: %r: %r %s' \
+				% (cluster_name, e, str(e)))
+		model = None
+
+	if not cluster_name:
+		fvar = GetReqVars(request, [ 'clustername' ])
+		cluster_name = fvar['clustername']
+
+	if cluster_name is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM1: no cluster name')
+		return None
+
+	try:
+		model = getModelForCluster(self, cluster_name)
+		model.getClusterName()
+		return model
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM2: no model for %s: %r %s' \
+				% (cluster_name, e, str(e)))
+	return None
+
+def getClusterOS(self, rc):
+	from HelperFunctions import resolveOSType
+
+	clu_map = {}
+
+	try:
+		os_str = resolveOSType(rc.os())
+		clu_map['os'] = os_str
+		clu_map['isVirtualized'] = rc.dom0()
+	except:
+		# default to rhel5 if something crazy happened.
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+		except:
+			# this can throw an exception if the original exception
+			# is caused by rc being None or stale.
+			pass
+		clu_map['os'] = 'rhel5'
+		clu_map['isVirtualized'] = False
+	return clu_map
+
+def getClusterConfNodes(conf_dom):
+	try:
+		cluster_nodes = conf_dom.getElementsByTagName('clusternode')
+		return map(lambda x: str(x.getAttribute('name')), cluster_nodes)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCCN0: %r %s' % (e, str(e)))
+	return None
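getClusterConfNodes() above pulls only the clusternode names out of a cluster.conf DOM. A standalone sketch of the same extraction, using a made-up configuration fragment:

from xml.dom import minidom

conf_xml = """<cluster name="c1" config_version="1">
  <clusternodes>
    <clusternode name="node1.example.com" nodeid="1"/>
    <clusternode name="node2.example.com" nodeid="2"/>
  </clusternodes>
</cluster>"""

conf_dom = minidom.parseString(conf_xml)
cluster_nodes = conf_dom.getElementsByTagName('clusternode')
names = map(lambda x: str(x.getAttribute('name')), cluster_nodes)
# names == ['node1.example.com', 'node2.example.com']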
/cvs/cluster/conga/luci/site/luci/Extensions/LuciDB.py,v  -->  standard output
revision 1.6.2.1
--- conga/luci/site/luci/Extensions/LuciDB.py
+++ -	2007-08-09 21:35:22.772198000 +0000
@@ -0,0 +1,969 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from AccessControl import getSecurityManager
+from ricci_communicator import RicciCommunicator
+from LuciZopePerm import isAdmin
+from LuciSyslog import get_logger
+
+from conga_constants import CLUSTER_FOLDER_PATH, BATCH_ID, TASKTYPE, \
+	FLAG_DESC, CLUSTER_ADD, LAST_STATUS, \
+	STORAGE_FOLDER_PATH, LUCI_DEBUG_MODE
+
+# Cluster node exception attribute flags
+CLUSTER_NODE_NEED_AUTH	= 0x01
+CLUSTER_NODE_NOT_MEMBER	= 0x02
+CLUSTER_NODE_ADDED		= 0x04
+
+luci_log = get_logger()
+
+def getClusterNode(self, nodename, clustername):
+	try:
+		path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
+		cluster_node = self.restrictedTraverse(path)
+		if not cluster_node:
+			return None
+		return cluster_node
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getClusterNode0: %s %s: %r %s' \
+				% (nodename, clustername, e, str(e)))
+	return None
+
+def getStorageNode(self, nodename):
+	try:
+		path = str('%s%s' % (STORAGE_FOLDER_PATH, nodename))
+		storage_node = self.restrictedTraverse(path)
+		if not storage_node:
+			raise Exception, 'no storage node found'
+		return storage_node
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getStorageNode0: %s: %r %s' \
+				% (nodename, e, str(e)))
+	return None
+
+def testNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			return False
+		return flags & flag_mask != 0
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('testNodeStatus0: %r %s' % (e, str(e)))
+	return False
+
+def setNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			flags = 0
+		node.manage_changeProperties({ 'flags': flags | flag_mask })
+	except:
+		try:
+			node.manage_addProperty('flags', flag_mask, 'int')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('setNodeStatus0: %r %s' \
+					% (e, str(e)))
+
+def clearNodeStatus(node, flag_mask):
+	try:
+		flags = node.getProperty('flags')
+		if flags is None:
+			return
+		if flags & flag_mask != 0:
+			node.manage_changeProperties({ 'flags': flags & ~flag_mask })
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clearNodeStatus0: %r %s' \
+				% (e, str(e)))
+
+def set_node_flag(self, cluname, agent, batchid, task, desc):
+	path = str('%s%s' % (CLUSTER_FOLDER_PATH, cluname))
+	batch_id = str(batchid)
+	objname = '%s____flag' % agent
+
+	objpath = ''
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+		objpath = str('%s/%s' % (path, objname))
+		flag = self.restrictedTraverse(objpath)
+		flag.manage_addProperty(BATCH_ID, batch_id, 'string')
+		flag.manage_addProperty(TASKTYPE, task, 'string')
+		flag.manage_addProperty(FLAG_DESC, desc, 'string')
+	except Exception, e:
+		errmsg = 'SNF0: error creating flag (%s,%s,%s) at %s: %s' \
+					% (batch_id, task, desc, objpath, str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('SNF0: %r %s' % (e, errmsg))
+		raise Exception, errmsg
+
+def NodeBusy(self, clustername, nodename, rc=None):
+	try:
+		path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename))
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy0: (%s,%s) %r %s' \
+				% (clustername, nodename, e, str(e)))
+		return None
+
+	flagname = '%s____flag' % nodename
+
+	try:
+		items = nodefolder.objectItems('ManagedSystem')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NodeBusy1: error getting flags for %s: %r %s' \
+				% (nodename, e, str(e)))
+		return None
+
+	for item in items:
+		if item[0] != flagname:
+			continue
+
+		# A flag already exists. Check to see whether we're done.
+		if rc is None:
+			try:
+				rc = RicciCommunicator(nodename)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy2: ricci error %s: %r %s' \
+						% (nodename, e, str(e)))
+				# We can't know if we're done or not; err on the
+				# side of caution.
+				return True
+
+		if not rc.authed():
+			try:
+				snode = getStorageNode(self, nodename)
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy3: %s: %r %s' \
+						% (nodename, e, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.info('%s not authenticated' % item[0])
+			# The comment above applies here, too.
+			return True
+
+		batch_id = item[1].getProperty(BATCH_ID)
+		batch_ret = rc.batch_status(batch_id)
+		finished = batch_ret[0]
+		if finished is True or finished == -1:
+			if finished == -1:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('NodeBusy5: batch error: %s' \
+						% batch_ret[1])
+
+			try:
+				nodefolder.manage_delObjects([item[0]])
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('NodeBusy6: %s: %r %s' % (item[0], e, str(e)))
+			return False
+
+		# Not finished, so don't remove the flag.
+		return True
+
+	# If this code is ever reached, no flags exist for the node in question.
+	return False
+
+def resolve_nodename(self, clustername, nodename):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+		objs = clusterfolder.objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RNN0: error for %s/%s: %r %s' \
+				% (nodename, clustername, e, str(e)))
+		return nodename
+
+	for obj in objs:
+		try:
+			if obj[0].find(nodename) != (-1):
+				return obj[0]
+		except:
+			continue
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('RNN1: failed for %s/%s: nothing found' \
+			% (nodename, clustername))
+	return nodename
+
+def resolveClusterChanges(self, clustername, model):
+	try:
+		mb_nodes = model.getNodes()
+		if not mb_nodes or not len(mb_nodes):
+			raise Exception, 'node list is empty'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RCC0: no model builder nodes found for %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to find cluster nodes for %s' % clustername
+
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_node = self.restrictedTraverse(path)
+		if not cluster_node:
+			raise Exception, 'cluster node is none'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC1: cant find cluster node for %s: %r %s' \
+				% (clustername, e, str(e)))
+		return 'Unable to find an entry for %s in the Luci database.' % clustername
+
+	try:
+		db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
+		if not db_nodes or not len(db_nodes):
+			raise Exception, 'no database nodes'
+	except Exception, e:
+		# Should we just create them all? Can this even happen?
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('RCC2: error: %r %s' % (e, str(e)))
+		return 'Unable to find database entries for any nodes in %s' % clustername
+
+	same_host = lambda x, y: x == y or x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'
+
+	# this is a really great algorithm.
+	missing_list = list()
+	new_list = list()
+	for i in mb_nodes:
+		f = 0
+		for j in db_nodes:
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			new_list.append(i)
+
+	for i in db_nodes:
+		f = 0
+		for j in mb_nodes:
+			if same_host(i, j):
+				f = 1
+				break
+		if not f:
+			missing_list.append(i)
+
+	messages = list()
+	for i in missing_list:
+		try:
+			# or alternately
+			# new_node = cluster_node.restrictedTraverse(i)
+			# #setNodeStatus(self, new_node, CLUSTER_NODE_NOT_MEMBER)
+			cluster_node.manage_delObjects([i])
+			messages.append('Node "%s" is no longer a member of cluster "%s." It has been deleted from the management interface for this cluster.' % (i, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC3: deleted node %s' % i)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC4: delObjects: %s: %r %s' \
+					% (i, e, str(e)))
+
+	new_flags = CLUSTER_NODE_NEED_AUTH | CLUSTER_NODE_ADDED
+	for i in new_list:
+		try:
+			cluster_node.manage_addFolder(i)
+			new_node = cluster_node.restrictedTraverse(str(i))
+			setNodeStatus(new_node, new_flags)
+			messages.append('A new cluster node, "%s," is now a member of cluster "%s." It has been added to the management interface for this cluster, but you must authenticate to it in order for it to be fully functional.' % (i, clustername))
+		except Exception, e:
+			messages.append('A new cluster node, "%s," is now a member of cluster "%s," but it has not been added to the management interface for this cluster as a result of an error creating a database entry for it.' % (i, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC5: addFolder: %s/%s: %r %s' \
+					% (clustername, i, e, str(e)))
+
+	return messages
+
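The nested loops above amount to two set differences under the same_host() relation: model nodes with no database entry yet, and database entries whose node has left the cluster. A standalone sketch of that comparison with made-up host lists:

same_host = lambda x, y: x == y or \
	x[:len(y) + 1] == y + '.' or y[:len(x) + 1] == x + '.'

mb_nodes = [ 'node1', 'node3.example.com' ]	# from the model
db_nodes = [ 'node1.example.com', 'node2' ]	# from the Luci database

new_list = [ i for i in mb_nodes
	if not [ j for j in db_nodes if same_host(i, j) ] ]
missing_list = [ i for i in db_nodes
	if not [ j for j in mb_nodes if same_host(i, j) ] ]
# new_list == ['node3.example.com'], missing_list == ['node2']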
+def buildClusterCreateFlags(self, batch_map, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('buildCCF0: no cluster folder at %s: %r %s' \
+				% (path, e, str(e)))
+		return None
+
+	for key in batch_map.keys():
+		try:
+			key = str(key)
+			batch_id = str(batch_map[key])
+			# This suffix needed to avoid name collision
+			objname = '%s____flag' % key
+
+			clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+			# now designate this new object properly
+			objpath = str('%s/%s' % (path, objname))
+			flag = self.restrictedTraverse(objpath)
+
+			flag.manage_addProperty(BATCH_ID, batch_id, 'string')
+			flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
+			flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
+			flag.manage_addProperty(LAST_STATUS, 0, 'int')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
+
+def manageCluster(self, clustername, node_list, cluster_os):
+	try:
+		clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+		if not clusters:
+			raise Exception, 'cannot find the cluster entry in the DB'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC0: %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to create cluster %s: the cluster directory is missing.' % clustername
+
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		newCluster = self.restrictedTraverse(path)
+		if newCluster:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC1: cluster %s: already exists' \
+					% clustername)
+			return 'A cluster named %s is already managed by Luci' % clustername
+	except:
+		pass
+
+	try:
+		clusters.manage_addFolder(clustername)
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		newCluster = self.restrictedTraverse(path)
+		if not newCluster:
+			raise Exception, 'unable to create the cluster DB entry for %s' % clustername
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC2: %s: %r %s' % (clustername, e, str(e)))
+		return 'Unable to create cluster %s: %s' % (clustername, str(e))
+
+	try:
+		newCluster.manage_acquiredPermissions([])
+		newCluster.manage_role('View', ['Access Contents Information', 'View'])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC3: %s: %r %s' % (clustername, e, str(e)))
+		try:
+			clusters.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC4: %s: %r %s' \
+					% (clustername, e, str(e)))
+		return 'Unable to set permissions on new cluster: %s: %s' \
+			% (clustername, str(e))
+
+	try:
+		newCluster.manage_addProperty('cluster_os', cluster_os, 'string')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC5: %s: %s: %r %s' \
+				% (clustername, cluster_os, e, str(e)))
+
+	for i in node_list:
+		host = node_list[i]['host']
+
+		try:
+			newCluster.manage_addFolder(host)
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, host))
+			newSystem = self.restrictedTraverse(path)
+			if not newSystem:
+				raise Exception, 'unable to create cluster system DB entry for node %s' % host
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			try:
+				clusters.manage_delObjects([clustername])
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('MC6: %s: %s: %r %s' \
+						% (clustername, host, e, str(e)))
+
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC7: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
+
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'The storage DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MC8: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
+		return 'Error adding storage node %s: %s' % (host, str(e))
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for i in node_list:
+		host = node_list[i]['host']
+
+		try:
+			# It's already there, as a storage system, no problem.
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			dummy = self.restrictedTraverse(path)
+			continue
+		except:
+			pass
+
+		try:
+			ssystem.manage_addFolder(host)
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			newSystem = self.restrictedTraverse(path)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('MC9: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+def createClusterSystems(self, clustername, node_list):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterObj = self.restrictedTraverse(path)
+		if not clusterObj:
+			raise Exception, 'cluster %s DB entry is missing' % clustername
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return 'No cluster named "%s" is managed by Luci' % clustername
+
+	for x in node_list:
+		i = node_list[x]
+		host = str(i['host'])
+
+		try:
+			clusterObj.manage_addFolder(host)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS0a: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+		try:
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, host))
+			newSystem = self.restrictedTraverse(path)
+			if not newSystem:
+				raise Exception, 'cluster node DB entry for %s disappeared from under us' % host
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS1: %s: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+			return 'Unable to create cluster node %s for cluster %s: %s' \
+				% (host, clustername, str(e))
+
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'storage DB entry is missing'
+	except Exception, e:
+		# This shouldn't fail, but if it does, it's harmless right now
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CCS2: %s: %s: %r %s' \
+				% (clustername, host, e, str(e)))
+		return None
+
+	# Only add storage systems if the cluster and cluster node DB
+	# objects were added successfully.
+	for x in node_list:
+		i = node_list[x]
+		host = str(i['host'])
+
+		try:
+			# It's already there, as a storage system, no problem.
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			dummy = self.restrictedTraverse(path)
+			continue
+		except:
+			pass
+
+		try:
+			ssystem.manage_addFolder(host)
+			path = str('%s%s' % (STORAGE_FOLDER_PATH, host))
+			newSystem = self.restrictedTraverse(path)
+			newSystem.manage_acquiredPermissions([])
+			newSystem.manage_role('View', [ 'Access contents information' , 'View' ])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CCS3: %s: %r %s' \
+					% (clustername, host, e, str(e)))
+
+def delSystem(self, systemName):
+	try:
+		ssystem = self.restrictedTraverse(STORAGE_FOLDER_PATH)
+		if not ssystem:
+			raise Exception, 'storage DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem0: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to find storage system %s: %s' % (systemName, str(e))
+
+	try:
+		rc = RicciCommunicator(systemName, enforce_trust=False)
+		if rc is None:
+			raise Exception, 'rc is None'
+	except Exception, e:
+		try:
+			ssystem.manage_delObjects([ systemName ])
+		except Exception, e1:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delSystem1: %s: %r %s' \
+					% (systemName, e1, str(e1)))
+			return 'Unable to delete the storage system %s' % systemName
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem2: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return
+
+	# Only unauthenticate if the system isn't a member of
+	# a managed cluster.
+	cluster_info = rc.cluster_info()
+	if not cluster_info:
+		cluster_name = None
+	elif not cluster_info[0]:
+		cluster_name = cluster_info[1]
+	else:
+		cluster_name = cluster_info[0]
+
+	unauth = False
+	if not cluster_name:
+		# If it's a member of no cluster, unauthenticate
+		unauth = True
+	else:
+		try:
+			path = str('%s%s/%s' % (CLUSTER_FOLDER_PATH, cluster_name, systemName))
+			dummy = self.restrictedTraverse(path).objectItems()
+		except Exception, e:
+			# It's not a member of a managed cluster, so unauthenticate.
+			unauth = True
+
+	if unauth is True:
+		try:
+			rc.unauth()
+		except:
+			pass
+
+	try:
+		ssystem.manage_delObjects([ systemName ])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delSystem3: %s: %r %s' \
+				% (systemName, e, str(e)))
+		return 'Unable to delete storage system %s: %s' \
+			% (systemName, str(e))
+
+def delCluster(self, clustername):
+	try:
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+		if not clusters:
+			raise Exception, 'clusters DB entry is missing'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster0: %r %s' % (e, str(e)))
+		return 'Unable to find cluster %s' % clustername
+
+	err = delClusterSystems(self, clustername)
+	if err:
+		return err
+
+	try:
+		clusters.manage_delObjects([ clustername ])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluster1: %s %r %s' \
+				% (clustername, e, str(e)))
+		return 'Unable to delete cluster %s' % clustername
+
+def delClusterSystem(self, cluster, systemName):
+	try:
+		path = str('%s%s' % (STORAGE_FOLDER_PATH, systemName))
+		dummy = self.restrictedTraverse(path).objectItems()
+	except:
+		# It's not a storage system, so unauthenticate.
+		try:
+			rc = RicciCommunicator(systemName, enforce_trust=False)
+			rc.unauth()
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delClusterSystem0: ricci error for %s: %r %s' % (systemName, e, str(e)))
+
+	try:
+		cluster.manage_delObjects([ systemName ])
+	except Exception, e:
+		err_str = 'Error deleting cluster object %s: %s' % (systemName, str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delClusterSystem1: %r %s' % (e, err_str))
+		return err_str
+
+def delClusterSystems(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster = self.restrictedTraverse(path)
+		if not cluster:
+			raise Exception, 'cluster DB entry is missing'
+
+		try:
+			csystems = getClusterSystems(self, clustername)
+			if not csystems or len(csystems) < 1:
+				return None
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems0: %r %s' % (e, str(e)))
+			return None
+	except Exception, er:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delCluSystems1: error for %s: %r %s' \
+				% (clustername, er, str(er)))
+		return str(er)
+
+	error_list = list()
+	for i in csystems:
+		err = delClusterSystem(self, cluster, i[0])
+		if err:
+			error_list.append('Unable to delete the cluster system %s: %s\n' \
+				% (i[0], err))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('delCluSystems2: %s' % err)
+	return ''.join(error_list)
+
+def getSystems(self):
+	storage = getStorage(self)
+	clusters = getClusters(self)
+	storageList = list()
+	ret = [{}, [], {}]
+
+	need_auth_hash = {}
+	for i in storage:
+		storageList.append(i[0])
+		if testNodeStatus(i[1], CLUSTER_NODE_NEED_AUTH) is not False:
+			need_auth_hash[i[0]] = i[1]
+
+	chash = {}
+	for i in clusters:
+		csystems = getClusterSystems(self, i[0])
+		cslist = list()
+		for c in csystems:
+			if testNodeStatus(c[1], CLUSTER_NODE_NEED_AUTH) is not False:
+				need_auth_hash[c[0]] = c[1]
+			cslist.append(c[0])
+		chash[i[0]] = cslist
+
+	ret[0] = chash
+	ret[1] = storageList
+	ret[2] = need_auth_hash
+	return ret
+
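getSystems() returns a three-element list: a cluster-name to node-name-list map, the flat list of storage system names, and a map of systems still flagged CLUSTER_NODE_NEED_AUTH. An illustrative return value; all names are hypothetical, and the real need_auth_hash values are Zope objects rather than strings:

example_ret = [
	{ 'cluster1': [ 'node1.example.com', 'node2.example.com' ] },	# chash
	[ 'storage1.example.com', 'node1.example.com' ],	# storageList
	{ 'node2.example.com': '<node DB object>' },		# need_auth_hash
]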
+def getCluster(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_obj = self.restrictedTraverse(path)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCobj0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	if cluster_permission_check(self, cluster_obj):
+		return cluster_obj
+	return None
+
+def getClusterSystems(self, clustername):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		cluster_nodes = self.restrictedTraverse(path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSy0: %s: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return cluster_nodes
+	cluster_obj = getCluster(self, clustername)
+	if cluster_permission_check(self, cluster_obj):
+		return cluster_nodes
+	return None
+
+def getClusters(self):
+	try:
+		clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GC0: %r %s' % (e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return clusters
+	return check_clusters(self, clusters)
+
+def getStorage(self):
+	try:
+		storage = self.restrictedTraverse(STORAGE_FOLDER_PATH).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GS0: %r %s' % (e, str(e)))
+		return None
+
+	if isAdmin(self):
+		return storage
+	return allowed_systems(self, storage)
+
+def check_clusters(self, clusters):
+	ret = []
+	try:
+		user = getSecurityManager().getUser()
+		ret = filter(lambda x: user.has_permission('View', x[1]), clusters)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CC0: %r %s' % (e, str(e)))
+	return ret
+
+def cluster_permission_check(self, cluster):
+	if isAdmin(self):
+		return True
+
+	try:
+		user = getSecurityManager().getUser()
+		if user.has_permission('View', cluster[1]):
+			return True
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CPC0: %s: %r %s' % (cluster, e, str(e)))
+	return False
+
+def allowed_systems(self, systems):
+	user = getSecurityManager().getUser()
+	return filter(lambda x: user.has_permission('View', x[1]), systems)
+
+def access_to_host_allowed(self, hostname, allowed_systems_list):
+	allowed = dict(map(lambda x: [ x[0], None ], allowed_systems_list))
+	return allowed.has_key(hostname)
+
+def getRicciAgent(self, clustername, exclude_names=None, exclude_busy=False):
+	try:
+		perm = cluster_permission_check(self, clustername)
+		if not perm:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA0: no permission for %s' \
+					% clustername)
+			return None
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRA0: %r %s' % (e, str(e)))
+		return None
+
+	clusterfolder = None
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA1: cluster folder %s for %s is missing' \
+					% (path, clustername))
+			return None
+
+		nodes = clusterfolder.objectItems('Folder')
+		if len(nodes) < 1:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA2: no cluster nodes for %s found' \
+					% clustername)
+			raise Exception, 'No cluster nodes were found at %s' % path
+	except Exception, e:
+		try:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('GRA3: cluster folder %s for %s is missing: %r %s' % (path, clustername, e, str(e)))
+
+			if clusterfolder is not None and len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+				clusters.manage_delObjects([clustername])
+		except Exception, ein:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA4: %r %s' % (ein, str(ein)))
+		return None
+
+	cluname = clustername.lower()
+	for node in nodes:
+		hostname = node[0]
+
+		if exclude_names is not None and hostname in exclude_names:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA5: %s is in the excluded names list, excluding it' % hostname)
+			continue
+
+		try:
+			rc = RicciCommunicator(hostname)
+			if not rc:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA6: rc is None')
+				continue
+
+			ricci_hostname = rc.hostname()
+			if not ricci_hostname:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA7: ricci_hostname is blank')
+				continue
+
+			clu_info = rc.cluster_info()
+
+			cur_name = str(clu_info[0]).strip().lower()
+			if not cur_name:
+				cur_name = None
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA8: cluster name is none for %s' \
+						% ricci_hostname)
+				raise Exception, '%s not in a cluster' % ricci_hostname
+
+			cur_alias = str(clu_info[1]).strip().lower()
+			if not cur_alias:
+				cur_alias = None
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA9: no cluster alias for %s' \
+						% ricci_hostname)
+
+			if (cur_name is not None and cluname != cur_name) and (cur_alias is not None and cluname != cur_alias):
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug('GRA10: node %s reports it\'s in cluster [%s:%s] we expect %s' % (ricci_hostname, clu_info[0], clu_info[1], cluname))
+				setNodeStatus(node[1], CLUSTER_NODE_NOT_MEMBER)
+				continue
+
+			if not rc.authed():
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA11: %s is not authenticated' \
+						% ricci_hostname)
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+				continue
+		except Exception, eout:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRA12: %r %s' % (eout, str(eout)))
+			continue
+
+		if exclude_busy is True:
+			if NodeBusy(self, cluname, ricci_hostname, rc) is not False:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GRA13: %s is busy, excluding' \
+						% ricci_hostname)
+				continue
+		return rc
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug('GRA14: no ricci agent could be found for cluster %s' \
+			% cluname)
+	return None
+
+def getClusterDBObj(self, clustername):
+	try:
+		cluster_path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		nodelist = self.restrictedTraverse(cluster_path)
+		if not nodelist:
+			raise Exception, 'no nodelist'
+		return nodelist
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDB0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+	return None
+
+def getClusterDBNodes(self, clustername):
+	try:
+		cluster_path = str('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBN0: %s -> %s: %r %s' \
+				% (clustername, cluster_path, e, str(e)))
+		return []
+	return nodelist
+
+def getClusterDBVersion(cluster_folder):
+	try:
+		cluster_os = str(cluster_folder.getProperty('cluster_os'))
+		if not cluster_os:
+			raise Exception, 'cluster os is blank'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCDBV0: %s: %r %s' \
+				% (cluster_folder.getId(), e, str(e)))
+		cluster_os = None
+	return cluster_os
+
+def setClusterDBVersion(cluster_folder, version_str):
+	if cluster_folder.getProperty('cluster_os') is None:
+		try:
+			cluster_folder.manage_addProperty('cluster_os',
+				version_str, 'string')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('SCDBV0: %s: %r %s' \
+					% (cluster_folder.getId(), e, str(e)))
+			return None
+	else:
+		try:
+			cluster_folder.manage_changeProperties({'cluster_os': version_str })
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('SCDBV1: %s: %r %s' \
+					% (cluster_folder.getId(), e, str(e)))
+			return None
+	return True
+
+def getClusterFlags(self, cluname):
+	try:
+		path = str('%s%s' % (CLUSTER_FOLDER_PATH, cluname))
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise Exception, 'clusterfolder is None'
+		return clusterfolder.objectItems('ManagedSystem')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCF0: cluster %s [%s] folder missing: %r %s -- returning empty map' % (cluname, path, e, str(e)))
+	return None
+
+def getClusterStatusDB(self, clustername):
+	results = list()
+	vals = {}
+
+	vals['type'] = 'cluster'
+	vals['alias'] = clustername
+	vals['name'] = clustername
+	vals['error'] = True
+	vals['quorate'] = '[unknown]'
+	vals['votes'] = '[unknown]'
+	vals['minQuorum'] = '[unknown]'
+	results.append(vals)
+
+	nodelist = getClusterDBNodes(self, clustername)
+	if len(nodelist) < 1:
+		luci_log.info('Removing cluster %s because it has no nodes' \
+			% clustername)
+		try:
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCSDB0b: %s: %r %s' \
+					% (clustername, e, str(e)))
+		return results
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		node_val['name'] = node[0]
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+		results.append(node_val)
+	return results
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZope.py,v  -->  standard output
revision 1.4.2.1
--- conga/luci/site/luci/Extensions/LuciZope.py
+++ -	2007-08-09 21:35:22.887659000 +0000
@@ -0,0 +1,147 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciZopePerm import userAuthenticated
+from LuciDB import allowed_systems
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def siteIsSetup(self):
+	import os
+	from ricci_communicator import CERTS_DIR_PATH
+
+	try:
+		return os.path.isfile('%sprivkey.pem' % CERTS_DIR_PATH) and os.path.isfile('%scacert.pem' % CERTS_DIR_PATH)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('SIS0: %r %s' % (e, str(e)))
+	return False
+
+def strFilter(regex, replaceChar, arg):
+	import re
+	return re.sub(regex, replaceChar, arg)
+
+# removes systems that user is not authorized access to
+def get_systems_statuses(self, systems, from_cache=False):
+	from HelperFunctions import get_system_info
+
+	CACHED_INDEX = '_get_systems_statuses()_cached_result_'
+	session = self.REQUEST.SESSION
+	if session.has_key(CACHED_INDEX):
+		res = session[CACHED_INDEX]
+		if res is not None:
+			session.set(CACHED_INDEX, None)
+			if from_cache:
+				return res
+
+	allowed_sys_list = allowed_systems(self, systems)
+	ss_list = get_system_info(self, allowed_sys_list)
+	session.set(CACHED_INDEX, ss_list)
+	return ss_list
+
+def set_persistent_var(self, var_name, default_value):
+	request = self.REQUEST
+	response = request.RESPONSE
+	session = request.SESSION
+
+	# TODO: add username into cookie_prefix, so users don't overwrite each other
+	cookie_prefix = '__luci_storage_cookie_'
+
+	value = default_value
+	if request.has_key(var_name):
+		value = request[var_name]
+	elif session.has_key(var_name):
+		value = session[var_name]
+	elif request.cookies.has_key(cookie_prefix + var_name):
+		value = request.cookies[cookie_prefix + var_name]
+
+	session.set(var_name, value)
+	response.setCookie(cookie_prefix + var_name, value,
+		expires='Tue, 30 Jun 2060 12:00:00 GMT')
+	return value
+
+# returns (str(float), units) that fits best,
+# takes preferred units into account
+
+def bytes_to_value_prefunits(self, bytes):
+	from HelperFunctions import bytes_to_value_units, convert_bytes, get_units_multiplier
+
+	p_units = self.REQUEST.SESSION.get('preferred_size_units')
+	dummy, units = bytes_to_value_units(bytes)
+	if get_units_multiplier(units) > get_units_multiplier(p_units):
+		units = p_units
+	return (convert_bytes(bytes, units), units)
+
+def getTabs(self, req):
+	if not userAuthenticated(self):
+		return []
+
+	htab = {
+		'Title': 'homebase',
+		'Description': 'Home base for this luci server',
+		'Taburl': '/luci/homebase',
+		'isSelected': False
+	}
+	ctab = {
+		'Title': 'cluster',
+		'Description': 'Cluster configuration area',
+		'Taburl': '/luci/cluster/index_html?pagetype=3',
+		'isSelected': False
+	}
+	stab = {
+		'Title': 'storage',
+		'Description': 'Storage configuration page',
+		'Taburl': '/luci/storage',
+		'isSelected': False
+	}
+
+	try:
+		baseurl = req['URL']
+		if baseurl.find('cluster') > (-1):
+			ctab['isSelected'] = True
+		elif baseurl.find('storage') > (-1):
+			stab['isSelected'] = True
+		else:
+			htab['isSelected'] = True
+	except KeyError, e:
+		pass
+	except Exception, e:
+		htab['isSelected'] = True
+
+	return [ htab, ctab, stab ]
+
+def appendModel(request, model):
+	try:
+		request.SESSION.set('model', model)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('Appending model to request failed: %r %s' \
+				% (e, str(e)))
+
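+# Collect each variable named in varlist from the request or, failing that,
+# from the request's form data. String values are stripped and empty values
+# come back as None, e.g. GetReqVars(request, [ 'clustername', 'pagetype' ])
+# returns a dict keyed by those two names.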
+def GetReqVars(req, varlist):
+	from types import ListType
+	ret = {}
+
+	for i in varlist:
+		pval = None
+		if req and req.has_key(i):
+			pval = req[i]
+			if type(req[i]) is not ListType:
+				pval = req[i].strip()
+			if not pval:
+				pval = None
+		if req and pval is None:
+			if req.form and req.form.has_key(i):
+				pval = req.form[i]
+				if type(req.form[i]) is not ListType:
+					pval = pval.strip()
+				if not pval:
+					pval = None
+		ret[i] = pval
+	return ret
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeAsync.py,v  -->  standard output
revision 1.1.4.1
--- conga/luci/site/luci/Extensions/LuciZopeAsync.py
+++ -	2007-08-09 21:35:23.007073000 +0000
@@ -0,0 +1,182 @@
+# Copyright (C) 2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from xml.dom import minidom
+
+from LuciSyslog import get_logger
+from LuciZope import GetReqVars
+from ricci_communicator import RicciCommunicator
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
+def write_xml_resp(request, xml_obj):
+	request.RESPONSE.setHeader('Content-Type', 'text/xml; charset=UTF-8')
+	request.RESPONSE.setHeader('Cache-Control', 'no-cache, no-store, private')
+	request.RESPONSE.write(str(xml_obj.toprettyxml()))
+
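+# Serialize a (success, data) tuple as a minidom Document. As an illustration,
+# result_to_xml((True, { 'x': 1 })) yields a document whose root is
+# <dict name="result"> containing <var name="x" type="num" value="1"/> and a
+# <result name="success" value="true"/> status element.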
+def result_to_xml(result):
+	import types
+
+	numeric_types = [
+		types.IntType, types.BooleanType, types.LongType, types.FloatType
+	]
+
+	root = minidom.Document()
+
+	def pyobj_to_xml(element_name, element, parent_node):
+		if type(element) is types.DictType:
+			if len(element) > 0:
+				xml_elem = root.createElement('dict')
+				xml_elem.setAttribute('name', str(element_name))
+
+				for i in element.iterkeys():
+					pyobj_to_xml(i, element[i], xml_elem)
+			else:
+				xml_elem = None
+		elif type(element) in [ types.ListType, types.TupleType ]:
+			if len(element) > 0:
+				xml_elem = root.createElement('list')
+				xml_elem.setAttribute('name', str(element_name))
+				for i in element:
+					pyobj_to_xml(element_name, i, xml_elem)
+			else:
+				xml_elem = None
+		else:
+			cur_tagname = None
+			try:
+				if parent_node.tagName == 'list':
+					cur_tagname = parent_node.getAttribute('name')
+			except:
+				cur_tagname = None
+
+			if not cur_tagname:
+				xml_elem = root.createElement('var')
+			else:
+				xml_elem = root.createElement(cur_tagname)
+
+			if type(element) in types.StringTypes:
+				cur_type = 'str'
+			elif type(element) in numeric_types:
+				cur_type = 'num'
+			else:
+				cur_type = None
+
+			if cur_type:
+				try:
+					if parent_node.tagName == 'dict':
+						xml_elem.setAttribute('name', str(element_name))
+				except:
+					pass
+
+				xml_elem.setAttribute('type', cur_type)
+				xml_elem.setAttribute('value', str(element))
+			else:
+				xml_elem = None
+
+		if xml_elem is not None:
+			parent_node.appendChild(xml_elem)
+
+	pyobj_to_xml('result', result[1], root)
+	res_elem = root.createElement('result')
+	res_elem.setAttribute('name', 'success')
+	res_elem.setAttribute('value', str(result[0]).lower())
+	root.firstChild.appendChild(res_elem)
+	return root
+
+def write_err_async(request, err_msg):
+	xml_obj = result_to_xml((False, { 'errors': err_msg }))
+	write_xml_resp(request, xml_obj)
+
+def get_cluster_nodes_async(self, request):
+	from LuciClusterInfo import getClusterConfNodes
+	from RicciQueries import getClusterConf
+
+	fvars = GetReqVars(request, [ 'QUERY_STRING' ])
+	if fvars['QUERY_STRING'] is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCNA: No query string was given')
+		write_err_async(request, 'No node names were given')
+		return None
+
+	try:
+		nodes = fvars['QUERY_STRING'].split('&')
+		node_list = map(lambda x: x[1], filter(lambda x: x[0][:4] == 'node', map(lambda x: x.split('='), nodes)))
+		if not node_list or len(node_list) < 1:
+			raise Exception, 'No node list'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCNA: %r %s' % (e, str(e)))
+		write_err_async(request, 'No node names were given')
+		return None
+
+	errors = list()
+	ret = {}
+	for node_host in node_list:
+		try:
+			rc = RicciCommunicator(node_host)
+			cluster_name = rc.cluster_info()[0]
+			if not cluster_name:
+				errors.append('%s is not a member of a cluster' \
+					% node_host)
+				continue
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCNA0: ricci: %s: %r %s' \
+					% (node_host, e, str(e)))
+			errors.append('Unable to communicate with the ricci agent on %s' \
+				% node_host)
+			continue
+
+		try:
+			conf = getClusterConf(rc)
+			node_names = getClusterConfNodes(conf)
+			if not node_names or len(node_names) < 1:
+				raise Exception, 'no nodes'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCNA1: ricci: %s: %r %s' \
+					% (node_host, e, str(e)))
+			errors.append('Unable to retrieve a list of cluster nodes from %s' \
+				% node_host)
+			continue
+		ret[cluster_name] = {
+			'cluster': cluster_name,
+			'num_nodes': len(node_names),
+			'clusternode': node_names
+		}
+
+	ret['errors'] = errors
+	xml_obj = result_to_xml((len(errors) < len(node_list), ret))
+	write_xml_resp(request, xml_obj)
+
+def get_sysinfo_async(self, request):
+	from HelperFunctions import get_system_info
+
+	fvars = GetReqVars(request, [ 'QUERY_STRING' ])
+	try:
+		nodes = fvars['QUERY_STRING'].split('&')
+		node_list = map(lambda x: x[1], filter(lambda x: x[0][:4] == 'node', map(lambda x: x.split('='), nodes)))
+		if not node_list or len(node_list) < 1:
+			raise Exception, 'No node list'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSIA: %r %s' % (e, str(e)))
+		write_err_async(request, 'No node names were given')
+		return None
+
+	ret = {}
+	try:
+		ret = get_system_info(self, node_list)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GSIA1: %r: %r %s' \
+				% (node_list, e, str(e)))
+		write_err_async(request, 'Error retrieving information')
+		return None
+	xml_obj = result_to_xml((True, { 'result': ret }))
+	write_xml_resp(request, xml_obj)
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/LuciZopeClusterPortal.py
+++ -	2007-08-09 21:35:23.144029000 +0000
@@ -0,0 +1,551 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from LuciZopePerm import havePermCreateCluster
+from conga_constants import CLUNAME, CLUSTER, \
+	CLUSTER_ADD, CLUSTER_CONFIG, CLUSTERLIST, CLUSTERS, \
+	FDOM, FDOM_ADD, FDOM_CONFIG, FDOMS, \
+	FENCEDEV, FENCEDEV_ADD, FENCEDEV_CONFIG, FENCEDEVS, \
+	NODE, NODE_ADD, NODE_CONFIG, NODE_GRID, NODE_LIST, NODES, PAGETYPE, \
+	RESOURCE, RESOURCE_ADD, RESOURCE_CONFIG, RESOURCES, \
+	SERVICE, SERVICE_ADD, SERVICE_CONFIG, SERVICE_LIST, SERVICES, \
+	VM_ADD, VM_CONFIG
+
+# Policy for showing the cluster chooser menu:
+# 1) If there are no clusters in the ManagedClusterSystems
+# folder, then only the admin user may see this menu, and
+# the configure option should not be displayed.
+# 2) If there are clusters in the ManagedClusterSystems folder,
+# then only display the chooser if the current user has
+# permissions on at least one. If the user is admin, show ALL clusters.
+
+def createCluChooser(self, request, systems):
+	from cluster_adapters import validatePost
+	dummynode = {}
+
+	if request.REQUEST_METHOD == 'POST':
+		ret = validatePost(self, request)
+		try:
+			request.SESSION.set('checkRet', ret[1])
+		except:
+			request.SESSION.set('checkRet', {})
+	else:
+		try:
+			request.SESSION.set('checkRet', {})
+		except:
+			pass
+
+	# First, see if a cluster is chosen, then
+	# check that the current user can access that system
+	cname = None
+	try:
+		cname = request[CLUNAME]
+	except:
+		cname = ''
+
+	try:
+		url = request['URL']
+	except:
+		url = '/luci/cluster/index_html'
+
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
+
+	cldata = {}
+	cldata['Title'] = 'Cluster List'
+	cldata['cfg_type'] = 'clusters'
+	cldata['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERLIST)
+	cldata['Description'] = 'Clusters available for configuration'
+	if pagetype == CLUSTERLIST:
+		cldata['currentItem'] = True
+	else:
+		cldata['currentItem'] = False
+
+	UserHasPerms = havePermCreateCluster(self)
+	if UserHasPerms:
+		cladd = {}
+		cladd['Title'] = 'Create a New Cluster'
+		cladd['cfg_type'] = 'clusteradd'
+		cladd['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTER_ADD)
+		cladd['Description'] = 'Create a Cluster'
+		if pagetype == CLUSTER_ADD:
+			cladd['currentItem'] = True
+		else:
+			cladd['currentItem'] = False
+
+	clcfg = {}
+	clcfg['Title'] = 'Configure'
+	clcfg['cfg_type'] = 'clustercfg'
+	clcfg['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERS)
+	clcfg['Description'] = 'Configure a cluster'
+	if pagetype == CLUSTERS:
+		clcfg['currentItem'] = True
+	else:
+		clcfg['currentItem'] = False
+
+	#test...
+	#clcfg['show_children'] = True
+	#Add all cluster type pages here:
+	if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+		clcfg['show_children'] = True
+	else:
+		clcfg['show_children'] = False
+
+	#loop through all clusters
+	syslist = list()
+	for system in systems:
+		clsys = {}
+		clsys['Title'] = system[0]
+		clsys['cfg_type'] = 'cluster'
+		clsys['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, CLUSTER, system[0])
+		clsys['Description'] = 'Configure this cluster'
+
+		if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+			if cname == system[0]:
+				clsys['currentItem'] = True
+			else:
+				clsys['currentItem'] = False
+		else:
+			clsys['currentItem'] = False
+		syslist.append(clsys)
+
+	clcfg['children'] = syslist
+
+	mylist = list()
+	mylist.append(cldata)
+	if UserHasPerms:
+		mylist.append(cladd)
+	mylist.append(clcfg)
+	dummynode['children'] = mylist
+
+	return dummynode
+
+def createCluConfigTree(self, request, model):
+	dummynode = {}
+
+	if not model:
+		return {}
+
+	# There should be a positive page type
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
+
+	try:
+		url = request['URL']
+	except:
+		url = '/luci/cluster/index_html'
+
+	# The only way this method can run is if there exists
+	# a clustername query var
+	cluname = request['clustername']
+
+	nd = {}
+	nd['Title'] = 'Nodes'
+	nd['cfg_type'] = 'nodes'
+	nd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODES, cluname)
+	nd['Description'] = 'Node configuration for this cluster'
+	if pagetype == NODES or pagetype == NODE_GRID or pagetype == NODE_LIST or pagetype == NODE_CONFIG or pagetype == NODE_ADD or pagetype == NODE:
+		nd['show_children'] = True
+	else:
+		nd['show_children'] = False
+	if pagetype == '0':
+		nd['show_children'] = False
+
+	if pagetype == NODES:
+		nd['currentItem'] = True
+	else:
+		nd['currentItem'] = False
+
+
+	ndadd = {}
+	ndadd['Title'] = 'Add a Node'
+	ndadd['cfg_type'] = 'nodeadd'
+	ndadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_ADD, cluname)
+	ndadd['Description'] = 'Add a node to this cluster'
+	if pagetype == NODE_ADD:
+		ndadd['currentItem'] = True
+	else:
+		ndadd['currentItem'] = False
+
+	ndcfg = {}
+	ndcfg['Title'] = 'Configure'
+	ndcfg['cfg_type'] = 'nodecfg'
+	ndcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_CONFIG, cluname)
+	ndcfg['Description'] = 'Configure cluster nodes'
+	if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODE_GRID or pagetype == NODE_ADD:
+		ndcfg['show_children'] = True
+	else:
+		ndcfg['show_children'] = False
+	if pagetype == NODE_CONFIG:
+		ndcfg['currentItem'] = True
+	else:
+		ndcfg['currentItem'] = False
+
+	nodes = model.getNodes()
+	nodenames = list()
+	for node in nodes:
+		nodenames.append(node.getName())
+
+	cfgablenodes = list()
+	for nodename in nodenames:
+		cfg = {}
+		cfg['Title'] = nodename
+		cfg['cfg_type'] = 'node'
+		cfg['absolute_url'] = '%s?pagetype=%s&nodename=%s&clustername=%s' % (url, NODE, nodename, cluname)
+		cfg['Description'] = 'Configure this cluster node'
+		if pagetype == NODE:
+			try:
+				nname = request['nodename']
+			except KeyError, e:
+				nname = ''
+			if nodename == nname:
+				cfg['currentItem'] = True
+			else:
+				cfg['currentItem'] = False
+		else:
+			cfg['currentItem'] = False
+
+		cfgablenodes.append(cfg)
+
+	#Now add nodename structs as children of the config element
+	ndcfg['children'] = cfgablenodes
+
+	ndkids = list()
+	ndkids.append(ndadd)
+	ndkids.append(ndcfg)
+
+	nd['children'] = ndkids
+
+	##################################################################
+	sv = {}
+	sv['Title'] = 'Services'
+	sv['cfg_type'] = 'services'
+	sv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICES, cluname)
+	sv['Description'] = 'Service configuration for this cluster'
+	if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
+		sv['show_children'] = True
+	else:
+		sv['show_children'] = False
+	if pagetype == SERVICES or pagetype == SERVICE_LIST:
+		sv['currentItem'] = True
+	else:
+		sv['currentItem'] = False
+
+	svadd = {}
+	svadd['Title'] = 'Add a Service'
+	svadd['cfg_type'] = 'serviceadd'
+	svadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_ADD, cluname)
+	svadd['Description'] = 'Add a Service to this cluster'
+	if pagetype == SERVICE_ADD:
+		svadd['currentItem'] = True
+	else:
+		svadd['currentItem'] = False
+
+	if model.getIsVirtualized() is True:
+		vmadd = {}
+		vmadd['Title'] = 'Add a Virtual Service'
+		vmadd['cfg_type'] = 'vmadd'
+		vmadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, VM_ADD, cluname)
+		vmadd['Description'] = 'Add a Virtual Service to this cluster'
+		if pagetype == VM_ADD:
+			vmadd['currentItem'] = True
+		else:
+			vmadd['currentItem'] = False
+
+	svcfg = {}
+	svcfg['Title'] = 'Configure a Service'
+	svcfg['cfg_type'] = 'servicecfg'
+	svcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_CONFIG, cluname)
+	svcfg['Description'] = 'Configure a Service for this cluster'
+	if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
+		svcfg['show_children'] = True
+	else:
+		svcfg['show_children'] = False
+	if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
+		svcfg['currentItem'] = True
+	else:
+		svcfg['currentItem'] = False
+
+	services = model.getServices()
+	serviceable = list()
+
+	for service in services:
+		servicename = service.getName()
+		svc = {}
+		svc['Title'] = servicename
+		svc['cfg_type'] = 'service'
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, SERVICE, servicename, cluname)
+		svc['Description'] = 'Configure this service'
+		if pagetype == SERVICE:
+			try:
+				sname = request['servicename']
+			except KeyError, e:
+				sname = ''
+			if servicename == sname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	vms = model.getVMs()
+	for vm in vms:
+		name = vm.getName()
+		svc = {}
+		svc['Title'] = name
+		svc['cfg_type'] = 'vm'
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, VM_CONFIG, name, cluname)
+		svc['Description'] = 'Configure this Virtual Service'
+		if pagetype == VM_CONFIG:
+			try:
+				xname = request['servicename']
+			except KeyError, e:
+				xname = ''
+			if name == xname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	svcfg['children'] = serviceable
+
+
+
+	kids = list()
+	kids.append(svadd)
+	if model.getIsVirtualized() is True:
+		kids.append(vmadd)
+	kids.append(svcfg)
+	sv['children'] = kids
+#############################################################
+	rv = {}
+	rv['Title'] = 'Resources'
+	rv['cfg_type'] = 'resources'
+	rv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCES, cluname)
+	rv['Description'] = 'Resource configuration for this cluster'
+	if pagetype == RESOURCES or pagetype == RESOURCE_CONFIG or pagetype == RESOURCE_ADD or pagetype == RESOURCE:
+		rv['show_children'] = True
+	else:
+		rv['show_children'] = False
+	if pagetype == RESOURCES:
+		rv['currentItem'] = True
+	else:
+		rv['currentItem'] = False
+
+	rvadd = {}
+	rvadd['Title'] = 'Add a Resource'
+	rvadd['cfg_type'] = 'resourceadd'
+	rvadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_ADD, cluname)
+	rvadd['Description'] = 'Add a Resource to this cluster'
+	if pagetype == RESOURCE_ADD:
+		rvadd['currentItem'] = True
+	else:
+		rvadd['currentItem'] = False
+
+	rvcfg = {}
+	rvcfg['Title'] = 'Configure a Resource'
+	rvcfg['cfg_type'] = 'resourcecfg'
+	rvcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_CONFIG, cluname)
+	rvcfg['Description'] = 'Configure a Resource for this cluster'
+	if pagetype == RESOURCE_CONFIG or pagetype == RESOURCE:
+		rvcfg['show_children'] = True
+	else:
+		rvcfg['show_children'] = False
+	if pagetype == RESOURCE_CONFIG:
+		rvcfg['currentItem'] = True
+	else:
+		rvcfg['currentItem'] = False
+
+	resources = model.getResources()
+	resourceable = list()
+	for resource in resources:
+		resourcename = resource.getName()
+		rvc = {}
+		rvc['Title'] = resourcename
+		rvc['cfg_type'] = 'resource'
+		rvc['absolute_url'] = '%s?pagetype=%s&resourcename=%s&clustername=%s' % (url, RESOURCES, resourcename, cluname)
+		rvc['Description'] = 'Configure this resource'
+		if pagetype == RESOURCE:
+			try:
+				rname = request['resourcename']
+			except KeyError, e:
+				rname = ''
+			if resourcename == rname:
+				rvc['currentItem'] = True
+			else:
+				rvc['currentItem'] = False
+		else:
+			rvc['currentItem'] = False
+
+		resourceable.append(rvc)
+	rvcfg['children'] = resourceable
+
+
+
+	kids = list()
+	kids.append(rvadd)
+	kids.append(rvcfg)
+	rv['children'] = kids
+ ################################################################
+	fd = {}
+	fd['Title'] = 'Failover Domains'
+	fd['cfg_type'] = 'failoverdomains'
+	fd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOMS, cluname)
+	fd['Description'] = 'Failover domain configuration for this cluster'
+	if pagetype == FDOMS or pagetype == FDOM_CONFIG or pagetype == FDOM_ADD or pagetype == FDOM:
+		fd['show_children'] = True
+	else:
+		fd['show_children'] = False
+	if pagetype == FDOMS:
+		fd['currentItem'] = True
+	else:
+		fd['currentItem'] = False
+
+	fdadd = {}
+	fdadd['Title'] = 'Add a Failover Domain'
+	fdadd['cfg_type'] = 'failoverdomainadd'
+	fdadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_ADD, cluname)
+	fdadd['Description'] = 'Add a Failover Domain to this cluster'
+	if pagetype == FDOM_ADD:
+		fdadd['currentItem'] = True
+	else:
+		fdadd['currentItem'] = False
+
+	fdcfg = {}
+	fdcfg['Title'] = 'Configure a Failover Domain'
+	fdcfg['cfg_type'] = 'failoverdomaincfg'
+	fdcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_CONFIG, cluname)
+	fdcfg['Description'] = 'Configure a Failover Domain for this cluster'
+	if pagetype == FDOM_CONFIG or pagetype == FDOM:
+		fdcfg['show_children'] = True
+	else:
+		fdcfg['show_children'] = False
+	if pagetype == FDOM_CONFIG:
+		fdcfg['currentItem'] = True
+	else:
+		fdcfg['currentItem'] = False
+
+	fdoms = model.getFailoverDomains()
+	fdomable = list()
+	for fdom in fdoms:
+		fdomname = fdom.getName()
+		fdc = {}
+		fdc['Title'] = fdomname
+		fdc['cfg_type'] = 'fdom'
+		fdc['absolute_url'] = '%s?pagetype=%s&fdomname=%s&clustername=%s' % (url, FDOM, fdomname, cluname)
+		fdc['Description'] = 'Configure this Failover Domain'
+		if pagetype == FDOM:
+			try:
+				fname = request['fdomname']
+			except KeyError, e:
+				fname = ''
+			if fdomname == fname:
+				fdc['currentItem'] = True
+			else:
+				fdc['currentItem'] = False
+		else:
+			fdc['currentItem'] = False
+
+		fdomable.append(fdc)
+	fdcfg['children'] = fdomable
+
+
+
+	kids = list()
+	kids.append(fdadd)
+	kids.append(fdcfg)
+	fd['children'] = kids
+#############################################################
+	fen = {}
+	fen['Title'] = 'Shared Fence Devices'
+	fen['cfg_type'] = 'fencedevicess'
+	fen['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEVS, cluname)
+	fen['Description'] = 'Fence Device configuration for this cluster'
+	if pagetype == FENCEDEVS or pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV_ADD or pagetype == FENCEDEV:
+		fen['show_children'] = True
+	else:
+		fen['show_children'] = False
+	if pagetype == FENCEDEVS:
+		fen['currentItem'] = True
+	else:
+		fen['currentItem'] = False
+
+	fenadd = {}
+	fenadd['Title'] = 'Add a Fence Device'
+	fenadd['cfg_type'] = 'fencedeviceadd'
+	fenadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_ADD, cluname)
+	fenadd['Description'] = 'Add a Fence Device to this cluster'
+	if pagetype == FENCEDEV_ADD:
+		fenadd['currentItem'] = True
+	else:
+		fenadd['currentItem'] = False
+
+	fencfg = {}
+	fencfg['Title'] = 'Configure a Fence Device'
+	fencfg['cfg_type'] = 'fencedevicecfg'
+	fencfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_CONFIG, cluname)
+	fencfg['Description'] = 'Configure a Fence Device for this cluster'
+	if pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV:
+		fencfg['show_children'] = True
+	else:
+		fencfg['show_children'] = False
+	if pagetype == FENCEDEV_CONFIG:
+		fencfg['currentItem'] = True
+	else:
+		fencfg['currentItem'] = False
+
+	fences = model.getFenceDevices()
+	fenceable = list()
+	for fence in fences:
+		fencename = fence.getName()
+		fenc = {}
+		fenc['Title'] = fencename
+		fenc['cfg_type'] = 'fencedevice'
+		fenc['absolute_url'] = '%s?pagetype=%s&fencename=%s&clustername=%s' % (url, FENCEDEV, fencename, cluname)
+		fenc['Description'] = 'Configure this Fence Device'
+		if pagetype == FENCEDEV:
+			try:
+				fenname = request['fencename']
+			except KeyError, e:
+				fenname = ''
+			if fencename == fenname:
+				fenc['currentItem'] = True
+			else:
+				fenc['currentItem'] = False
+		else:
+			fenc['currentItem'] = False
+
+		fenceable.append(fenc)
+	fencfg['children'] = fenceable
+
+
+
+	kids = list()
+	kids.append(fenadd)
+	kids.append(fencfg)
+	fen['children'] = kids
+#############################################################
+
+	mylist = list()
+	mylist.append(nd)
+	mylist.append(sv)
+	mylist.append(rv)
+	mylist.append(fd)
+	mylist.append(fen)
+
+	dummynode['children'] = mylist
+
+	return dummynode
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeExternal.py,v  -->  standard output
revision 1.3.2.1
--- conga/luci/site/luci/Extensions/LuciZopeExternal.py
+++ -	2007-08-09 21:35:23.301305000 +0000
@@ -0,0 +1,53 @@
+# Copyright (C) 2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+#
+# The only purpose of this file is to aggregate all the functions
+# called by Zope External Methods.
+#
+
+from homebase_adapters import getUserPerms, homebaseControl, \
+	getDefaultUser
+
+from cluster_adapters import clusterTaskProcess, \
+	resourceAdd, resourceDelete, serviceDelete, \
+	getClusterURL, getSystemLogs, getRicciAgentForCluster, \
+	isClusterBusy, nodeTaskProcess, process_cluster_conf_editor, \
+	serviceMigrate, serviceRestart, serviceStart, serviceStop, \
+	getResourceInfo
+
+from HelperFunctions import add_commas, bytes_to_value_units, convert_bytes
+
+from LuciClusterInfo import getClusterAlias, getClusterInfo, getClusterName, \
+	getClustersInfo, getClusterStatus, getFdomInfo, get_fdom_names, \
+	getFdomsInfo, getFence, getFenceInfo, getFencesInfo, getModelBuilder, \
+	getNodeInfo, getnodes, getNodesInfo, getResourcesInfo, \
+	getServiceInfo, getServicesInfo, getVMInfo, getClusterOS
+
+from LuciDB import access_to_host_allowed, allowed_systems, \
+	check_clusters, getRicciAgent, getSystems, getClusters, \
+	getStorage
+
+from LuciZope import appendModel, bytes_to_value_prefunits, \
+	set_persistent_var, strFilter, getTabs, siteIsSetup
+
+from LuciZopeClusterPortal import createCluChooser, createCluConfigTree
+
+from LuciZopePerm import isAdmin, userAuthenticated
+
+from ricci_communicator import get_ricci_communicator
+
+from storage_adapters import createStorageChooser, \
+	createStorageConfigTree, getStorageURL
+
+from StorageReport import apply_storage_changes, cache_storage_report, \
+	get_bd_data, get_mapper_data, get_mappers_data, get_mapper_template_data, \
+	get_mappings_info, get_storage_batch_result, get_storage_report, \
+	group_systems_by_cluster, is_storage_report_cached, validate, \
+	get_content_data
+
+from LuciZopeAsync import get_cluster_nodes_async, get_sysinfo_async
/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopePerm.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/LuciZopePerm.py
+++ -	2007-08-09 21:35:23.488645000 +0000
@@ -0,0 +1,50 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from AccessControl import getSecurityManager
+from conga_constants import PLONE_ROOT
+
+def userAuthenticated(self):
+	try:
+		if (isAdmin(self) or getSecurityManager().getUser().has_role('Authenticated', self.restrictedTraverse(PLONE_ROOT))):
+			return True
+	except Exception, e:
+		pass
+	return False
+
+def isAdmin(self):
+	try:
+		return getSecurityManager().getUser().has_role('Owner', self.restrictedTraverse(PLONE_ROOT))
+	except Exception, e:
+		pass
+	return False
+
+# In case we want to give access to non-admin users in the future
+
+def havePermCreateCluster(self):
+	return isAdmin(self)
+
+def havePermAddStorage(self):
+	return isAdmin(self)
+
+def havePermAddCluster(self):
+	return isAdmin(self)
+
+def havePermAddUser(self):
+	return isAdmin(self)
+
+def havePermDelUser(self):
+	return isAdmin(self)
+
+def havePermRemStorage(self):
+	return isAdmin(self)
+
+def havePermRemCluster(self):
+	return isAdmin(self)
+
+def havePermEditPerms(self):
+	return isAdmin(self)
/cvs/cluster/conga/luci/site/luci/Extensions/ResourceHandler.py,v  -->  standard output
revision 1.2.2.1
--- conga/luci/site/luci/Extensions/ResourceHandler.py
+++ -	2007-08-09 21:35:23.613137000 +0000
@@ -0,0 +1,793 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from ClusterModel.Ip import Ip
+from ClusterModel.Fs import Fs
+from ClusterModel.Clusterfs import Clusterfs
+from ClusterModel.Netfs import Netfs
+from ClusterModel.NFSExport import NFSExport
+from ClusterModel.NFSClient import NFSClient
+from ClusterModel.Script import Script
+from ClusterModel.Samba import Samba
+from ClusterModel.Tomcat5 import Tomcat5
+from ClusterModel.Postgres8 import Postgres8
+from ClusterModel.Apache import Apache
+from ClusterModel.OpenLDAP import OpenLDAP
+from ClusterModel.LVM import LVM
+from ClusterModel.MySQL import MySQL
+from ClusterModel.SAPDatabase import SAPDatabase
+from ClusterModel.SAPInstance import SAPInstance
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
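+# Return the fsid attribute of every fs and clusterfs resource in the model
+# (0 for resources that have no fsid set).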
+def get_fsid_list(model):
+	obj_list = model.searchObjectTree('fs')
+	obj_list.extend(model.searchObjectTree('clusterfs'))
+	return map(lambda x: x.getAttribute('fsid') and int(x.getAttribute('fsid')) or 0, obj_list)
+
+def fsid_is_unique(model, fsid):
+	fsid_list = get_fsid_list(model)
+	return fsid not in fsid_list
+
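+# Derive a 16-bit fsid from the CRC32 of the resource name, then perturb it
+# randomly until it does not collide with any fsid already in the model.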
+def generate_fsid(model, name):
+	import binascii
+	from random import randrange
+	fsid_list = get_fsid_list(model)
+
+	fsid = binascii.crc32(name) & 0xffff
+	dupe = fsid in fsid_list
+	while dupe is True:
+		fsid = (fsid + randrange(1, 0xfffe)) & 0xffff
+		dupe = fsid in fsid_list
+	return fsid
+
+def getResourceForEdit(model, name):
+	resPtr = model.getResourcesPtr()
+	resources = resPtr.getChildren()
+
+	for res in resources:
+		if res.getName() == name:
+			resPtr.removeChild(res)
+			return res
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GRFE0: unable to find resource "%s"' % name)
+	raise KeyError, name
+
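+# Each of the add* handlers below copies validated form values onto the
+# resource object passed in and returns a list of error strings. Optional
+# fields (such as mount or daemon options) that are left blank remove the
+# corresponding attribute if one is already present.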
+def addIp(res, rname, form, model):
+	errors = list()
+
+	try:
+		addr = form['ip_address'].strip()
+		if not addr:
+			raise KeyError, 'No IP address was given'
+		res.addAttribute('address', addr)
+	except KeyError, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addIp4: %s' % err)
+
+	if form.has_key('monitorLink'):
+		res.addAttribute('monitor_link', '1')
+	else:
+		res.addAttribute('monitor_link', '0')
+
+	return errors
+
+def addFs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs6: %s' % err)
+
+	try:
+		device = form['device'].strip()
+		if not device:
+			raise Exception, 'No device was given for "%s"' % rname
+		res.addAttribute('device', device)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs7: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs8: %s' % err)
+
+	try:
+		fstype = form['fstype'].strip()
+		if not fstype:
+			raise Exception, 'No filesystem type was given for "%s"' % rname
+		res.addAttribute('fstype', fstype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addFs9: %s' % err)
+
+	try:
+		fsid = form['fsid'].strip()
+		if not fsid:
+			raise Exception, 'no fsid'
+		fsid_int = int(fsid)
+		if not fsid_is_unique(model, fsid_int):
+			raise Exception, 'fsid is not unique'
+	except Exception, e:
+		fsid = str(generate_fsid(model, rname))
+	res.addAttribute('fsid', fsid)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	if form.has_key('selffence'):
+		res.addAttribute('self_fence', '1')
+	else:
+		res.addAttribute('self_fence', '0')
+
+	if form.has_key('checkfs'):
+		res.addAttribute('force_fsck', '1')
+	else:
+		res.addAttribute('force_fsck', '0')
+
+	return errors
+
+def addClusterfs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs5: %s' % err)
+
+	try:
+		device = form['device'].strip()
+		if not device:
+			raise Exception, 'No device was given for "%s"' % rname
+		res.addAttribute('device', device)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs6: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addGfs7: %s' % err)
+
+	try:
+		fsid = form['fsid'].strip()
+		if not fsid:
+			raise Exception, 'no fsid'
+		fsid_int = int(fsid)
+		if not fsid_is_unique(model, fsid_int):
+			raise Exception, 'fsid is not unique'
+	except Exception, e:
+		fsid = str(generate_fsid(model, rname))
+	res.addAttribute('fsid', fsid)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	return errors
+
+def addNetfs(res, rname, form, model):
+	errors = list()
+
+	try:
+		mountpoint = form['mountpoint'].strip()
+		if not mountpoint:
+			raise Exception, 'No mount point was given for "%s"' % rname
+		res.addAttribute('mountpoint', mountpoint)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm5: %s' % err)
+
+	try:
+		host = form['host'].strip()
+		if not host:
+			raise Exception, 'No host server was given for "%s"' % rname
+		res.addAttribute('host', host)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm6 error: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm7: %s' % err)
+
+	try:
+		exportpath = form['exportpath'].strip()
+		if not exportpath:
+			raise Exception, 'No export path was given for "%s"' % rname
+		res.addAttribute('exportpath', exportpath)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm8: %s' % err)
+
+	try:
+		nfstype = form['nfstype'].strip().lower()
+		if nfstype != 'nfs' and nfstype != 'nfs4':
+			raise Exception, 'An invalid NFS version "%s" was given for "%s"' \
+								% (nfstype, rname)
+		res.addAttribute('nfstype', nfstype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsm9: %s' % err)
+
+	if form.has_key('forceunmount'):
+		res.addAttribute('force_unmount', '1')
+	else:
+		res.addAttribute('force_unmount', '0')
+
+	return errors
+
+def addNFSClient(res, rname, form, model):
+	errors = list()
+
+	try:
+		target = form['target'].strip()
+		if not target:
+			raise Exception, 'No target was given for "%s"' % rname
+		res.addAttribute('target', target)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc5: %s' % err)
+
+	try:
+		options = form['options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addNfsc6: %s' % err)
+
+	if form.has_key('allow_recover'):
+		res.addAttribute('allow_recover', '1')
+	else:
+		res.addAttribute('allow_recover', '0')
+
+	return errors
+
+def addNFSExport(res, rname, form, model):
+	errors = list()
+	return errors
+
+def addScript(res, rname, form, model):
+	errors = list()
+
+	try:
+		path = form['file'].strip()
+		if not path:
+			raise Exception, 'No path to a script file was given for "%s"' % rname
+		res.addAttribute('file', path)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addScr5: %s' % err)
+
+	return errors
+
+def addSamba(res, rname, form, model):
+	errors = list()
+
+	try:
+		workgroup = form['workgroup'].strip()
+		if not workgroup:
+			raise Exception, 'No workgroup was given for "%s"' % rname
+		res.addAttribute('workgroup', workgroup)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addSmb5: %s' % err)
+
+	return errors
+
+def addApache(res, rname, form, model):
+	errors = list()
+
+	try:
+		server_root = form['server_root'].strip()
+		if not server_root:
+			raise KeyError, 'No server root was given for "%s"' % rname
+		res.addAttribute('server_root', server_root)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache5: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Apache configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache6: %s' % err)
+
+	try:
+		options = form['httpd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('httpd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('httpd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addApache7: %s' % err)
+
+	return errors
+
+def addMySQL(res, rname, form, model):
+	errors = list()
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the MySQL configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL5: %s' % err)
+
+	try:
+		listen_addr = form['listen_address'].strip()
+		if not listen_addr:
+			raise KeyError, 'No address was given for "%s"' % rname
+		res.addAttribute('listen_address', listen_addr)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL6: %s' % err)
+
+	try:
+		options = form['mysql_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('mysql_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('mysql_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addMySQL7: %s' % err)
+
+	return errors
+
+def addOpenLDAP(res, rname, form, model):
+	errors = list()
+
+	try:
+		url_list = form['url_list'].strip()
+		if not url_list:
+			raise KeyError, 'No URL list was given for "%s"' % rname
+		res.addAttribute('url_list', url_list)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP5: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the OpenLDAP configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP6: %s' % err)
+
+	try:
+		options = form['slapd_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('slapd_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('slapd_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addOpenLDAP7: %s' % err)
+
+	return errors
+
+def addPostgres8(res, rname, form, model):
+	errors = list()
+	try:
+		user = form['postmaster_user'].strip()
+		if not user:
+			raise KeyError, 'No postmaster user was given for "%s"' % rname
+		res.addAttribute('postmaster_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL85: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the PostgreSQL 8 configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL86: %s' % err)
+
+	try:
+		options = form['postmaster_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('postmaster_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('postmaster_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addPostgreSQL87: %s' % err)
+
+	return errors
+
+def addTomcat5(res, rname, form, model):
+	errors = list()
+
+	try:
+		user = form['tomcat_user'].strip()
+		if not user:
+			raise KeyError, 'No Tomcat user was given for "%s"' % rname
+		res.addAttribute('tomcat_user', user)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat55: %s' % err)
+
+	try:
+		config_file = form['config_file'].strip()
+		if not config_file:
+			raise KeyError, 'No path to the Tomcat 5 configuration file was given for "%s"' % rname
+		res.addAttribute('config_file', config_file)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat56: %s' % err)
+
+	try:
+		options = form['catalina_options'].strip()
+		if not options:
+			raise KeyError, 'no options'
+		res.addAttribute('catalina_options', options)
+	except KeyError, e:
+		try:
+			res.removeAttribute('catalina_options')
+		except:
+			pass
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat57: %s' % err)
+
+	try:
+		catalina_base = form['catalina_base'].strip()
+		if not catalina_base:
+			raise KeyError, 'No Catalina base directory was given for "%s"' % rname
+		res.addAttribute('catalina_base', catalina_base)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat58: %s' % err)
+
+	try:
+		shutdown_wait = int(form['shutdown_wait'].strip())
+		res.addAttribute('shutdown_wait', str(shutdown_wait))
+	except KeyError, e:
+		res.addAttribute('shutdown_wait', '0')
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addTomcat59: %s' % err)
+
+	return errors
+
+def addLVM(res, rname, form, model):
+	errors = list()
+
+	try:
+		vg_name = form['vg_name'].strip()
+		if not vg_name:
+			raise KeyError, 'No volume group name was given for "%s"' % rname
+		res.addAttribute('vg_name', vg_name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM5: %s' % err)
+
+	try:
+		lv_name = form['lv_name'].strip()
+		if not lv_name:
+			raise KeyError, 'No logical volume name was given for "%s"' % rname
+		res.addAttribute('lv_name', lv_name)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addLVM6: %s' % err)
+
+	return errors
+
+def addSAPDatabase(res, rname, form, model):
+	errors = list()
+
+	res.removeAttribute('name')
+	res.addAttribute('SID', rname)
+
+	try:
+		dbtype = form['DBTYPE'].strip()
+		if not dbtype in [ 'ORA', 'DB6', 'ADA' ]:
+			raise Exception, 'You gave an invalid database type: %s' % dbtype
+		res.addAttribute('DBTYPE', dbtype)
+	except Exception, e:
+		err = str(e)
+		errors.append(err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addSAPD1: %s' % err)
+
+	if form.has_key('DBJ2EE_ONLY'):
+		res.addAttribute('DBJ2EE_ONLY', 'TRUE')
+	else:
+		res.removeAttribute('DBJ2EE_ONLY')
+
+	# Optional string parameters
+	for param in [ 'DIR_EXECUTABLE', 'NETSERVICENAME', 'DIR_BOOTSTRAP', 'DIR_SECSTORE' ]:
+		try:
+			pval = form[param].strip()
+			if not pval:
+				raise KeyError, 'blank'
+			res.addAttribute(param, pval)
+		except KeyError, e:
+			res.removeAttribute(param)
+		except Exception, e:
+			err = str(e)
+			errors.append(err)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addSAPD2: %s %s' % (param, err))
+	return errors
+
+def addSAPInstance(res, rname, form, model):
+	errors = list()
+
+	res.removeAttribute('name')
+	res.addAttribute('InstanceName', rname)
+
+	# Optional string parameters
+	for param in [ 'DIR_EXECUTABLE', 'DIR_PROFILE', 'START_PROFILE' ]:
+		try:
+			pval = form[param].strip()
+			if not pval:
+				raise KeyError, 'blank'
+			res.addAttribute(param, pval)
+		except KeyError, e:
+			res.removeAttribute(param)
+		except Exception, e:
+			err = str(e)
+			errors.append(err)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addSAPI1: %s %s' % (param, err))
+	return errors
+
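+# Maps each resource type key used by the resource forms to its handler
+# function and the ClusterModel class used to instantiate new resources of
+# that type.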
+resource_table = {
+	'ip':			[ addIp,			Ip			],
+	'fs':			[ addFs,			Fs			],
+	'gfs':			[ addClusterfs,		Clusterfs	],
+	'nfsm':			[ addNetfs,			Netfs		],
+	'nfsx':			[ addNFSExport,		NFSExport	],
+	'nfsc':			[ addNFSClient,		NFSClient	],
+	'scr':			[ addScript,		Script		],
+	'smb':			[ addSamba,			Samba		],
+	'tomcat-5':		[ addTomcat5,		Tomcat5		],
+	'postgres-8':	[ addPostgres8,		Postgres8	],
+	'apache':		[ addApache,		Apache		],
+	'openldap':		[ addOpenLDAP,		OpenLDAP	],
+	'lvm':			[ addLVM,			LVM			],
+	'mysql':		[ addMySQL,			MySQL		],
+	'SAPDatabase':	[ addSAPDatabase,	SAPDatabase	],
+	'SAPInstance':	[ addSAPInstance,	SAPInstance	]
+}
+
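+# Create a new resource object (or, when editing, pull the existing one out
+# of the model), apply the submitted form values through the handler
+# registered in resource_table, and raise a list of error strings if any
+# validation fails.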
+def create_resource(res_type, form, model):
+	if not resource_table.has_key(res_type):
+		raise Exception, [ 'Unknown resource type: "%s"' % res_type ]
+
+	res = None
+	if form.has_key('edit'):
+		if not form.has_key('oldname'):
+			raise Exception, [ 'Cannot find this resource\'s original name.' ]
+
+		oldname = form['oldname'].strip()
+		if not oldname:
+			raise Exception, [ 'Cannot find this resource\'s original name.' ]
+
+		try:
+			res = getResourceForEdit(model, oldname)
+			if not res:
+				raise Exception, 'not found'
+		except Exception, e:
+			raise Exception, [ 'No Resource named "%s" exists.' % oldname ]
+	else:
+		res = resource_table[res_type][1]()
+
+	if res_type != 'ip':
+		if not form.has_key('resourceName') or not form['resourceName'].strip():
+			raise Exception, [ 'All resources must have a unique name.' ]
+		rname = form['resourceName'].strip()
+		res.addAttribute('name', rname)
+	else:
+		rname = form['ip_address'].strip()
+
+	errors = resource_table[res_type][0](res, rname, form, model)
+	try:
+		dummy = getResourceForEdit(model, rname)
+		if dummy:
+			errors.append('A resource named "%s" already exists.' % rname)
+	except:
+		pass
+
+	if len(errors) > 0:
+		raise Exception, errors
+	return res
/cvs/cluster/conga/luci/site/luci/Extensions/RicciQueries.py,v  -->  standard output
revision 1.7.2.1
--- conga/luci/site/luci/Extensions/RicciQueries.py
+++ -	2007-08-09 21:35:23.742263000 +0000
@@ -0,0 +1,728 @@
+# Copyright (C) 2006-2007 Red Hat, Inc.
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of version 2 of the
+# GNU General Public License as published by the
+# Free Software Foundation.
+
+from xml.dom import minidom
+from ricci_communicator import RicciCommunicator
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
+
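+# Build the ricci batch XML run when adding a node to an existing cluster:
+# install the selected package sets, disable their init scripts, reboot if
+# any sets were requested, write an initial skeleton cluster.conf (without
+# propagating it), optionally enable clustered LVM, and finally start the
+# node. Returns the parsed <batch> DOM node.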
+def addClusterNodeBatch(cluster_name,
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms,
+						gulm):
+	batch = list()
+
+	batch.append('<?xml version="1.0" ?>')
+	batch.append('<batch>')
+	batch.append('<module name="rpm">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="install">')
+	if upgrade_rpms:
+		batch.append('<var name="upgrade" type="boolean" value="true"/>')
+	else:
+		batch.append('<var name="upgrade" type="boolean" value="false"/>')
+	batch.append('<var name="sets" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	batch.append('<module name="service">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="disable">')
+	batch.append('<var mutable="false" name="services" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+	if need_reboot:
+		batch.append('<module name="reboot">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="reboot_now"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="set_cluster.conf">')
+	batch.append('<var mutable="false" name="propagate" type="boolean" value="false"/>')
+	batch.append('<var mutable="false" name="cluster.conf" type="xml">')
+	batch.append('<cluster config_version="1" name="%s">' % cluster_name)
+	batch.append('<fence_daemon post_fail_delay="0" post_join_delay="3"/>')
+	batch.append('<clusternodes/>')
+	batch.append('<cman/>')
+	batch.append('<fencedevices/>')
+	batch.append('<rm/>')
+	batch.append('</cluster>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	if install_shared_storage:
+		batch.append('<module name="storage">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="enable_clustered_lvm"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="start_node"/>')
+	batch.append('</request>')
+	batch.append('</module>')
+	batch.append('</batch>')
+
+	return minidom.parseString(''.join(batch)).firstChild
+
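+# Like addClusterNodeBatch, but builds the batch for creating a new cluster:
+# the generated cluster.conf contains the full node list (nodeid attributes
+# are omitted for rhel4), a two_node cman section for two-node non-GULM
+# clusters, and <gulm> lockserver entries when GULM lock servers are given.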
+def createClusterBatch( os_str,
+						cluster_name,
+						cluster_alias,
+						nodeList,
+						install_base,
+						install_services,
+						install_shared_storage,
+						install_LVS,
+						upgrade_rpms,
+						gulm_lockservers):
+
+	batch = list()
+	batch.append('<?xml version="1.0" ?>')
+	batch.append('<batch>')
+
+	batch.append('<module name="rpm">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="install">')
+	if upgrade_rpms:
+		batch.append('<var name="upgrade" type="boolean" value="true"/>')
+	else:
+		batch.append('<var name="upgrade" type="boolean" value="false"/>')
+
+	batch.append('<var name="sets" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm_lockservers:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	batch.append('<module name="service">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="disable">')
+	batch.append('<var mutable="false" name="services" type="list_xml">')
+	if install_base or install_services or install_shared_storage:
+		if gulm_lockservers:
+			batch.append('<set name="Cluster Base - Gulm"/>')
+		else:
+			batch.append('<set name="Cluster Base"/>')
+	if install_services:
+		batch.append('<set name="Cluster Service Manager"/>')
+	if install_shared_storage:
+		batch.append('<set name="Clustered Storage"/>')
+	if install_LVS:
+		batch.append('<set name="Linux Virtual Server"/>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	need_reboot = install_base or install_services or install_shared_storage or install_LVS
+	if need_reboot:
+		batch.append('<module name="reboot">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="reboot_now"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="set_cluster.conf">')
+	batch.append('<var mutable="false" name="propagate" type="boolean" value="false"/>')
+	batch.append('<var mutable="false" name="cluster.conf" type="xml">')
+	batch.append('<cluster config_version="1" name="%s" alias="%s">' % (cluster_name, cluster_alias))
+	batch.append('<fence_daemon post_fail_delay="0" post_join_delay="3"/>')
+	batch.append('<clusternodes>')
+	x = 1
+	for i in nodeList:
+		if os_str == 'rhel4':
+			batch.append('<clusternode name="%s" votes="1"/>' % i)
+		else:
+			batch.append('<clusternode name="%s" votes="1" nodeid="%d"/>' % (i, x))
+		x += 1
+	batch.append('</clusternodes>')
+
+	if not gulm_lockservers:
+		if len(nodeList) == 2:
+			batch.append('<cman expected_votes="1" two_node="1"/>')
+		else:
+			batch.append('<cman/>')
+	batch.append('<fencedevices/>')
+	batch.append('<rm/>')
+	if gulm_lockservers:
+		batch.append('<gulm>')
+		for i in gulm_lockservers:
+			batch.append('<lockserver name="%s" />' % i)
+		batch.append('</gulm>')
+	batch.append('</cluster>')
+	batch.append('</var>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+
+	if install_shared_storage:
+		batch.append('<module name="storage">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="enable_clustered_lvm"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+	else:
+		batch.append('<module name="rpm">')
+		batch.append('<request API_version="1.0">')
+		batch.append('<function_call name="install"/>')
+		batch.append('</request>')
+		batch.append('</module>')
+
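+	# Finally, have the node start its cluster services (start_node with cluster_startup set).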
+	batch.append('<module name="cluster">')
+	batch.append('<request API_version="1.0">')
+	batch.append('<function_call name="start_node">')
+	batch.append('<var mutable="false" name="cluster_startup" type="boolean" value="true"/>')
+	batch.append('</function_call>')
+	batch.append('</request>')
+	batch.append('</module>')
+	batch.append('</batch>')
+
+	return minidom.parseString(''.join(batch)).firstChild
+
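+# Return the (batch_id, status) pair from a ricci batch response, or (None, None) on failure.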
+def batchAttemptResult(doc):
+	if not doc:
+		return (None, None)
+
+	try:
+		batch = doc.getElementsByTagName('batch')
+		if not batch or len(batch) < 1:
+			raise Exception, 'no batch tag was found'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('BAR0: %r %s' % (e, str(e)))
+		return (None, None)
+
+	for i in batch:
+		try:
+			batch_number = str(i.getAttribute('batch_id'))
+			result = str(i.getAttribute('status'))
+			return (batch_number, result)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('BAR1: %s' % e)
+
+	if LUCI_DEBUG_MODE is True:
+		try:
+			luci_log.debug_verbose('BAR2: batchid, status not in \"%s\"' \
+				% doc.toxml())
+		except:
+			pass
+	return (None, None)
+
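+# Run the cluster "status" call synchronously and return a document containing the <cluster> status element.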
+def getClusterStatusBatch(rc):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="status"/></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
+
+	if not ricci_xml:
+		return None
+
+	try:
+		cluster_tags = ricci_xml.getElementsByTagName('cluster')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB0: %r %s' % (e, str(e)))
+		return None
+
+	if len(cluster_tags) < 1:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB1: %d entries - expecting 1' \
+				% len(cluster_tags))
+		return None
+	elif len(cluster_tags) > 1:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB2: %d entries, expecting 1; using the first' \
+				% len(cluster_tags))
+
+	try:
+		cluster_node = cluster_tags[0]
+		if not cluster_node:
+			raise Exception, 'element 0 is None'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB3: %r %s' % (e, str(e)))
+		return None
+
+	try:
+		doc = minidom.Document()
+		doc.appendChild(cluster_node)
+		return doc
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSB4: %r %s' % (e, str(e)))
+
+	return None
+
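+# Send a new cluster.conf to the node, with an option to propagate it.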
+def setClusterConf(rc, clusterconf, propagate=True):
+	if propagate is True:
+		propg = 'true'
+	else:
+		propg = 'false'
+
+	conf = str(clusterconf).replace('<?xml version="1.0"?>', '')
+	conf = conf.replace('<?xml version="1.0" ?>', '')
+	conf = conf.replace('<? xml version="1.0"?>', '')
+	conf = conf.replace('<? xml version="1.0" ?>', '')
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_cluster.conf"><var type="boolean" name="propagate" mutable="false" value="%s"/><var type="xml" mutable="false" name="cluster.conf">%s</var></function_call></request></module>' % (propg, conf)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
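+# Fetch the last five hours (18000 seconds) of log entries from the node and format them as an HTML string.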
+def getNodeLogs(rc):
+	from time import time, ctime
+
+	errstr = 'log not accessible'
+
+	batch_str = '<module name="log"><request API_version="1.0"><function_call name="get"><var mutable="false" name="age" type="int" value="18000"/><var mutable="false" name="tags" type="list_str"></var></function_call></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml:
+		return errstr
+	try:
+		log_entries = ricci_xml.getElementsByTagName('logentry')
+		if not log_entries or len(log_entries) < 1:
+			raise Exception, 'no log data is available.'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL0: retrieving log data from %s: %r %s' \
+				% (rc.hostname(), e, str(e)))
+		return None
+
+	time_now = time()
+	entry_list = list()
+
+	try:
+		# Show older entries first.
+		log_entries.sort(lambda x, y: int(y.getAttribute('age')) - int(x.getAttribute('age')))
+	except:
+		pass
+
+	for i in log_entries:
+		try:
+			log_msg = i.getAttribute('msg')
+		except:
+			log_msg = ''
+
+		if not log_msg:
+			continue
+
+		try:
+			log_age = int(i.getAttribute('age'))
+		except:
+			log_age = 0
+
+		try:
+			log_domain = i.getAttribute('domain')
+		except:
+			log_domain = ''
+
+		try:
+			log_pid = i.getAttribute('pid')
+		except:
+			log_pid = ''
+
+		if log_age:
+			entry_list.append('%s ' % ctime(time_now - log_age))
+		if log_domain:
+			entry_list.append(log_domain)
+		if log_pid:
+			entry_list.append('[%s]' % log_pid)
+		entry_list.append(': %s<br/>' % log_msg)
+	return ''.join(entry_list)
+
+def nodeReboot(rc):
+	batch_str = '<module name="reboot"><request API_version="1.0"><function_call name="reboot_now"/></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
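+# Stop cluster services on the node; optionally shut down the entire cluster and/or purge the cluster configuration from the node.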
+def nodeLeaveCluster(rc, cluster_shutdown=False, purge=False):
+	cshutdown = 'false'
+	if cluster_shutdown is True:
+		cshutdown = 'true'
+
+	purge_conf = 'true'
+	if purge is False:
+		purge_conf = 'false'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_node"><var mutable="false" name="cluster_shutdown" type="boolean" value="%s"/><var mutable="false" name="purge_conf" type="boolean" value="%s"/></function_call></request></module>' % (cshutdown, purge_conf)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def nodeFence(rc, nodename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="fence_node"><var mutable="false" name="nodename" type="string" value="%s"/></function_call></request></module>' % nodename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
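+# Start cluster services on the node (start_node), optionally as part of cluster startup.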
+def nodeJoinCluster(rc, cluster_startup=False):
+	cstartup = 'false'
+	if cluster_startup is True:
+		cstartup = 'true'
+
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_node"><var mutable="false" name="cluster_startup" type="boolean" value="%s"/></function_call></request></module>' % cstartup
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
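+# Start a cluster service, optionally on a preferred node.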
+def startService(rc, servicename, preferrednode=None):
+	if preferrednode is not None:
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value="%s"/><var mutable="false" name="nodename" type="string" value="%s"/></function_call></request></module>' % (servicename, preferrednode)
+	else:
+		batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="start_service"><var mutable="false" name="servicename" type="string" value="%s"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def migrateService(rc, servicename, preferrednode):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="migrate_service"><var mutable="false" name="servicename" type="string" value="%s"/><var mutable="false" name="nodename" type="string" value="%s" /></function_call></request></module>' % (servicename, preferrednode)
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
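+# Enable and/or disable the given lists of services in a single batch.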
+def updateServices(rc, enable_list, disable_list):
+	batch_list = list()
+
+	if enable_list and len(enable_list) > 0:
+		batch_list.append('<module name="service"><request API_version="1.0"><function_call name="enable"><var mutable="false" name="services" type="list_xml">')
+		for i in enable_list:
+			batch_list.append('<service name="%s"/>' % str(i))
+		batch_list.append('</var></function_call></request></module>')
+
+	if disable_list and len(disable_list) > 0:
+		batch_list.append('<module name="service"><request API_version="1.0"><function_call name="disable"><var mutable="false" name="services" type="list_xml">')
+		for i in disable_list:
+			batch_list.append('<service name="%s"/>' % str(i))
+		batch_list.append('</var></function_call></request></module>')
+
+	if len(batch_list) < 1:
+		return None, None
+	ricci_xml = rc.batch_run(''.join(batch_list))
+	return batchAttemptResult(ricci_xml)
+
+def restartService(rc, servicename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="restart_service"><var mutable="false" name="servicename" type="string" value=\"%s\"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
+def stopService(rc, servicename):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="stop_service"><var mutable="false" name="servicename" type="string" value=\"%s\"/></function_call></request></module>' % servicename
+
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
+
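+# Run a start/stop/restart on a system service synchronously and return an XML <result> element describing the outcome.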
+def svc_manage(rc, hostname, servicename, op):
+	svc_func = None
+
+	doc = minidom.Document()
+	elem = doc.createElement('result')
+	elem.setAttribute('success', '0')
+
+	if not servicename:
+		elem.setAttribute('service', 'No service name was specified.')
+		elem.setAttribute('message', 'No service name was specified.')
+
+	if not op:
+		elem.setAttribute('operation', 'No operation was specified.')
+		elem.setAttribute('message', 'No operation was specified.')
+
+	if not servicename or not op:
+		doc.appendChild(elem)
+		return doc
+
+	elem.setAttribute('service', servicename)
+	elem.setAttribute('operation', op)
+	elem.setAttribute('hostname', hostname)
+
+	try:
+		op = op.strip().lower()
+		if op == 'restart' or op == 'start' or op == 'stop':
+			svc_func = op
+		else:
+			raise Exception, op
+	except Exception, e:
+		elem.setAttribute('message', 'Unknown operation: %s' % str(e))
+		doc.appendChild(elem)
+		return doc
+
+	batch_str = '<module name="service"><request API_version="1.0"><function_call name="%s"><var mutable="false" name="services" type="list_xml"><service name="%s"/></var></function_call></request></module>' % (svc_func, servicename)
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		elem.setAttribute('message', 'operation failed')
+		doc.appendChild(elem)
+		return doc
+
+	try:
+		mod_elem = ricci_xml.getElementsByTagName('module')
+		status_code = int(mod_elem[0].getAttribute('status'))
+		if status_code == 0:
+			var_elem = mod_elem[0].getElementsByTagName('var')
+			for i in var_elem:
+				name = i.getAttribute('name').lower()
+				if name == 'success':
+					success = i.getAttribute('value').lower()
+					if success == 'true':
+						elem.setAttribute('success', '1')
+						elem.setAttribute('message', 'success')
+					else:
+						elem.setAttribute('message', 'operation failed')
+					break
+		else:
+			err_msg = mod_elem[0].childNodes[1].getAttribute('description')
+			elem.setAttribute('message', err_msg)
+	except Exception, e:
+		elem.setAttribute('message', 'operation failed')
+
+	doc.appendChild(elem)
+	return doc
+
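+# List the services known to the node's service module, including descriptions.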
+def list_services(rc):
+	batch_str = '<module name="service"><request API_version="1.0"><function_call name="list"><var mutable="false" name="description" type="boolean" value="true"/></function_call></request></module>'
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS0: None returned')
+		return None
+	try:
+		service_tags = ricci_xml.getElementsByTagName('service')
+		return service_tags
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LS1: %r %s' % (e, str(e)))
+	return None
+
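+# Ask whether the node is a virtualized guest; returns True or False, or None if the query fails.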
+def nodeIsVirtual(rc):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="virt_guest"/></request></module>'
+
+	ricci_xml = rc.batch_run(batch_str, async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual0: None returned')
+		return None
+
+	var_tags = ricci_xml.getElementsByTagName('var')
+	if not var_tags or len(var_tags) < 2:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('nodeIsVirtual1: unexpected response %s' \
+				% ricci_xml.toxml())
+		return None
+
+	success = False
+	virtual = False
+	for i in var_tags:
+		try:
+			name = i.getAttribute('name')
+			if not name:
+				raise Exception, 'name is blank'
+			if name == 'success':
+				result = i.getAttribute('value')
+				if result == 'true':
+					success = True
+			elif name == 'virt_guest':
+				result = i.getAttribute('value')
+				if result == 'true':
+					virtual = True
+			else:
+				raise Exception, 'unexpected attribute name: %s' % name
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('nodeIsVirtual2: error: %r %s' \
+					% (e, str(e)))
+
+	if not success:
+		return None
+	return virtual
+
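+# Query whether each daemon named in dlist is enabled and running.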
+def getDaemonStates(rc, dlist):
+	batch_list = list()
+	batch_list.append('<module name="service"><request API_version="1.0"><function_call name="query"><var mutable="false" name="search" type="list_xml">')
+
+	for item in dlist:
+		batch_list.append('<service name=\"%s\"/>' % item)
+	batch_list.append('</var></function_call></request></module>')
+
+	ricci_xml = rc.batch_run(''.join(batch_list), async=False)
+	if not ricci_xml or not ricci_xml.firstChild:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GDS0: no ricci_xml')
+		return None
+	result = extractDaemonInfo(ricci_xml.firstChild)
+	return result
+
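+# Flatten the <service> elements of a query response into dicts with name, enabled and running keys.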
+def extractDaemonInfo(bt_node):
+	if not bt_node:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EDI0: no bt_node')
+		return None
+
+	resultlist = list()
+	svc_nodes = bt_node.getElementsByTagName('service')
+	for node in svc_nodes:
+		svchash = {}
+		try:
+			name = node.getAttribute('name')
+			if not name:
+				raise Exception, 'No name'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI1: no service name: %r %s' \
+					% (e, str(e)))
+			name = '[unknown]'
+		svchash['name'] = name
+
+		try:
+			svc_enabled = node.getAttribute('enabled')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI2: no enabled: %r %s' \
+					% (e, str(e)))
+			svc_enabled = '[unknown]'
+		svchash['enabled'] = svc_enabled
+
+		try:
+			running = node.getAttribute('running')
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('EDI3: no running: %r %s' \
+					% (e, str(e)))
+			running = '[unknown]'
+		svchash['running'] = running
+		resultlist.append(svchash)
+
+	return resultlist
+
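+# Retrieve cluster.conf from the node and return its root element, or None on error.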
+def getClusterConf(rc):
+	import xml.dom
+
+	if rc is None:
+		return None
+
+	doc = minidom.Document()
+	batch = doc.createElement('batch')
+	module = doc.createElement('module')
+	module.setAttribute('name', 'cluster')
+	request = doc.createElement('request')
+	request.setAttribute('API_version', '1.0')
+	call = doc.createElement('function_call')
+	call.setAttribute('name', 'get_cluster.conf')
+	request.appendChild(call)
+	module.appendChild(request)
+	batch.appendChild(module)
+
+	# temporary workaround for ricci bug
+	system_info = rc.hostname()
+	try:
+		rc = RicciCommunicator(system_info)
+		if rc is None:
+			raise Exception, 'unknown error'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC0: connecting to %s: %r %s' \
+				% (system_info, e, str(e)))
+		return None
+	# end workaround
+
+	try:
+		ret = rc.process_batch(batch)
+		if not ret:
+			raise Exception, 'no XML response'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCC1: process_batch error for %s: %r %s' \
+				% (system_info, e, str(e)))
+		return None
+
+	var_nodes = ret.getElementsByTagName('var')
+	for i in var_nodes:
+		if i.getAttribute('name') == 'cluster.conf':
+			for j in i.childNodes:
+				if j.nodeType == xml.dom.Node.ELEMENT_NODE:
+					return j
+
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('GCC2: no conf node found')
+	return None
+
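+# Install the base64-encoded xvm fencing key on the node.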
+def set_xvm_key(rc, key_base64):
+	batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_xvm_key"><var mutable="false" name="key_base64" type="string" value="%s"/></function_call></request></module>' % key_base64
+	ricci_xml = rc.batch_run(batch_str)
+	return batchAttemptResult(ricci_xml)
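
A minimal usage sketch (not part of the commit above): how a caller elsewhere
in luci might drive one of these helpers. The host name and service name are
hypothetical placeholders, the ricci agent is assumed to be reachable and
already authenticated, and the import paths assume these functions live in
RicciQueries.py alongside the ricci_communicator module.

	from ricci_communicator import RicciCommunicator
	import RicciQueries as rq

	# Connect to the ricci agent on a cluster node (hypothetical host).
	rc = RicciCommunicator('node1.example.com')

	# Submit a restart of a cluster service and report the
	# (batch_id, status) pair returned by batchAttemptResult().
	batch_id, status = rq.restartService(rc, 'example_svc')
	if batch_id is None:
		print 'failed to submit the restart batch'
	else:
		print 'batch %s submitted, status %s' % (batch_id, status)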

