[Cluster-devel] conga/luci/site/luci/Extensions LuciClusterAct ...

rmccabe at sourceware.org rmccabe at sourceware.org
Tue May 22 21:52:07 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-22 21:52:05

Modified files:
	luci/site/luci/Extensions: LuciClusterActions.py LuciDB.py 
	                           cluster_adapters.py 
	luci/site/luci/Extensions/ClusterModel: ModelBuilder.py 

Log message:
	More refactoring and clean-up.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterActions.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.10&r2=1.1.2.11
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.8&r2=1.255.2.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/ClusterModel/ModelBuilder.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
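
For readers skimming the diffs below: the shape of this refactor is that the
implementation of each cluster action now lives in LuciClusterActions.py,
while cluster_adapters.py keeps only thin wrappers that gather request
variables and redirect on success. A minimal, self-contained sketch of the
pattern (get_request_vars, do_action, and the dict-based request here are
hypothetical stand-ins, not the real Zope objects):

	def get_request_vars(req, varlist):
		# Mirrors GetRequestVars below: prefer direct keys, then req['form']
		ret = {}
		for key in varlist:
			pval = req.get(key) or req.get('form', {}).get(key)
			if pval is not None:
				pval = pval.strip() or None
			ret[key] = pval
		return ret

	def do_action(fvars):
		# Stand-in for an action such as StopCluSvc: None on success,
		# a (False, { 'errors': [...] }) tuple on failure.
		if fvars['servicename'] is None:
			return (False, { 'errors': [ 'no service name' ] })
		return None

	req = { 'clustername': 'c1', 'servicename': 'svc1', 'form': {} }
	fvars = get_request_vars(req, ['clustername', 'servicename'])
	if do_action(fvars) is None:
		pass	# the real wrapper would redirect back to the service list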

--- conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/18 05:23:55	1.1.2.4
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterActions.py	2007/05/22 21:52:03	1.1.2.5
@@ -5,14 +5,691 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
-import RicciQueries as rq
-from LuciDB import set_node_flag, getRicciAgent
 from LuciSyslog import get_logger
-from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE
+import RicciQueries as rq
+
+from ricci_communicator import RicciCommunicator, RicciError
+
+from LuciDB import set_node_flag, getRicciAgent, delCluster, \
+	getClusterNode, getStorageNode, noNodeStatusPresent, \
+	setNodeStatus, resolve_nodename
+
+from conga_constants import CLUSTER_CONFIG, LUCI_DEBUG_MODE, \
+	NODE_DELETE, CLUSTER_DELETE, CLUSTER_FOLDER_PATH, \
+	CLUSTERLIST, CLUSTER_NODE_NEED_AUTH, NODE_FENCE, \
+	NODE_JOIN_CLUSTER, NODE_LEAVE_CLUSTER, NODE_REBOOT, \
+	RESOURCE_ADD, RESOURCE_CONFIG, RESOURCE_REMOVE, \
+	SERVICE_DELETE, SERVICE_RESTART, SERVICE_START, SERVICE_STOP
 
 luci_log = get_logger()
 
-def propagateClusterConfAsync(self, model, rc=None):
+#
+# Cluster service tasks
+#
+
+def RestartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	batch_number, result = rq.restartService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc0: %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while restarting cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_RESTART,
+			'Restarting service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('RestartCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def StartCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	batch_number, result = rq.startService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'An error occurred while starting cluster service "%s"' % svcname ] })
+
+	try:
+		if nodename is not None:
+			status_msg = 'Starting cluster service "%s" on node "%s"' \
+				% (svcname, nodename)
+		else:
+			status_msg = 'Starting cluster service "%s"' % svcname
+
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START, status_msg)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StartCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+def StopCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	batch_number, result = rq.stopService(rc, svcname)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc0: stop %s failed' % svcname)
+		return (False, { 'errors': [ 'An error occurred while stopping cluster service "%s"' % svcname ] })
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_STOP,
+			'Stopping cluster service "%s"' % svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('StopCluSvc1: error setting flags for service %s for cluster %s' % (svcname, cluname))
+
+def DeleteCluSvc(self, rc, fvars, model):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+
+	try:
+		model.deleteService(svcname)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('DeleteCluSvc1: Unable to find a service named %s for cluster %s: %r %s' % (svcname, cluname, e, str(e)))
+		return (False, { 'errors': [ 'Error removing cluster service "%s"' % svcname ]})
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			SERVICE_DELETE, 'Removing service "%s"' % svcname)
+	if ret[0] is False:
+		return ret
+
+def MigrateCluSvc(self, rc, fvars):
+	svcname = fvars['servicename']
+	cluname = fvars['clustername']
+	nodename = fvars['nodename']
+
+	batch_number, result = rq.migrateService(rc, svcname, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc0: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
+		return (False, { 'errors': [ 'Error migrating cluster service "%s" to node "%s"' % (svcname, nodename) ]})
+
+	try:
+		set_node_flag(self, cluname, rc.hostname(),
+			str(batch_number), SERVICE_START,
+			'Migrating service "%s" to node "%s"' % (svcname, nodename))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('MigrateCluSvc1: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+
+#
+# Cluster resource-related tasks
+#
+
+def DeleteResource(self, rc, fvars, model):
+	errstr = 'An error occurred while attempting to set the new cluster.conf'
+	resname = fvars['resourcename']
+
+	try:
+		model.deleteResource(resname)
+	except KeyError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource4: cannot find resource %s' % resname)
+		return '%s: the specified resource was not found' % errstr
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource5: error removing resource %s: %r %s' % (resname, e, str(e)))
+		return '%s: an error occurred while removing the resource' % errstr
+
+	ret = propagateClusterConfAsync(self, model, rc,
+			RESOURCE_REMOVE, 'Removing resource "%s"' % resname)
+	if ret[0] is False:
+		return ret
+
+def AddResource(self, rc, fvars, model, res):
+	try:
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
+		return 'Unable to add the new resource'
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_ADD,
+			'Creating cluster resource "%s"' % res.getName())
+	if ret[0] is False:
+		return ret
+
+def EditResource(self, rc, fvars, model, res):
+	try:
+		model.getResourcesPtr().addChild(res)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('EditResource2: %r %s' % (e, str(e)))
+		return 'Unable to edit the resource'
+
+	ret = propagateClusterConfAsync(self, model, rc, RESOURCE_CONFIG,
+				'Configuring resource "%s"' % res.getName())
+
+	if ret[0] is False:
+		return ret
+
+#
+# Cluster node membership-related tasks
+#
+
+def nodeJoin(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeJoinCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_JOIN_CLUSTER,
+			'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def nodeLeave(self, rc, clustername, nodename_resolved):
+	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
+
+	try:
+		nodefolder = self.restrictedTraverse(path)
+		if not nodefolder:
+			raise Exception, 'cannot find database object at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL0: node_leave_cluster err: %r %s' % (e, str(e)))
+		return None
+
+	objname = '%s____flag' % nodename_resolved
+	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
+
+	if fnpresent is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL1: An error occurred while checking flags for %s' \
+				% nodename_resolved)
+		return None
+
+	if fnpresent is False:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL2: flags still present for %s -- bailing out' \
+				% nodename_resolved)
+		return None
+
+	batch_number, result = rq.nodeLeaveCluster(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_LEAVE_CLUSTER,
+			'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
+	# We need to get a node name other than the node
+	# to be deleted, then delete the node from the cluster.conf
+	# and propagate it. We will need two ricci agents for this task,
+	# unless we are deleting the cluster itself.
+
+	if not delete_cluster:
+		# Make sure we can find a second node before we hose anything.
+		found_one = False
+
+		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+
+		try:
+			clusterfolder = self.restrictedTraverse(path)
+			if not clusterfolder:
+				raise Exception, 'no cluster folder at %s' % path
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
+			return None
+
+		try:
+			nodes = clusterfolder.objectItems('Folder')
+			if not nodes or len(nodes) < 1:
+				raise Exception, 'no cluster nodes in DB'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
+
+		for node in nodes:
+			if node[1].getId().find(nodename) != (-1):
+				continue
+			# here we make certain the node is up...
+			# XXX- we should also make certain this host is still
+			# in the cluster we believe it is.
+
+			try:
+				rc2 = RicciCommunicator(node[1].getId())
+				if not rc2:
+					raise Exception, 'ND1a: rc2 is None'
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('ND2: ricci %s error: %r %s' \
+						% (node[0], e, str(e)))
+				continue
+
+			if not rc2.authed():
+				try:
+					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				try:
+					snode = getStorageNode(self, node[0])
+					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+				except:
+					pass
+
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+				rc2 = None
+				continue
+			else:
+				found_one = True
+				break
+
+		if not found_one:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+			return None
+
+	# First, delete cluster.conf from the node to be deleted;
+	# next, have the node leave the cluster.
+	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND5: batch_number and/or result is None')
+		return None
+
+	# Unless we're deleting the whole cluster, it is not worth
+	# flagging this node in the DB, as we are going to delete it
+	# anyway. Now we need to delete the node from the model and
+	# send out the new cluster.conf.
+
+	if delete_cluster:
+		try:
+			set_node_flag(self, clustername, rc.hostname(),
+				str(batch_number), CLUSTER_DELETE,
+				'Deleting cluster "%s": Deleting node "%s"' \
+					% (clustername, nodename_resolved))
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
+					% (e, str(e)))
+	else:
+		delete_target = None
+		nodelist = model.getNodes()
+		find_node = nodename.lower()
+		for n in nodelist:
+			try:
+				if n.getName().lower() == find_node:
+					delete_target = n
+					break
+			except:
+				continue
+
+		if delete_target is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' % (nodename, clustername))
+			return None
+
+		try:
+			model.deleteNode(delete_target)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
+					% (delete_target.getName(), e, str(e)))
+
+		try:
+			model.setModified(True)
+			str_buf = model.exportModelAsString()
+			if not str_buf:
+				raise Exception, 'model string is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
+					% (e, str(e)))
+			return None
+
+		# propagate the new cluster.conf via the second node
+		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
+		if batch_number is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+			return None
+
+	# Now we need to delete the node from the DB
+	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		clusterfolder.manage_delObjects([nodename_resolved])
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND9: error deleting %s at %s: %r %s' \
+				% (nodename_resolved, path, e, str(e)))
+
+	if delete_cluster:
+		return True
+
+	try:
+		set_node_flag(self, clustername, rc2.hostname(),
+			str(batch_number), NODE_DELETE,
+			'Deleting node "%s"' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster management-related tasks.
+#
+
+def clusterStart(self, model):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart: RC %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+			errors += 1
+			continue
+		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart1: nodeJoin %s' \
+					% nodename_resolved)
+			errors += 1
+
+	return errors
+
+def clusterStop(self, model, delete=False):
+	if model is None:
+		return None
+
+	clustername = model.getClusterName()
+	nodes = model.getNodes()
+	if not nodes or len(nodes) < 1:
+		return None
+
+	errors = 0
+	for node in nodes:
+		nodename = node.getName().strip()
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+		try:
+			# XXX - do this in parallel
+			rc = RicciCommunicator(nodename_resolved)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
+					% (delete is True, str(nodename_resolved), e, str(e)))
+			errors += 1
+			continue
+
+		if delete is True:
+			ret = nodeDelete(self, rc, model, clustername, nodename,
+					nodename_resolved, delete_cluster=True)
+			if ret is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
+				errors += 1
+		else:
+			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+						% (nodename_resolved))
+				errors += 1
+	return errors
+
+def clusterRestart(self, model):
+	snum_err = clusterStop(self, model)
+	if snum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' \
+				% snum_err)
+	jnum_err = clusterStart(self, model)
+	if jnum_err:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' \
+				% jnum_err)
+	return snum_err + jnum_err
+
+def clusterDelete(self, model):
+	# Try to stop all the cluster nodes before deleting any.
+	num_errors = clusterStop(self, model, delete=False)
+	if num_errors > 0:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluDelete: %d errors' % num_errors)
+		return None
+
+	# If the cluster is stopped, delete all of the nodes.
+	num_errors = clusterStop(self, model, delete=True)
+	try:
+		clustername = model.getClusterName()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+		return None
+
+	if num_errors < 1:
+		try:
+			delCluster(self, clustername)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
+					% (clustername, e, str(e)))
+
+		try:
+			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
+			if len(clusterfolder.objectItems()) < 1:
+				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
+				clusters.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
+					% (clustername, e, str(e)))
+		return CLUSTERLIST
+	else:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete3: %s: %d errors' \
+				% (clustername, num_errors))
+
+def forceNodeReboot(self, rc, clustername, nodename_resolved):
+	batch_number, result = rq.nodeReboot(rc)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_REBOOT,
+			'Node "%s" is being rebooted' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+def forceNodeFence(self, clustername, nodename, nodename_resolved):
+	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+
+	try:
+		clusterfolder = self.restrictedTraverse(path)
+		if not clusterfolder:
+			raise Exception, 'no cluster folder at %s' % path
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
+		return None
+
+	try:
+		nodes = clusterfolder.objectItems('Folder')
+		if not nodes or len(nodes) < 1:
+			raise Exception, 'no cluster nodes'
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
+				% (clustername, e, str(e)))
+		return None
+
+	found_one = False
+	for node in nodes:
+		if node[1].getId().find(nodename) != (-1):
+			continue
+
+		try:
+			rc = RicciCommunicator(node[1].getId())
+			if not rc:
+				raise Exception, 'rc is None'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
+					% (node[0], e, str(e)))
+			continue
+
+		if not rc.authed():
+			rc = None
+			try:
+				snode = getStorageNode(self, node[1].getId())
+				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+
+			try:
+				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
+			except:
+				pass
+
+			continue
+		found_one = True
+		break
+
+	if not found_one:
+		return None
+
+	batch_number, result = rq.nodeFence(rc, nodename)
+	if batch_number is None or result is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
+		return None
+
+	try:
+		set_node_flag(self, clustername, rc.hostname(),
+			str(batch_number), NODE_FENCE,
+			'Node "%s" is being fenced' % nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
+				% (e, str(e)))
+	return True
+
+#
+# Cluster-independent tasks.
+#
+
+def getLogsForNode(self, request):
+	try:
+		nodename = request['nodename']
+	except KeyError, e:
+		try:
+			nodename = request.form['nodename']
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL0: no node name')
+			return 'Unable to get node name to retrieve logging information'
+
+	clustername = None
+	try:
+		clustername = request['clustername']
+	except KeyError, e:
+		try:
+			clustername = request.form['clustername']
+			if not clustername:
+				raise
+		except:
+			clustername = None
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
+	except:
+		pass
+
+	if clustername is None:
+		nodename_resolved = nodename
+	else:
+		nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	try:
+		rc = RicciCommunicator(nodename_resolved)
+	except RicciError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
+				% (nodename_resolved, e, str(e)))
+		return 'Ricci error while getting logs for %s' % nodename_resolved
+
+	if not rc.authed():
+		try:
+			snode = getStorageNode(self, nodename)
+			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL4: %s: %r %s' \
+					% (nodename_resolved, e, str(e)))
+
+		if clustername:
+			try:
+				cnode = getClusterNode(self, nodename, clustername)
+				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GNL5: %s: %r %s' \
+						% (nodename_resolved, e, str(e)))
+		return 'Luci is not authenticated to node %s. Reauthenticate first.' \
+			% nodename
+
+	return rq.getNodeLogs(rc)
+
+def propagateClusterConfAsync(	self,
+								model,
+								rc=None,
+								action=CLUSTER_CONFIG,
+								pmsg=None):
 	errors = list()
 	messages = list()
 
@@ -39,6 +716,7 @@
 		return (False, { 'errors': errors, 'messages': messages })
 
 	try:
+		model.setModified(True)
 		conf_str = str(model.exportModelAsString()).strip()
 		if not conf_str:
 			raise Exception, 'The new cluster configuration is blank'
@@ -56,12 +734,15 @@
 			% clustername)
 		return (False, { 'errors': errors, 'messages': messages })
 
+	if pmsg is None:
+		pmsg = 'Updating the cluster configuration for "%s"' % clustername
+
 	try:
-		set_node_flag(self, clustername, rc.hostname(), batch_id,
-			CLUSTER_CONFIG, 'Updating the cluster configuration for "%s"' %
-				clustername)
+		set_node_flag(self, clustername, rc.hostname(),
+			batch_id, action, pmsg)
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
 			luci_log.debug_verbose('PCC4: set_node_flag: %r %s' \
 				% (e, str(e)))
+
 	return (True, { 'errors': errors, 'messages': messages, 'batchid': batch_id })
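
The practical effect of the propagateClusterConfAsync() change above is that
each action helper can tag the ricci batch with its own action constant and
progress message instead of the generic CLUSTER_CONFIG pair. A hedged,
self-contained sketch of the (status, detail) contract the helpers rely on
(propagate_sketch and the batch id '42' are illustrative, not the real
function, which talks to ricci):

	def propagate_sketch(ok):
		if not ok:
			return (False, { 'errors': [ 'example error' ], 'messages': [] })
		return (True, { 'errors': [], 'messages': [], 'batchid': '42' })

	ret = propagate_sketch(True)
	if ret[0] is False:
		print ret[1]['errors']	# user-visible error strings
	else:
		print ret[1]['batchid']	# used for busy/progress tracking
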
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 02:45:54	1.1.2.10
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/22 21:52:05	1.1.2.11
@@ -634,28 +634,33 @@
 	ret[2] = need_auth_hash
 	return ret
 
-def getClusterSystems(self, clustername):
+def getCluster(self, clustername):
 	try:
-		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clustername))('Folder')
+		cluster_obj = self.restrictedTraverse('%s%s' \
+			% (CLUSTER_FOLDER_PATH, clustername))
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy0: %s: %r %s' \
+			luci_log.debug_verbose('GCobj0: %s: %r %s' \
 				% (clustername, e, str(e)))
 		return None
 
-	if isAdmin(self):
-		return cluster_nodes
+	if isAdmin(self) or cluster_permission_check(self, cluster_obj):
+		return cluster_obj
+	return None
 
+def getClusterSystems(self, clustername):
 	try:
-		cluster =  self.restrictedTraverse('%s%s/objectItems' \
-			% (CLUSTER_FOLDER_PATH, clustername))
+		cluster_nodes = self.restrictedTraverse('%s%s/objectItems' % (CLUSTER_FOLDER_PATH, clustername))('Folder')
 	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCSy1: %s: %r %s' \
+			luci_log.debug_verbose('GCSy0: %s: %r %s' \
 				% (clustername, e, str(e)))
 		return None
 
-	if cluster_permission_check(self, cluster):
+	if isAdmin(self):
+		return cluster_nodes
+	cluster_obj = getCluster(self, clustername)
+	if cluster_permission_check(self, cluster_obj):
 		return cluster_nodes
 	return None
 
@@ -691,8 +696,9 @@
 		user = getSecurityManager().getUser()
 		if user.has_permission('View', cluster[1]):
 			return True
-	except:
-		pass
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('CPC0: %s: %r %s' % (cluster, e, str(e)))
 	return False
 
 def allowed_systems(self, systems):
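
The LuciDB.py hunk factors the permission check out of getClusterSystems()
into a new getCluster() helper, so other callers can fetch the cluster
database object under the same access control. A hedged sketch of the
resulting flow (the stub below simulates isAdmin and
cluster_permission_check with booleans; it is not the real code):

	def get_cluster_sketch(is_admin, has_view_permission, cluster_obj):
		if is_admin or has_view_permission:
			return cluster_obj
		return None

	print get_cluster_sketch(False, True, 'cluster-folder-object')	# allowed
	print get_cluster_sketch(False, False, 'cluster-folder-object')	# None
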
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/18 05:23:55	1.255.2.8
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/22 21:52:05	1.255.2.9
@@ -3158,218 +3158,6 @@
 		return None
 	return getRicciAgent(self, clustername)
 
-def serviceStart(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart0: no service name')
-		return None
-
-	nodename = None
-	try:
-		nodename = req['nodename']
-	except:
-		try:
-			nodename = req.form['nodename']
-		except:
-			pass
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except KeyError, e:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart2: no cluster name for svc %s' \
-				% svcname)
-		return None
-
-	batch_number, result = rq.startService(rc, svcname, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart3: SS(%s,%s,%s) call failed' \
-				% (svcname, cluname, nodename))
-		return None
-
-	try:
-		if nodename:
-			status_msg = 'Starting service "%s" on node "%s"' \
-				% (svcname, nodename)
-		else:
-			status_msg = 'Starting service "%s"' % svcname
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, status_msg)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStart4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceMigrate(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate0: no service name')
-		return None
-
-	nodename = None
-	try:
-		nodename = req['nodename']
-	except:
-		try:
-			nodename = req.form['nodename']
-		except:
-			pass
-
-	if nodename is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate1: no target node name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except KeyError, e:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate2: no cluster name for svc %s' \
-				% svcname)
-		return None
-
-	batch_number, result = rq.migrateService(rc, svcname, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate3: SS(%s,%s,%s) call failed' \
-				% (svcname, cluname, nodename))
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, "Migrating service \'%s\' to node \'%s\'" % (svcname, nodename))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceMigrate4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceRestart(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart0: no service name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart1: no cluster for %s' % svcname)
-		return None
-
-	batch_number, result = rq.restartService(rc, svcname)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_RESTART, "Restarting service \'%s\'" % svcname)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceRestart3: error setting flags for service %s for cluster %s' % (svcname, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
-def serviceStop(self, rc, req):
-	svcname = None
-	try:
-		svcname = req['servicename']
-	except:
-		try:
-			svcname = req.form['servicename']
-		except:
-			pass
-
-	if svcname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop0: no service name')
-		return None
-
-	cluname = None
-	try:
-		cluname = req['clustername']
-	except:
-		try:
-			cluname = req.form['clustername']
-		except:
-			pass
-
-	if cluname is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop1: no cluster name for %s' % svcname)
-		return None
-
-	batch_number, result = rq.stopService(rc, svcname)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop2: stop %s failed' % svcname)
-		return None
-
-	try:
-		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_STOP, "Stopping service \'%s\'" % svcname)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('serviceStop3: error setting flags for service %s for cluster %s' % (svcname, cluname))
-
-	response = req.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (req['URL'], SERVICE_LIST, cluname))
-
 def clusterTaskProcess(self, model, request):
 	try:
 		task = request['task']
@@ -3379,460 +3167,49 @@
 		except:
 			if LUCI_DEBUG_MODE is True:
 				luci_log.debug_verbose('CTP1: no task specified')
-			task = None
-
-	if not model:
-		try:
-			cluname = request['clustername']
-			if not cluname:
-				raise Exception, 'cluname is blank'
-		except:
-			try:
-				cluname = request.form['clustername']
-				if not cluname:
-					raise Exception, 'cluname is blank'
-			except:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CTP0: no model/no cluster name')
-				return 'Unable to determine the cluster name'
-		try:
-			model = getModelForCluster(self, cluname)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
-			model = None
-
-	if not model:
-		return 'Unable to get the model object for %s' % cluname
-
-	redirect_page = NODES
-	if task == CLUSTER_STOP:
-		clusterStop(self, model)
-	elif task == CLUSTER_START:
-		clusterStart(self, model)
-	elif task == CLUSTER_RESTART:
-		clusterRestart(self, model)
-	elif task == CLUSTER_DELETE:
-		ret = clusterDelete(self, model)
-		if ret is not None:
-			redirect_page = ret
-	else:
-		return 'An unknown cluster task was requested'
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], redirect_page, model.getClusterName()))
-
-def nodeLeave(self, rc, clustername, nodename_resolved):
-	path = '%s%s/%s' % (CLUSTER_FOLDER_PATH, clustername, nodename_resolved)
-
-	try:
-		nodefolder = self.restrictedTraverse(path)
-		if not nodefolder:
-			raise Exception, 'cannot find database object at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NLO: node_leave_cluster err: %r %s' % (e, str(e)))
-		return None
-
-	objname = '%s____flag' % nodename_resolved
-	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
-
-	if fnpresent is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL1: An error occurred while checking flags for %s' \
-				% nodename_resolved)
-		return None
-
-	if fnpresent is False:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('NL2: flags are still present for %s -- bailing out' \
-				% nodename_resolved)
-		return None
-
-	batch_number, result = rq.nodeLeaveCluster(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_LEAVE_CLUSTER, 'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NL4: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def nodeJoin(self, rc, clustername, nodename_resolved):
-	batch_number, result = rq.nodeJoinCluster(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_JOIN_CLUSTER, 'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('NJ1: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def clusterStart(self, model):
-	if model is None:
-		return None
-
-	clustername = model.getClusterName()
-	nodes = model.getNodes()
-	if not nodes or len(nodes) < 1:
-		return None
-
-	errors = 0
-	for node in nodes:
-		nodename = node.getName().strip()
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-		try:
-			rc = RicciCommunicator(nodename_resolved)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStart: RC %s: %r %s' \
-					% (nodename_resolved, e, str(e)))
-			errors += 1
-			continue
-		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
-			errors += 1
-
-	return errors
-
-def clusterStop(self, model, delete=False):
-	if model is None:
-		return None
-
-	clustername = model.getClusterName()
-	nodes = model.getNodes()
-	if not nodes or len(nodes) < 1:
-		return None
-
-	errors = 0
-	for node in nodes:
-		nodename = node.getName().strip()
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-		try:
-			rc = RicciCommunicator(nodename_resolved)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('CStop0: [%d] RC %s: %r %s' \
-					% (delete is True, str(nodename_resolved), e, str(e)))
-			errors += 1
-			continue
-
-		if delete is True:
-			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
-				errors += 1
-		else:
-			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
-						% (nodename_resolved))
-				errors += 1
-	return errors
-
-def clusterRestart(self, model):
-	snum_err = clusterStop(self, model)
-	if snum_err:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' % snum_err)
-	jnum_err = clusterStart(self, model)
-	if jnum_err:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' % jnum_err)
-	return snum_err + jnum_err
-
-def clusterDelete(self, model):
-	# Try to stop all the cluster nodes before deleting any.
-	num_errors = clusterStop(self, model, delete=False)
-	if num_errors > 0:
-		return None
-
-	# If the cluster is stopped, delete all of the nodes.
-	num_errors = clusterStop(self, model, delete=True)
-	try:
-		clustername = model.getClusterName()
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
-		return None
-
-	if num_errors < 1:
-		try:
-			delCluster(self, clustername)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete1: %s: %r %s' \
-					% (clustername, e, str(e)))
-
-		try:
-			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
-			if len(clusterfolder.objectItems()) < 1:
-				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
-				clusters.manage_delObjects([clustername])
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('clusterDelete2: %s %r %s' \
-					% (clustername, e, str(e)))
-		return CLUSTERLIST
-	else:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
-				% (clustername, num_errors))
-
-def forceNodeReboot(self, rc, clustername, nodename_resolved):
-	batch_number, result = rq.nodeReboot(rc)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_REBOOT, 'Node "%s" is being rebooted' % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNR1: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def forceNodeFence(self, clustername, nodename, nodename_resolved):
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		if not clusterfolder:
-			raise Exception, 'no cluster folder at %s' % path
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF0: The cluster folder %s could not be found: %r %s' % (clustername, e, str(e)))
-		return None
-
-	try:
-		nodes = clusterfolder.objectItems('Folder')
-		if not nodes or len(nodes) < 1:
-			raise Exception, 'no cluster nodes'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug('FNF1: No cluster nodes for %s were found: %r %s' \
-				% (clustername, e, str(e)))
-		return None
-
-	found_one = False
-	for node in nodes:
-		if node[1].getId().find(nodename) != (-1):
-			continue
-
-		try:
-			rc = RicciCommunicator(node[1].getId())
-			if not rc:
-				raise Exception, 'rc is None'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug('FNF2: ricci error for host %s: %r %s' \
-					% (node[0], e, str(e)))
-			continue
-
-		if not rc.authed():
-			rc = None
-			try:
-				snode = getStorageNode(self, node[1].getId())
-				setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			try:
-				setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
-			continue
-		found_one = True
-		break
-
-	if not found_one:
-		return None
-
-	batch_number, result = rq.nodeFence(rc, nodename)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
-		return None
-
-	try:
-		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_FENCE, 'Node "%s" is being fenced' % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('FNF4: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
-
-def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
-	# We need to get a node name other than the node
-	# to be deleted, then delete the node from the cluster.conf
-	# and propogate it. We will need two ricci agents for this task,
-	# unless we are deleting the cluster itself.
-
-	if not delete_cluster:
-		# Make sure we can find a second node before we hose anything.
-		found_one = False
-
-		path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-
-		try:
-			clusterfolder = self.restrictedTraverse(path)
-			if not clusterfolder:
-				raise Exception, 'no cluster folder at %s' % path
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-			return None
-
-		try:
-			nodes = clusterfolder.objectItems('Folder')
-			if not nodes or len(nodes) < 1:
-				raise Exception, 'no cluster nodes in DB'
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r %s' % (clustername, e, str(e)))
-
-		for node in nodes:
-			if node[1].getId().find(nodename) != (-1):
-				continue
-			# here we make certain the node is up...
-			# XXX- we should also make certain this host is still
-			# in the cluster we believe it is.
-
-			try:
-				rc2 = RicciCommunicator(node[1].getId())
-				if not rc2:
-					raise Exception, 'ND1a: rc2 is None'
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.info('ND2: ricci %s error: %r %s' \
-						% (node[0], e, str(e)))
-				continue
-
-			if not rc2.authed():
-				try:
-					setNodeStatus(node[1], CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
-
-				try:
-					snode = getStorageNode(self, node[0])
-					setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-				except:
-					pass
-
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
-				rc2 = None
-				continue
-			else:
-				found_one = True
-				break
-
-		if not found_one:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
-			return None
-
-	# First, delete cluster.conf from node to be deleted.
-	# next, have node leave cluster.
-	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND5: batch_number and/or result is None')
-		return None
-
-	# Unless we're deleting the whole cluster, it is not worth
-	# flagging this node in DB, as we are going to delete it
-	# anyway. Now, we need to delete node from model and send out
-	# new cluster.conf
+			task = None
 
-	if delete_cluster:
+	if not model:
 		try:
-			set_node_flag(self, clustername, rc.hostname(), str(batch_number), CLUSTER_DELETE, 'Deleting cluster "%s": Deleting node "%s"' % (clustername, nodename_resolved))
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND5a: failed to set flags: %r %s' \
-					% (e, str(e)))
-	else:
-		delete_target = None
-		nodelist = model.getNodes()
-		find_node = nodename.lower()
-		for n in nodelist:
+			cluname = request['clustername']
+			if not cluname:
+				raise Exception, 'cluname is blank'
+		except:
 			try:
-				if n.getName().lower() == find_node:
-					delete_target = n
-					break
+				cluname = request.form['clustername']
+				if not cluname:
+					raise Exception, 'cluname is blank'
 			except:
-				continue
-
-		if delete_target is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
-					% (nodename, clustername))
-			return None
-
-		try:
-			model.deleteNode(delete_target)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r %s' \
-					% (delete_target.getName(), e, str(e)))
-
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CTP0: no model/no cluster name')
+				return 'Unable to determine the cluster name'
 		try:
-			model.setModified(True)
-			str_buf = model.exportModelAsString()
-			if not str_buf:
-				raise Exception, 'model string is blank'
+			model = getModelForCluster(self, cluname)
 		except Exception, e:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND7: exportModelAsString: %r %s' \
-					% (e, str(e)))
-				return None
-
-		# propagate the new cluster.conf via the second node
-		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
-		if batch_number is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
-			return None
+				luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
+			model = None
 
-	# Now we need to delete the node from the DB
-	path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-	try:
-		clusterfolder = self.restrictedTraverse(path)
-		clusterfolder.manage_delObjects([nodename_resolved])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND9: error deleting %s at %s: %r %s' \
-				% (nodename_resolved, path, e, str(e)))
+	if not model:
+		return 'Unable to get the model object for %s' % cluname
 
-	if delete_cluster:
-		return True
+	redirect_page = NODES
+	if task == CLUSTER_STOP:
+		clusterStop(self, model)
+	elif task == CLUSTER_START:
+		clusterStart(self, model)
+	elif task == CLUSTER_RESTART:
+		clusterRestart(self, model)
+	elif task == CLUSTER_DELETE:
+		ret = clusterDelete(self, model)
+		if ret is not None:
+			redirect_page = ret
+	else:
+		return 'An unknown cluster task was requested'
 
-	try:
-		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('ND10: failed to set flags: %r %s' \
-				% (e, str(e)))
-	return True
+	response = request.RESPONSE
+	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		% (request['URL'], redirect_page, model.getClusterName()))
 
 def nodeTaskProcess(self, model, request):
 	try:
@@ -3969,71 +3346,6 @@
 		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 			% (request['URL'], NODES, clustername))
 
-def getLogsForNode(self, request):
-	try:
-		nodename = request['nodename']
-	except KeyError, e:
-		try:
-			nodename = request.form['nodename']
-		except:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL0: no node name')
-			return 'Unable to get node name to retrieve logging information'
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except KeyError, e:
-		try:
-			clustername = request.form['clustername']
-			if not clustername:
-				raise
-		except:
-			clustername = None
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
-	except:
-		pass
-
-	if clustername is None:
-		nodename_resolved = nodename
-	else:
-		nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-	try:
-		rc = RicciCommunicator(nodename_resolved)
-	except RicciError, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL2: ricci error %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r %s' \
-				% (nodename_resolved, e, str(e)))
-		return 'Ricci error while getting logs for %s' % nodename_resolved
-
-	if not rc.authed():
-		try:
-			snode = getStorageNode(self, nodename)
-			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-		except Exception, e:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GNL4: %s: %r %s' \
-					% (nodename_resolved, e, str(e)))
-
-		if clustername:
-			try:
-				cnode = getClusterNode(self, nodename, clustername)
-				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
-			except Exception, e:
-				if LUCI_DEBUG_MODE is True:
-					luci_log.debug_verbose('GNL5: %s: %r %s' \
-						% (nodename_resolved, e, str(e)))
-		return 'Luci is not authenticated to node %s. Please reauthenticate first' % nodename
-
-	return rq.getNodeLogs(rc)
-
 def isClusterBusy(self, req):
 	items = None
 	busy_map = {}
@@ -4390,255 +3702,6 @@
 		clu_map['isVirtualized'] = False
 	return clu_map
 
-def delService(self, request):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-
-	try:
-		model = request.SESSION.get('model')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService0: no model: %r %s' \
-				% (e, str(e)))
-		return (False, {'errors': [ errstr ] })
-
-	name = None
-	try:
-		name = request['servicename']
-	except:
-		try:
-			name = request.form['servicename']
-		except:
-			pass
-
-	if name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService1: no service name')
-		return (False, {'errors': [ '%s: no service name was provided' % errstr ]})
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			pass
-
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService2: no cluster name for %s' % name)
-		return (False, {'errors': [ '%s: no cluster name was provided' % errstr ]})
-
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername)
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			raise Exception, 'unable to determine the hostname of the ricci agent'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService4: %s: %r %s' \
-				% (errstr, e, str(e)))
-		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
-
-	try:
-		model.deleteService(name)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s: %r %s' % (name, clustername, e, str(e)))
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string is blank'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService6: exportModelAsString failed: %r %s' % (e, str(e)))
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	batch_number, result = rq.setClusterConf(rc, str(conf))
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService7: missing batch and/or result')
-		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
-
-	try:
-		set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_DELETE, 'Removing service "%s"' % name)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delService8: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], SERVICES, clustername))
-
-def delResource(self, rc, request):
-	errstr = 'An error occurred while attempting to set the new cluster.conf'
-
-	try:
-		model = request.SESSION.get('model')
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource0: no model: %r %s' \
-				% (e, str(e)))
-		return errstr
-
-	name = None
-	try:
-		name = request['resourcename']
-	except:
-		try:
-			name = request.form['resourcename']
-		except:
-			pass
-
-	if name is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource1: no resource name')
-		return '%s: no resource name was provided' % errstr
-
-	clustername = None
-	try:
-		clustername = request['clustername']
-	except:
-		try:
-			clustername = request.form['clustername']
-		except:
-			pass
-
-	if clustername is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
-		return '%s: could not determine the cluster name' % errstr
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			raise Exception, 'unable to determine the hostname of the ricci agent'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource3: %s: %r %s' \
-				% (errstr, e, str(e)))
-		return '%s: could not determine the ricci agent hostname' % errstr
-
-	resPtr = model.getResourcesPtr()
-	resources = resPtr.getChildren()
-
-	found = 0
-	for res in resources:
-		if res.getName() == name:
-			resPtr.removeChild(res)
-			found = 1
-			break
-
-	if not found:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource4: cant find res %s' % name)
-		return '%s: the specified resource was not found' % errstr
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string is blank'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource5: exportModelAsString failed: %s' % e)
-		return errstr
-
-	batch_number, result = rq.setClusterConf(rc, str(conf))
-	if batch_number is None or result is None:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource6: missing batch and/or result')
-		return errstr
-
-	try:
-		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_REMOVE, 'Removing resource "%s"' % request['resourcename'])
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('delResource7: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
-		% (request['URL'], RESOURCES, clustername))
-
-def addResource(self, request, model, res):
-	clustername = model.getClusterName()
-	if not clustername:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource0: no cluname from mb')
-		return 'Unable to determine cluster name'
-
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource1: %s' % clustername)
-		return 'Unable to find a ricci agent for the %s cluster' % clustername
-
-	try:
-		model.getResourcesPtr().addChild(res)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource2: %r %s' % (e, str(e)))
-		return 'Unable to add the new resource'
-
-	try:
-		model.setModified(True)
-		conf = model.exportModelAsString()
-		if not conf:
-			raise Exception, 'model string for %s is blank' % clustername
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource3: exportModelAsString: %r %s' \
-				% (e, str(e)))
-		return 'An error occurred while adding this resource'
-
-	try:
-		ragent = rc.hostname()
-		if not ragent:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('addResource4: missing ricci hostname')
-			raise Exception, 'unknown ricci agent hostname'
-
-		batch_number, result = rq.setClusterConf(rc, str(conf))
-		if batch_number is None or result is None:
-			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('addResource5: missing batch_number or result')
-			raise Exception, 'unable to save the new cluster configuration'
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource6: %r %s' % (e, str(e)))
-		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
-
-	try:
-		try:
-			if request.form.has_key('edit'):
-				action_type = RESOURCE_CONFIG
-				action_str = 'Configuring resource "%s"' % res.getName()
-			else:
-				raise Exception, 'new'
-		except Exception, e:
-			action_type = RESOURCE_ADD
-			action_str = 'Creating new resource "%s"' % res.getName()
-
-		set_node_flag(self, clustername, ragent, str(batch_number), action_type, action_str)
-	except Exception, e:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('addResource7: failed to set flags: %r %s' \
-				% (e, str(e)))
-
-	response = request.RESPONSE
-	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
-		% (request['URL'], RESOURCES, clustername))
-
 def process_cluster_conf_editor(self, req):
 	clustername = req['clustername']
 	msg_list = list(('\n'))
@@ -4744,3 +3807,135 @@
 	#CALL
 	return {}
 
+def GetRequestVars(req, varlist):
+	ret = {}
+	for i in varlist:
+		pval = None
+		if req.has_key(i):
+			pval = req[i].strip()
+			if not pval:
+				pval = None
+		if pval is None:
+			if req.form and req.form.has_key(i):
+				pval = req.form[i].strip()
+				if not pval:
+					pval = None
+		ret[i] = pval
+	return ret
+
+def GetSvcReqVars(request):
+	return GetRequestVars(request,
+			['clustername', 'servicename', 'nodename', 'URL' ])
+def GetResReqVars(request):
+	return GetRequestVars(request,
+			['clustername', 'resourcename', 'nodename', 'URL' ])
+
+# These are called from external methods.
+
+def serviceRestart(self, rc, req):
+	from LuciClusterActions import RestartCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = RestartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceStop(self, rc, req):
+	from LuciClusterActions import StopCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = StopCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceStart(self, rc, req):
+	from LuciClusterActions import StartCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = StartCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def serviceDelete(self, rc, req):
+	from LuciClusterActions import DeleteCluSvc
+
+	fvars = GetSvcReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	ret = DeleteCluSvc(self, rc, fvars, model)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICES, fvars['clustername']))
+	else:
+		return ret
+
+def serviceMigrate(self, rc, req):
+	from LuciClusterActions import MigrateCluSvc
+
+	fvars = GetSvcReqVars(req)
+	ret = MigrateCluSvc(self, rc, fvars)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], SERVICE_LIST, fvars['clustername']))
+	else:
+		return ret
+
+def resourceDelete(self, rc, req):
+	from LuciClusterActions import DeleteResource
+
+	fvars = GetResReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	ret = DeleteResource(self, rc, fvars, model)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], RESOURCES, fvars['clustername']))
+	else:
+		return ret
+
+def resourceAdd(self, rc, req):
+	from LuciClusterActions import AddResource
+
+	fvars = GetResReqVars(req)
+	model = LuciExtractCluModel(self, req, cluster_name=fvars['clustername'])
+
+	# XXX pass in resource
+	ret = AddResource(self, rc, fvars, model, None)
+	if ret is None:
+		response = req.RESPONSE
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+			% (req['URL'], RESOURCES, fvars['clustername']))
+	else:
+		return ret
+
+def nodeJoin(self, rc, cluname, nodename_resolved):
+	return None
+def nodeLeave(self, rc, cluname, nodename_resolved):
+	return None
+def nodeDelete(self, rc, cluname, nodename_resolved):
+	return None
+
+
+def clusterStart(self, model):
+	return None
+def clusterStop(self, model):
+	return None
+def clusterRestart(self, model):
+	return None
+def clusterDelete(self, model):
+	return None
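
A quick sanity check of the GetRequestVars() semantics added above, using a
hypothetical stub in place of the real Zope request object (which exposes
both mapping access and a .form attribute):

	class StubRequest(dict):
		# hypothetical stand-in; a real Zope REQUEST behaves similarly
		def __init__(self, direct, form):
			dict.__init__(self, direct)
			self.form = form

	req = StubRequest({ 'clustername': ' c1 ' }, { 'servicename': 'svc1' })
	print GetRequestVars(req, ['clustername', 'servicename', 'nodename'])
	# -> {'clustername': 'c1', 'servicename': 'svc1', 'nodename': None}
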
--- conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/15 21:42:21	1.1.2.4
+++ conga/luci/site/luci/Extensions/ClusterModel/Attic/ModelBuilder.py	2007/05/22 21:52:05	1.1.2.5
@@ -771,6 +771,13 @@
       raise Exception, 'More than one resource is named "%s"' % name
     return res[0]
 
+  def deleteResource(self, name):
+    for i in self.resources_ptr.getChildren():
+      if i.getName() == name:
+        self.resources_ptr.removeChild(i)
+        return i
+    raise KeyError, name
+
   def getClusterNodesPtr(self):
     return self.clusternodes_ptr
 
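
The new ModelBuilder.deleteResource() returns the removed child object and
raises KeyError when no resource has the given name, which is what lets
DeleteResource() in LuciClusterActions.py distinguish "not found" from other
failures. A short usage sketch (ModelStub and the resource name 'my_fs' are
hypothetical stand-ins for the ClusterModel classes):

	class ModelStub:
		def __init__(self, names):
			self.names = names
		def deleteResource(self, name):
			if name in self.names:
				self.names.remove(name)
				return name
			raise KeyError, name

	model = ModelStub(['my_fs'])
	try:
		removed = model.deleteResource('my_fs')	# returns the removed child
	except KeyError:
		removed = None	# DeleteResource() maps this to "not found"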
