[Cluster-devel] conga/luci/site/luci/Extensions LuciClusterInf ...

rmccabe at sourceware.org
Wed May 16 21:27:23 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	conga
Branch: 	EXPERIMENTAL
Changes by:	rmccabe at sourceware.org	2007-05-16 21:27:22

Modified files:
	luci/site/luci/Extensions: LuciClusterInfo.py LuciDB.py 
	                           LuciZope.py cluster_adapters.py 

Log message:
	More cleanup and refactor.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.3&r2=1.1.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.6&r2=1.1.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.1.2.4&r2=1.1.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=EXPERIMENTAL&r1=1.255.2.5&r2=1.255.2.6
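
The bulk of this change applies one pattern throughout the four modules:
verbose debug logging is gated on LUCI_DEBUG_MODE, and exceptions are
formatted with %r (which preserves the exception type) instead of str().
A minimal before/after sketch of the pattern, using only names these
modules already define:

	# before: logs unconditionally and flattens the exception to a string
	try:
		nodes = model.getNodes()
	except Exception, e:
		luci_log.debug_verbose('getnodes0: %s' % str(e))
		nodes = []

	# after: log only in debug mode; %r keeps the exception's repr
	try:
		nodes = model.getNodes()
	except Exception, e:
		if LUCI_DEBUG_MODE is True:
			luci_log.debug_verbose('getnodes0: %r' % e)
		nodes = []

The commit also hoists imports (make_uuid, ModelBuilder) to module level,
normalizes string quoting to single quotes, and reindents the previously
space-indented functions with tabs.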

--- conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/15 21:42:21	1.1.2.3
+++ conga/luci/site/luci/Extensions/Attic/LuciClusterInfo.py	2007/05/16 21:27:21	1.1.2.4
@@ -5,7 +5,9 @@
 # GNU General Public License as published by the
 # Free Software Foundation.
 
+from Products.Archetypes.utils import make_uuid
 from ClusterModel.GeneralError import GeneralError
+from ClusterModel.ModelBuilder import ModelBuilder
 import RicciQueries as rq
 from ricci_communicator import RicciCommunicator
 from FenceHandler import FENCE_OPTS
@@ -29,7 +31,8 @@
 	try:
 		return map(lambda x: str(x.getName()), model.getNodes())
 	except Exception, e:
-		luci_log.debug_verbose('getnodes0: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getnodes0: %r' % e)
 	return []
 
 def getResourceInfo(model, name, res=None):
@@ -59,13 +62,68 @@
 def getResources(model):
 	return map(lambda x: getResourcesInfo(model, None, x), model.getResources())
 
+def getClusterStatusModel(model):
+	results = list()
+	vals = {}
+
+	try:
+		clustername = model.getClusterName()
+		clusteralias = model.getClusterAlias()
+		vals['type'] = 'cluster'
+		vals['alias'] = clusteralias
+		vals['name'] = clustername
+		vals['error'] = True
+		vals['votes'] = '[unknown]'
+		vals['quorate'] = '[unknown]'
+		vals['minQuorum'] = '[unknown]'
+		results.append(vals)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM0: %r' % e)
+		return None
+
+	try:
+		nodelist = model.getNodes()
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSM1: %r' % e)
+		return None
+
+	for node in nodelist:
+		node_val = {}
+		node_val['type'] = 'node'
+		try:
+			node_name = node.getName()
+			if not node_name:
+				raise Exception, 'cluster node name is unknown'
+		except:
+			node_name = '[unknown]'
+
+		node_val['name'] = node_name
+		node_val['clustered'] = '[unknown]'
+		node_val['online'] = '[unknown]'
+		node_val['error'] = True
+
+		try:
+			votes = node.getVotes()
+			if not votes:
+				raise Exception, 'unknown number of votes'
+		except:
+			votes = '[unknown]'
+
+		node_val['votes'] = votes
+		results.append(node_val)
+	return results
+
 def getClusterStatus(self, request, rc, cluname=None):
+
 	try:
 		doc = rq.getClusterStatusBatch(rc)
 		if not doc:
 			raise Exception, 'doc is None'
 	except Exception, e:
-		luci_log.debug_verbose('GCS0: error: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCS0: error: %r' % e)
 		doc = None
 
 	if doc is None and not cluname:
@@ -76,11 +134,14 @@
 				raise Exception, 'cinfo is None'
 			return cinfo
 		except Exception, e:
-			luci_log.debug_verbose('GCS1: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCS1: %r' % e)
 			doc = None
 
 	if not doc:
 		try:
+			from LuciDB import getClusterStatusDB
+
 			clustername = cluname
 			if clustername is None:
 				try:
@@ -99,12 +160,13 @@
 				raise Exception, 'cinfo is None'
 			return cinfo
 		except Exception, e:
-			luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCS1a: unable to get cluster info from DB: %r' % e)
 		return []
 
 	results = list()
 	vals = {}
-	vals['type'] = "cluster"
+	vals['type'] = 'cluster'
 
 	try:
 		vals['alias'] = doc.firstChild.getAttribute('alias')
@@ -120,7 +182,7 @@
 	for node in doc.firstChild.childNodes:
 		if node.nodeName == 'node':
 			vals = {}
-			vals['type'] = "node"
+			vals['type'] = 'node'
 			vals['clustered'] = node.getAttribute('clustered')
 			vals['name'] = node.getAttribute('name')
 			vals['online'] = node.getAttribute('online')
@@ -167,14 +229,14 @@
 			cluname = '[error retrieving cluster name]'
 
 	for item in status:
-		if item['type'] == "service":
+		if item['type'] == 'service':
 			itemmap = {}
 			itemmap['name'] = item['name']
 
 			cur_node = None
-			if item['running'] == "true":
+			if item['running'] == 'true':
 				cur_node = item['nodename']
-				itemmap['running'] = "true"
+				itemmap['running'] = 'true'
 				itemmap['nodename'] = cur_node
 				itemmap['disableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_STOP)
 				itemmap['restarturl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, item['name'], SERVICE_RESTART)
@@ -213,11 +275,11 @@
 
 			itemmap['links'] = starturls
 
-			dom = svc.getAttribute("domain")
+			dom = svc.getAttribute('domain')
 			if dom is not None:
 				itemmap['faildom'] = dom
 			else:
-				itemmap['faildom'] = "No Failover Domain"
+				itemmap['faildom'] = 'No Failover Domain'
 			maplist.append(itemmap)
 
 	svc_map['services'] = maplist
@@ -228,6 +290,7 @@
 	#Next, check for children of it
 	#Call yourself on every children
 	#then return
+
 	rc_map = {}
 	if parent is not None:
 		rc_map['parent'] = parent
@@ -266,7 +329,6 @@
 
 
 def getServiceInfo(self, status, model, req):
-	from Products.Archetypes.utils import make_uuid
 	#set up struct for service config page
 	hmap = {}
 	root_uuid = 'toplevel'
@@ -340,7 +402,7 @@
 					innermap['links'] = starturls
 				else:
 					#Do not set ['running'] in this case...ZPT will detect it is missing
-					innermap['current'] = "Stopped"
+					innermap['current'] = 'Stopped'
 					innermap['enableurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_START)
 					innermap['delurl'] = '%s?clustername=%s&servicename=%s&pagetype=%s' % (baseurl, cluname, servicename, SERVICE_DELETE)
 
@@ -400,19 +462,20 @@
 	try:
 		fdom = model.getFailoverDomainByName(request['fdomname'])
 	except Exception, e:
-		luci_log.debug_verbose('getFdomInfo0: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFdomInfo0: %r' % e)
 		return fhash
 
 	fhash['name'] = fdom.getName()
 
 	ordered_attr = fdom.getAttribute('ordered')
-	if ordered_attr is not None and (ordered_attr == "true" or ordered_attr == "1"):
+	if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
 		fhash['prioritized'] = '1'
 	else:
 		fhash['prioritized'] = '0'
 
 	restricted_attr = fdom.getAttribute('restricted')
-	if restricted_attr is not None and (restricted_attr == "true" or restricted_attr == "1"):
+	if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
 		fhash['restricted'] = '1'
 	else:
 		fhash['restricted'] = '0'
@@ -426,972 +489,989 @@
 		fhash['members'][node.getName()] = { 'priority': priority }
 	return fhash
 
-
 def getFdomsInfo(self, model, request, clustatus):
-  slist = list()
-  nlist = list()
-  for item in clustatus:
-    if item['type'] == "node":
-      nlist.append(item)
-    elif item['type'] == "service":
-      slist.append(item)
-  fdomlist = list()
-  clustername = request['clustername']
-  baseurl = request['URL']
-  fdoms = model.getFailoverDomains()
-  svcs = model.getServices()
-  for fdom in fdoms:
-    fdom_map = {}
-    fdom_map['name'] = fdom.getName()
-    fdom_map['cfgurl'] = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
-		% (baseurl, FDOM, clustername, fdom.getName())
-    ordered_attr = fdom.getAttribute('ordered')
-    restricted_attr = fdom.getAttribute('restricted')
-    if ordered_attr is not None and (ordered_attr == "true" or ordered_attr == "1"):
-      fdom_map['ordered'] = True
-    else:
-      fdom_map['ordered'] = False
-    if restricted_attr is not None and (restricted_attr == "true" or restricted_attr == "1"):
-      fdom_map['restricted'] = True
-    else:
-      fdom_map['restricted'] = False
-    nodes = fdom.getChildren()
-    nodelist = list()
-    for node in nodes:
-      nodesmap = {}
-      ndname = node.getName()
-      for nitem in nlist:
-        if nitem['name'] == ndname:
-          break
-      nodesmap['nodename'] = ndname
-      nodesmap['nodecfgurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
-		% (baseurl, clustername, ndname, NODE)
-      if nitem['clustered'] == "true":
-        nodesmap['status'] = NODE_ACTIVE
-      elif nitem['online'] == "false":
-        nodesmap['status'] = NODE_UNKNOWN
-      else:
-        nodesmap['status'] = NODE_INACTIVE
-      priority_attr =  node.getAttribute('priority')
-      if priority_attr is not None:
-        nodesmap['priority'] = "0"
-      nodelist.append(nodesmap)
-    fdom_map['nodeslist'] = nodelist
-
-    svclist = list()
-    for svc in svcs:
-      svcname = svc.getName()
-      for sitem in slist:
-        if sitem['name'] == svcname:
-          break  #found more info about service...
-
-      domain = svc.getAttribute("domain")
-      if domain is not None:
-        if domain == fdom.getName():
-          svcmap = {}
-          svcmap['name'] = svcname
-          svcmap['status'] = sitem['running']
-          svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' \
-			% (baseurl, SERVICE, clustername, svcname)
-          svcmap['location'] = sitem['nodename']
-          svclist.append(svcmap)
-    fdom_map['svclist'] = svclist
-    fdomlist.append(fdom_map)
-  return fdomlist
+	slist = list()
+	nlist = list()
+	fdomlist = list()
+
+	for item in clustatus:
+		if item['type'] == 'node':
+			nlist.append(item)
+		elif item['type'] == 'service':
+			slist.append(item)
+
+	clustername = request['clustername']
+	baseurl = request['URL']
+	fdoms = model.getFailoverDomains()
+	svcs = model.getServices()
+
+	for fdom in fdoms:
+		fdom_map = {}
+		fdom_name = fdom.getName()
+		fdom_map['name'] = fdom_name
+		fdom_map['cfgurl'] = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+			% (baseurl, FDOM, clustername, fdom.getName())
+
+		ordered_attr = fdom.getAttribute('ordered')
+		if ordered_attr is not None and (ordered_attr == 'true' or ordered_attr == '1'):
+			fdom_map['ordered'] = True
+		else:
+			fdom_map['ordered'] = False
+
+		restricted_attr = fdom.getAttribute('restricted')
+		if restricted_attr is not None and (restricted_attr == 'true' or restricted_attr == '1'):
+			fdom_map['restricted'] = True
+		else:
+			fdom_map['restricted'] = False
+
+		nodes = fdom.getChildren()
+		nodelist = list()
+		for node in nodes:
+			nodesmap = {}
+			ndname = node.getName()
+
+			for nitem in nlist:
+				if nitem['name'] == ndname:
+					nodesmap['nodename'] = ndname
+					nodesmap['nodecfgurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+						% (baseurl, clustername, ndname, NODE)
+					if nitem['clustered'] == 'true':
+						nodesmap['status'] = NODE_ACTIVE
+					elif nitem['online'] == 'false':
+						nodesmap['status'] = NODE_UNKNOWN
+					else:
+						nodesmap['status'] = NODE_INACTIVE
+					priority_attr =	node.getAttribute('priority')
+					if priority_attr is not None:
+						nodesmap['priority'] = '0'
+					nodelist.append(nodesmap)
+		fdom_map['nodeslist'] = nodelist
+
+		svclist = list()
+		for svc in svcs:
+			svcname = svc.getName()
+			for sitem in slist:
+				if sitem['name'] == svcname:
+					domain = svc.getAttribute('domain')
+					if domain == fdom_name:
+						svcmap = {}
+						svcmap['name'] = svcname
+						svcmap['status'] = sitem['running']
+						svcmap['svcurl'] = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+							% (baseurl, SERVICE, clustername, svcname)
+						svcmap['location'] = sitem['nodename']
+						svclist.append(svcmap)
+		fdom_map['svclist'] = svclist
+		fdomlist.append(fdom_map)
+
+	return fdomlist
 
 def getClusterInfo(self, model, req):
-  try:
-    cluname = req[CLUNAME]
-  except:
-    try:
-      cluname = req.form['clustername']
-    except:
-      try:
-        cluname = req.form['clusterName']
-      except:
-        luci_log.debug_verbose('GCI0: unable to determine cluster name')
-        return {}
-
-  clumap = {}
-  if model is None:
-    try:
-      model = getModelForCluster(self, cluname)
-      if not model:
-        raise Exception, 'model is none'
-      req.SESSION.set('model', model)
-    except Exception, e:
-      luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %s' % (cluname, str(e)))
-      return {}
-  else:
-    totem = model.getTotemPtr()
-    if totem:
-      clumap['totem'] = totem.getAttributes()
+	try:
+		cluname = req[CLUNAME]
+	except:
+		try:
+			cluname = req.form['clustername']
+		except:
+			try:
+				cluname = req.form['clusterName']
+			except:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GCI0: unable to determine cluster name')
+				return {}
 
-  prop_baseurl = '%s?pagetype=%s&clustername=%s&' \
+	clumap = {}
+	if model is None:
+		try:
+			model = getModelForCluster(self, cluname)
+			if not model:
+				raise Exception, 'model is none'
+			req.SESSION.set('model', model)
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCI1: unable to get model for cluster %s: %r' % (cluname, e))
+			return {}
+	else:
+		totem = model.getTotemPtr()
+		if totem:
+			clumap['totem'] = totem.getAttributes()
+
+	prop_baseurl = '%s?pagetype=%s&clustername=%s&' \
 	% (req['URL'], CLUSTER_CONFIG, cluname)
-  basecluster_url = '%stab=%s' % (prop_baseurl, PROP_GENERAL_TAB)
-  #needed:
-  clumap['basecluster_url'] = basecluster_url
-  #name field
-  clumap['clustername'] = model.getClusterAlias()
-  #config version
-  cp = model.getClusterPtr()
-  clumap['config_version'] = cp.getConfigVersion()
-  #-------------
-  #new cluster params - if rhel5
-  #-------------
-
-  clumap['fence_xvmd'] = model.hasFenceXVM()
-  gulm_ptr = model.getGULMPtr()
-  if not gulm_ptr:
-    #Fence Daemon Props
-    fencedaemon_url = '%stab=%s' % (prop_baseurl, PROP_FENCE_TAB)
-    clumap['fencedaemon_url'] = fencedaemon_url
-    fdp = model.getFenceDaemonPtr()
-    pjd = fdp.getAttribute('post_join_delay')
-    if pjd is None:
-      pjd = "6"
-    pfd = fdp.getAttribute('post_fail_delay')
-    if pfd is None:
-      pfd = "0"
-    #post join delay
-    clumap['pjd'] = pjd
-    #post fail delay
-    clumap['pfd'] = pfd
-
-    #-------------
-    #if multicast
-    multicast_url = '%stab=%s' % (prop_baseurl, PROP_MCAST_TAB)
-    clumap['multicast_url'] = multicast_url
-    #mcast addr
-    is_mcast = model.isMulticast()
-    if is_mcast:
-      clumap['mcast_addr'] = model.getMcastAddr()
-      clumap['is_mcast'] = "True"
-    else:
-      clumap['is_mcast'] = "False"
-      clumap['mcast_addr'] = "1.2.3.4"
-    clumap['gulm'] = False
-  else:
-    #-------------
-    #GULM params (rhel4 only)
-    lockserv_list = list()
-    clunodes = model.getNodes()
-    gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
-    lockserv_list = map(lambda x: (x, True), gulm_lockservs)
-    for node in clunodes:
-      n = node.getName()
-      if not n in gulm_lockservs:
-        lockserv_list.append((n, False))
-    clumap['gulm'] = True
-    clumap['gulm_url'] = '%stab=%s' % (prop_baseurl, PROP_GULM_TAB)
-    clumap['gulm_lockservers'] = lockserv_list
-
-  #-------------
-  #quorum disk params
-  quorumd_url = '%stab=%s' % (prop_baseurl, PROP_QDISK_TAB)
-  clumap['quorumd_url'] = quorumd_url
-  is_quorumd = model.isQuorumd()
-  clumap['is_quorumd'] = is_quorumd
-  clumap['interval'] = ""
-  clumap['tko'] = ""
-  clumap['votes'] = ""
-  clumap['min_score'] = ""
-  clumap['device'] = ""
-  clumap['label'] = ""
-
-  #list struct for heuristics...
-  hlist = list()
-
-  if is_quorumd:
-    qdp = model.getQuorumdPtr()
-    interval = qdp.getAttribute('interval')
-    if interval is not None:
-      clumap['interval'] = interval
-
-    tko = qdp.getAttribute('tko')
-    if tko is not None:
-      clumap['tko'] = tko
-
-    votes = qdp.getAttribute('votes')
-    if votes is not None:
-      clumap['votes'] = votes
-
-    min_score = qdp.getAttribute('min_score')
-    if min_score is not None:
-      clumap['min_score'] = min_score
-
-    device = qdp.getAttribute('device')
-    if device is not None:
-      clumap['device'] = device
-
-    label = qdp.getAttribute('label')
-    if label is not None:
-      clumap['label'] = label
-
-    heuristic_kids = qdp.getChildren()
-
-    for kid in heuristic_kids:
-      hmap = {}
-      hprog = kid.getAttribute('program')
-      if hprog is None:
-        continue
-
-      hscore = kid.getAttribute('score')
-      hmap['hprog'] = hprog
-      if hscore is not None:
-        hmap['hscore'] = hscore
-      else:
-        hmap['hscore'] = ""
-
-      hinterval = kid.getAttribute('interval')
-      if hinterval is not None:
-        hmap['hinterval'] = hinterval
-      else:
-        hmap['hinterval'] = ""
-      hlist.append(hmap)
-  clumap['hlist'] = hlist
+	basecluster_url = '%stab=%s' % (prop_baseurl, PROP_GENERAL_TAB)
+	#needed:
+	clumap['basecluster_url'] = basecluster_url
+	#name field
+	clumap['clustername'] = model.getClusterAlias()
+	#config version
+	cp = model.getClusterPtr()
+	clumap['config_version'] = cp.getConfigVersion()
+	#-------------
+	#new cluster params - if rhel5
+	#-------------
+
+	clumap['fence_xvmd'] = model.hasFenceXVM()
+	gulm_ptr = model.getGULMPtr()
+	if not gulm_ptr:
+		#Fence Daemon Props
+		fencedaemon_url = '%stab=%s' % (prop_baseurl, PROP_FENCE_TAB)
+		clumap['fencedaemon_url'] = fencedaemon_url
+		fdp = model.getFenceDaemonPtr()
+		pjd = fdp.getAttribute('post_join_delay')
+		if pjd is None:
+			pjd = '6'
+		pfd = fdp.getAttribute('post_fail_delay')
+		if pfd is None:
+			pfd = '0'
+		#post join delay
+		clumap['pjd'] = pjd
+		#post fail delay
+		clumap['pfd'] = pfd
+
+		#-------------
+		#if multicast
+		multicast_url = '%stab=%s' % (prop_baseurl, PROP_MCAST_TAB)
+		clumap['multicast_url'] = multicast_url
+		#mcast addr
+		is_mcast = model.isMulticast()
+		if is_mcast:
+			clumap['mcast_addr'] = model.getMcastAddr()
+			clumap['is_mcast'] = 'True'
+		else:
+			clumap['is_mcast'] = 'False'
+			clumap['mcast_addr'] = '1.2.3.4'
+		clumap['gulm'] = False
+	else:
+		#-------------
+		#GULM params (rhel4 only)
+		lockserv_list = list()
+		clunodes = model.getNodes()
+		gulm_lockservs = map(lambda x: x.getName(), gulm_ptr.getChildren())
+		lockserv_list = map(lambda x: (x, True), gulm_lockservs)
+		for node in clunodes:
+			n = node.getName()
+			if not n in gulm_lockservs:
+				lockserv_list.append((n, False))
+		clumap['gulm'] = True
+		clumap['gulm_url'] = '%stab=%s' % (prop_baseurl, PROP_GULM_TAB)
+		clumap['gulm_lockservers'] = lockserv_list
+
+	#-------------
+	#quorum disk params
+	quorumd_url = '%stab=%s' % (prop_baseurl, PROP_QDISK_TAB)
+	clumap['quorumd_url'] = quorumd_url
+	is_quorumd = model.isQuorumd()
+	clumap['is_quorumd'] = is_quorumd
+	clumap['interval'] = ''
+	clumap['tko'] = ''
+	clumap['votes'] = ''
+	clumap['min_score'] = ''
+	clumap['device'] = ''
+	clumap['label'] = ''
+
+	#list struct for heuristics...
+	hlist = list()
+
+	if is_quorumd:
+		qdp = model.getQuorumdPtr()
+		interval = qdp.getAttribute('interval')
+		if interval is not None:
+			clumap['interval'] = interval
+
+		tko = qdp.getAttribute('tko')
+		if tko is not None:
+			clumap['tko'] = tko
+
+		votes = qdp.getAttribute('votes')
+		if votes is not None:
+			clumap['votes'] = votes
+
+		min_score = qdp.getAttribute('min_score')
+		if min_score is not None:
+			clumap['min_score'] = min_score
+
+		device = qdp.getAttribute('device')
+		if device is not None:
+			clumap['device'] = device
+
+		label = qdp.getAttribute('label')
+		if label is not None:
+			clumap['label'] = label
+
+		heuristic_kids = qdp.getChildren()
+
+		for kid in heuristic_kids:
+			hmap = {}
+			hprog = kid.getAttribute('program')
+			if hprog is None:
+				continue
+
+			hscore = kid.getAttribute('score')
+			hmap['hprog'] = hprog
+			if hscore is not None:
+				hmap['hscore'] = hscore
+			else:
+				hmap['hscore'] = ''
+
+			hinterval = kid.getAttribute('interval')
+			if hinterval is not None:
+				hmap['hinterval'] = hinterval
+			else:
+				hmap['hinterval'] = ''
+			hlist.append(hmap)
+	clumap['hlist'] = hlist
 
-  return clumap
+	return clumap
 
 def getClustersInfo(self, status, req):
-  clu_map = {}
-  nodelist = list()
-  svclist = list()
-  clulist = list()
-  baseurl = req['URL']
-
-  for item in status:
-    if item['type'] == "node":
-      nodelist.append(item)
-    elif item['type'] == "service":
-      svclist.append(item)
-    elif item['type'] == "cluster":
-      clulist.append(item)
-    else:
-      continue
-  if len(clulist) < 1:
-    return {}
-  clu = clulist[0]
-  if 'error' in clu:
-    clu_map['error'] = True
-  clustername = clu['name']
-  if clu['alias'] != "":
-    clu_map['clusteralias'] = clu['alias']
-  else:
-    clu_map['clusteralias'] = clustername
-  clu_map['clustername'] = clustername
-  if clu['quorate'] == "true":
-    clu_map['status'] = "Quorate"
-    clu_map['running'] = "true"
-  else:
-    clu_map['status'] = "Not Quorate"
-    clu_map['running'] = "false"
-  clu_map['votes'] = clu['votes']
-  clu_map['minquorum'] = clu['minQuorum']
+	clu_map = {}
+	nodelist = list()
+	svclist = list()
+	clulist = list()
+	baseurl = req['URL']
 
-  clu_map['clucfg'] = '%s?pagetype=%s&clustername=%s' \
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		elif item['type'] == 'cluster':
+			clulist.append(item)
+		else:
+			continue
+	if len(clulist) < 1:
+		return {}
+	clu = clulist[0]
+	if 'error' in clu:
+		clu_map['error'] = True
+	clustername = clu['name']
+	if clu['alias']:
+		clu_map['clusteralias'] = clu['alias']
+	else:
+		clu_map['clusteralias'] = clustername
+	clu_map['clustername'] = clustername
+	if clu['quorate'] == 'true':
+		clu_map['status'] = 'Quorate'
+		clu_map['running'] = 'true'
+	else:
+		clu_map['status'] = 'Not Quorate'
+		clu_map['running'] = 'false'
+	clu_map['votes'] = clu['votes']
+	clu_map['minquorum'] = clu['minQuorum']
+
+	clu_map['clucfg'] = '%s?pagetype=%s&clustername=%s' \
 	% (baseurl, CLUSTER_CONFIG, clustername)
 
-  clu_map['restart_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+	clu_map['restart_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
 	% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_RESTART)
-  clu_map['stop_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+	clu_map['stop_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
 	% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_STOP)
-  clu_map['start_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+	clu_map['start_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
 	% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_START)
-  clu_map['delete_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
+	clu_map['delete_url'] = '%s?pagetype=%s&clustername=%s&task=%s' \
 	% (baseurl, CLUSTER_PROCESS, clustername, CLUSTER_DELETE)
 
-  svc_dict_list = list()
-  for svc in svclist:
-      svc_dict = {}
-      svc_dict['nodename'] = svc['nodename']
-      svcname = svc['name']
-      svc_dict['name'] = svcname
-      svc_dict['srunning'] = svc['running']
-      svc_dict['servicename'] = svcname
-
-      if svc.has_key('is_vm') and svc['is_vm'] is True:
-        target_page = VM_CONFIG
-      else:
-        target_page = SERVICE
-
-      svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
-		% (baseurl, target_page, clustername, svcname)
-      svc_dict['svcurl'] = svcurl
-      svc_dict_list.append(svc_dict)
-  clu_map['currentservices'] = svc_dict_list
-  node_dict_list = list()
-
-  for item in nodelist:
-    nmap = {}
-    name = item['name']
-    nmap['nodename'] = name
-    cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+	svc_dict_list = list()
+	for svc in svclist:
+		svc_dict = {}
+		svc_dict['nodename'] = svc['nodename']
+		svcname = svc['name']
+		svc_dict['name'] = svcname
+		svc_dict['srunning'] = svc['running']
+		svc_dict['servicename'] = svcname
+
+		if svc.has_key('is_vm') and svc['is_vm'] is True:
+			target_page = VM_CONFIG
+		else:
+			target_page = SERVICE
+
+		svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+			% (baseurl, target_page, clustername, svcname)
+		svc_dict['svcurl'] = svcurl
+		svc_dict_list.append(svc_dict)
+
+	clu_map['currentservices'] = svc_dict_list
+	node_dict_list = list()
+
+	for item in nodelist:
+		nmap = {}
+		name = item['name']
+		nmap['nodename'] = name
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
 		% (baseurl, NODE, clustername, name)
-    nmap['configurl'] = cfgurl
-    if item['clustered'] == "true":
-      nmap['status'] = NODE_ACTIVE
-    elif item['online'] == "false":
-      nmap['status'] = NODE_UNKNOWN
-    else:
-      nmap['status'] = NODE_INACTIVE
-    node_dict_list.append(nmap)
+		nmap['configurl'] = cfgurl
+		if item['clustered'] == 'true':
+			nmap['status'] = NODE_ACTIVE
+		elif item['online'] == 'false':
+			nmap['status'] = NODE_UNKNOWN
+		else:
+			nmap['status'] = NODE_INACTIVE
+		node_dict_list.append(nmap)
 
-  clu_map['currentnodes'] = node_dict_list
-  return clu_map
+	clu_map['currentnodes'] = node_dict_list
+	return clu_map
 
 def getNodeInfo(self, model, status, request):
-  infohash = {}
-  item = None
-  baseurl = request['URL']
-  nodestate = NODE_ACTIVE
-  svclist = list()
-  for thing in status:
-    if thing['type'] == "service":
-      svclist.append(thing)
-
-  #Get cluster name and node name from request
-  try:
-    clustername = request['clustername']
-    nodename = request['nodename']
-  except Exception, e:
-    luci_log.debug_verbose('getNodeInfo0: %s' % str(e))
-    return {}
-
-  #extract correct node line from cluster status
-  found = False
-  for item in status:
-    if (item['type'] == "node") and (item['name'] == nodename):
-      found = True
-      break
-  if found is False:
-    luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
-    return {}
-
-  #Now determine state of node...
-  if item['online'] == "false":
-    nodestate = NODE_UNKNOWN
-  elif item['clustered'] == "true":
-    nodestate = NODE_ACTIVE
-  else:
-    nodestate = NODE_INACTIVE
-
-  infohash['nodestate'] = nodestate
-  infohash['nodename'] = nodename
-
-  #set up drop down links
-  if nodestate == NODE_ACTIVE:
-    infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, nodename, clustername)
-    infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
-    infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
-    infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
-  elif nodestate == NODE_INACTIVE:
-    infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, nodename, clustername)
-    infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
-    infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
-    infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
-  else:
-    infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-      % (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
-
-  #figure out current services running on this node
-  svc_dict_list = list()
-  for svc in svclist:
-    if svc['nodename'] == nodename:
-      svc_dict = {}
-      svcname = svc['name']
-      svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
-        % (baseurl, SERVICE, clustername, svcname)
-      svc_dict['servicename'] = svcname
-      svc_dict['svcurl'] = svcurl
-      svc_dict_list.append(svc_dict)
-
-  infohash['currentservices'] = svc_dict_list
-
-  fdom_dict_list = list()
-  gulm_cluster = False
-  if model:
-    gulm_cluster = model.getGULMPtr() is not None
-    try:
-      infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
-    except:
-      infohash['gulm_lockserver'] = False
-    #next is faildoms
-    fdoms = model.getFailoverDomainsForNode(nodename)
-    for fdom in fdoms:
-      fdom_dict = {}
-      fdom_dict['name'] = fdom.getName()
-      fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+	infohash = {}
+	item = None
+	baseurl = request['URL']
+	nodestate = NODE_ACTIVE
+	svclist = list()
+	for thing in status:
+		if thing['type'] == 'service':
+			svclist.append(thing)
+
+	# Get cluster name and node name from request
+	try:
+		clustername = request['clustername']
+		nodename = request['nodename']
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo0: %r' % e)
+		return {}
+
+	# extract correct node line from cluster status
+	found = False
+	for item in status:
+		if (item['type'] == 'node') and (item['name'] == nodename):
+			found = True
+			break
+
+	if found is False:
+		item = {}
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getNodeInfo1: Unable to resolve node name in cluster status')
+		return item
+
+	# Now determine state of node...
+	if item['online'] == 'false':
+		nodestate = NODE_UNKNOWN
+	elif item['clustered'] == 'true':
+		nodestate = NODE_ACTIVE
+	else:
+		nodestate = NODE_INACTIVE
+
+	infohash['nodestate'] = nodestate
+	infohash['nodename'] = nodename
+
+	# set up drop down links
+	if nodestate == NODE_ACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	elif nodestate == NODE_INACTIVE:
+		infohash['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, nodename, clustername)
+		infohash['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_REBOOT, nodename, clustername)
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+		infohash['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_DELETE, nodename, clustername)
+	else:
+		infohash['fence_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+			% (baseurl, NODE_PROCESS, NODE_FENCE, nodename, clustername)
+
+	# figure out current services running on this node
+	svc_dict_list = list()
+	for svc in svclist:
+		if svc['nodename'] == nodename:
+			svc_dict = {}
+			svcname = svc['name']
+			svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+				% (baseurl, SERVICE, clustername, svcname)
+			svc_dict['servicename'] = svcname
+			svc_dict['svcurl'] = svcurl
+			svc_dict_list.append(svc_dict)
+
+	infohash['currentservices'] = svc_dict_list
+
+	fdom_dict_list = list()
+	gulm_cluster = False
+	if model:
+		gulm_cluster = model.getGULMPtr() is not None
+		try:
+			infohash['gulm_lockserver'] = model.isNodeLockserver(nodename)
+		except:
+			infohash['gulm_lockserver'] = False
+		# next is faildoms
+		fdoms = model.getFailoverDomainsForNode(nodename)
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
 		% (baseurl, FDOM_CONFIG, clustername, fdom.getName())
-      fdom_dict['fdomurl'] = fdomurl
-      fdom_dict_list.append(fdom_dict)
-  else:
-    infohash['gulm_lockserver'] = False
-
-  infohash['fdoms'] = fdom_dict_list
-
-  #return infohash
-  infohash['d_states'] = None
-
-  nodename_resolved = resolve_nodename(self, clustername, nodename)
-
-  if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
-  #call service module on node and find out which daemons are running
-    try:
-      rc = RicciCommunicator(nodename_resolved)
-      if not rc:
-        raise Exception, 'rc is none'
-    except Exception, e:
-      rc = None
-      infohash['ricci_error'] = True
-      luci_log.info('Error connecting to %s: %s' \
-          % (nodename_resolved, str(e)))
-
-    if rc is not None:
-      dlist = list()
-      dlist.append("ccsd")
-      if not gulm_cluster:
-        dlist.append("cman")
-        dlist.append("fenced")
-      else:
-        dlist.append("lock_gulmd")
-      dlist.append("rgmanager")
-      states = rq.getDaemonStates(rc, dlist)
-      infohash['d_states'] = states
-  else:
-    infohash['ricci_error'] = True
-
-  infohash['logurl'] = '/luci/logs/?nodename=%s&clustername=%s' \
-	% (nodename_resolved, clustername)
-  return infohash
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
+	else:
+		infohash['gulm_lockserver'] = False
+
+	infohash['fdoms'] = fdom_dict_list
+
+	# return infohash
+	infohash['d_states'] = None
+
+	nodename_resolved = resolve_nodename(self, clustername, nodename)
+
+	if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
+		# call service module on node and find out which daemons are running
+		try:
+			rc = RicciCommunicator(nodename_resolved)
+			if not rc:
+				raise Exception, 'rc is none'
+		except Exception, e:
+			rc = None
+			infohash['ricci_error'] = True
+			luci_log.info('Error connecting to %s: %s' \
+				% (nodename_resolved, str(e)))
+
+		if rc is not None:
+			dlist = list()
+			dlist.append('ccsd')
+			if not gulm_cluster:
+				dlist.append('cman')
+				dlist.append('fenced')
+			else:
+				dlist.append('lock_gulmd')
+			dlist.append('rgmanager')
+			states = rq.getDaemonStates(rc, dlist)
+			infohash['d_states'] = states
+	else:
+		infohash['ricci_error'] = True
+
+	infohash['logurl'] = '/luci/logs/?nodename=%s&clustername=%s' \
+		% (nodename_resolved, clustername)
+	return infohash
 
 def getNodesInfo(self, model, status, req):
-  resultlist = list()
-  nodelist = list()
-  svclist = list()
-
-  #Sort into lists...
-  for item in status:
-    if item['type'] == "node":
-      nodelist.append(item)
-    elif item['type'] == "service":
-      svclist.append(item)
-    else:
-      continue
-
-  try:
-    clustername = req['clustername']
-    if not clustername:
-      raise KeyError, 'clustername is blank'
-  except:
-    try:
-      clustername = req.form['clustername']
-      raise KeyError, 'clustername is blank'
-    except:
-      try:
-        clustername = req.form['clusterName']
-      except:
-        try:
-          clustername = model.getClusterName()
-        except:
-          luci_log.debug_verbose('GNI0: unable to determine cluster name')
-          return {}
-
-  for item in nodelist:
-    nl_map = {}
-    name = item['name']
-    nl_map['nodename'] = name
-    try:
-      nl_map['gulm_lockserver'] = model.isNodeLockserver(name)
-    except:
-      nl_map['gulm_lockserver'] = False
-
-    try:
-      baseurl = req['URL']
-    except:
-      baseurl = '/luci/cluster/index_html'
-
-    cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
-      % (baseurl, NODE, clustername, name)
-    nl_map['configurl'] = cfgurl
-    nl_map['fenceurl'] = '%s#fence' % cfgurl
-    if item['clustered'] == "true":
-      nl_map['status'] = NODE_ACTIVE
-      nl_map['status_str'] = NODE_ACTIVE_STR
-    elif item['online'] == "false":
-      nl_map['status'] = NODE_UNKNOWN
-      nl_map['status_str'] = NODE_UNKNOWN_STR
-    else:
-      nl_map['status'] = NODE_INACTIVE
-      nl_map['status_str'] = NODE_INACTIVE_STR
+	resultlist = list()
+	nodelist = list()
+	svclist = list()
+
+	#Sort into lists...
+	for item in status:
+		if item['type'] == 'node':
+			nodelist.append(item)
+		elif item['type'] == 'service':
+			svclist.append(item)
+		else:
+			continue
+
+	try:
+		clustername = req['clustername']
+		if not clustername:
+			raise KeyError, 'clustername is blank'
+	except:
+		try:
+			clustername = req.form['clustername']
+			if not clustername: raise KeyError, 'clustername is blank'
+		except:
+			try:
+				clustername = req.form['clusterName']
+			except:
+				try:
+					clustername = model.getClusterName()
+				except:
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('GNI0: unable to determine cluster name')
+					return {}
+
+	for item in nodelist:
+		nl_map = {}
+		name = item['name']
+		nl_map['nodename'] = name
+		try:
+			nl_map['gulm_lockserver'] = model.isNodeLockserver(name)
+		except:
+			nl_map['gulm_lockserver'] = False
 
-    nodename_resolved = resolve_nodename(self, clustername, name)
+		try:
+			baseurl = req['URL']
+		except:
+			baseurl = '/luci/cluster/index_html'
 
-    nl_map['logurl'] = '/luci/logs?nodename=%s&clustername=%s' \
+		cfgurl = '%s?pagetype=%s&clustername=%s&nodename=%s' \
+			% (baseurl, NODE, clustername, name)
+		nl_map['configurl'] = cfgurl
+		nl_map['fenceurl'] = '%s#fence' % cfgurl
+		if item['clustered'] == 'true':
+			nl_map['status'] = NODE_ACTIVE
+			nl_map['status_str'] = NODE_ACTIVE_STR
+		elif item['online'] == 'false':
+			nl_map['status'] = NODE_UNKNOWN
+			nl_map['status_str'] = NODE_UNKNOWN_STR
+		else:
+			nl_map['status'] = NODE_INACTIVE
+			nl_map['status_str'] = NODE_INACTIVE_STR
+
+		nodename_resolved = resolve_nodename(self, clustername, name)
+
+		nl_map['logurl'] = '/luci/logs?nodename=%s&clustername=%s' \
 		% (nodename_resolved, clustername)
 
-    #set up URLs for dropdown menu...
-    if nl_map['status'] == NODE_ACTIVE:
-      nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, name, clustername)
-      nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
-      nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
-      nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
-    elif nl_map['status'] == NODE_INACTIVE:
-      nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, name, clustername)
-      nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
-      nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
-      nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
-    else:
-      nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
-        % (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
-
-    #figure out current services running on this node
-    svc_dict_list = list()
-    for svc in svclist:
-      if svc['nodename'] == name:
-        svc_dict = {}
-        svcname = svc['name']
-        svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
-          % (baseurl, SERVICE, clustername, svcname)
-        svc_dict['servicename'] = svcname
-        svc_dict['svcurl'] = svcurl
-        svc_dict_list.append(svc_dict)
-
-    nl_map['currentservices'] = svc_dict_list
-    #next is faildoms
-
-    if model:
-      fdoms = model.getFailoverDomainsForNode(name)
-    else:
-      nl_map['ricci_error'] = True
-      fdoms = list()
-    fdom_dict_list = list()
-    for fdom in fdoms:
-      fdom_dict = {}
-      fdom_dict['name'] = fdom.getName()
-      fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
+		#set up URLs for dropdown menu...
+		if nl_map['status'] == NODE_ACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_LEAVE_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		elif nl_map['status'] == NODE_INACTIVE:
+			nl_map['jl_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_JOIN_CLUSTER, name, clustername)
+			nl_map['reboot_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_REBOOT, name, clustername)
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+			nl_map['delete_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_DELETE, name, clustername)
+		else:
+			nl_map['fence_it_url'] = '%s?pagetype=%s&task=%s&nodename=%s&clustername=%s' \
+				% (baseurl, NODE_PROCESS, NODE_FENCE, name, clustername)
+
+		#figure out current services running on this node
+		svc_dict_list = list()
+		for svc in svclist:
+			if svc['nodename'] == name:
+				svc_dict = {}
+				svcname = svc['name']
+				svcurl = '%s?pagetype=%s&clustername=%s&servicename=%s' \
+					% (baseurl, SERVICE, clustername, svcname)
+				svc_dict['servicename'] = svcname
+				svc_dict['svcurl'] = svcurl
+				svc_dict_list.append(svc_dict)
+
+		nl_map['currentservices'] = svc_dict_list
+		#next is faildoms
+
+		if model:
+			fdoms = model.getFailoverDomainsForNode(name)
+		else:
+			nl_map['ricci_error'] = True
+			fdoms = list()
+		fdom_dict_list = list()
+		for fdom in fdoms:
+			fdom_dict = {}
+			fdom_dict['name'] = fdom.getName()
+			fdomurl = '%s?pagetype=%s&clustername=%s&fdomname=%s' \
 		% (baseurl, FDOM_CONFIG, clustername, fdom.getName())
-      fdom_dict['fdomurl'] = fdomurl
-      fdom_dict_list.append(fdom_dict)
+			fdom_dict['fdomurl'] = fdomurl
+			fdom_dict_list.append(fdom_dict)
 
-    nl_map['fdoms'] = fdom_dict_list
-    resultlist.append(nl_map)
+		nl_map['fdoms'] = fdom_dict_list
+		resultlist.append(nl_map)
 
-  return resultlist
+	return resultlist
 
 def getFence(self, model, request):
-  if not model:
-    luci_log.debug_verbose('getFence0: model is None')
-    return {}
-
-  fence_map = {}
-  fencename = request['fencename']
-  fencedevs = model.getFenceDevices()
-  for fencedev in fencedevs:
-    if fencedev.getName().strip() == fencename:
-      fence_map = fencedev.getAttributes()
-      try:
-        fence_map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
-      except:
-        fence_map['unknown'] = True
-        fence_map['pretty_name'] = fencedev.getAgentType()
-
-      nodes_used = list()
-      nodes = model.getNodes()
-      for node in nodes:
-        flevels = node.getFenceLevels()
-        for flevel in flevels: #These are the method blocks...
-          kids = flevel.getChildren()
-          for kid in kids: #These are actual devices in each level
-            if kid.getName().strip() == fencedev.getName().strip():
-              #See if this fd already has an entry for this node
-              found_duplicate = False
-              for item in nodes_used:
-                if item['nodename'] == node.getName().strip():
-                  found_duplicate = True
-              if found_duplicate is True:
-                continue
-              baseurl = request['URL']
-              clustername = model.getClusterName()
-              node_hash = {}
-              cur_nodename = node.getName().strip()
-              node_hash['nodename'] = cur_nodename
-              node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
-                % (baseurl, clustername, cur_nodename, NODE)
-              nodes_used.append(node_hash)
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFence0: model is None')
+		return {}
 
-      fence_map['nodesused'] = nodes_used
-      return fence_map
+	fence_map = {}
+	fencename = request['fencename']
+	fencedevs = model.getFenceDevices()
+	for fencedev in fencedevs:
+		if fencedev.getName().strip() == fencename:
+			fence_map = fencedev.getAttributes()
+			try:
+				fence_map['pretty_name'] = FENCE_OPTS[fencedev.getAgentType()]
+			except:
+				fence_map['unknown'] = True
+				fence_map['pretty_name'] = fencedev.getAgentType()
+
+			nodes_used = list()
+			nodes = model.getNodes()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels: #These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids: #These are actual devices in each level
+						if kid.getName().strip() == fencedev.getName().strip():
+							#See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							baseurl = request['URL']
+							clustername = model.getClusterName()
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fence_map['nodesused'] = nodes_used
+			return fence_map
 
-  return fence_map
+	return fence_map
 
 def getFDForInstance(fds, name):
-  for fd in fds:
-    if fd.getName().strip() == name:
-      return fd
+	for fd in fds:
+		if fd.getName().strip() == name:
+			return fd
 
-  raise
+	raise KeyError, name
 
 def getFenceInfo(self, model, request):
-  if not model:
-    luci_log.debug_verbose('getFenceInfo00: model is None')
-    return {}
-
-  try:
-    clustername = request['clustername']
-  except:
-    try:
-      clustername = request.form['clustername']
-    except:
-      luci_log.debug_verbose('getFenceInfo0: unable to determine cluster name')
-      return {}
-
-  try:
-    baseurl = request['URL']
-  except Exception, e:
-    luci_log.debug_verbose('getFenceInfo1: no request.URL')
-    return {}
-
-  fence_map = {}
-  level1 = list() #First level fence devices
-  level2 = list() #Second level fence devices
-  shared1 = list() #List of available sharable fence devs not used in level1
-  shared2 = list() #List of available sharable fence devs not used in level2
-  fence_map['level1'] = level1
-  fence_map['level2'] = level2
-  fence_map['shared1'] = shared1
-  fence_map['shared2'] = shared2
-
-  major_num = 1
-  minor_num = 100
-
-  try:
-    nodename = request['nodename']
-  except:
-    try:
-      nodename = request.form['nodename']
-    except:
-      luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %s' \
-          % str(e))
-      return {}
-
-  #Here we need to get fences for a node - just the first two levels
-  #Each level has its own list of fence devs used in that level
-  #For each fence dev, a list of instance structs is appended
-  #In addition, for each level, a list of available but unused fence devs
-  #is returned.
-  try:
-    node = model.retrieveNodeByName(nodename)
-  except GeneralError, e:
-    luci_log.debug_verbose('getFenceInfo3: unabel to find node name %s in current node list' % (str(nodename), str(e)))
-    return {}
-
-  fds = model.getFenceDevices()
-
-  levels = node.getFenceLevels()
-  len_levels = len(levels)
-
-  if len_levels == 0:
-    return fence_map
-
-  if len_levels >= 1:
-    first_level = levels[0]
-    kids = first_level.getChildren()
-    last_kid_fd = None  #This is a marker for allowing multi instances
-                        #beneath a fencedev
-    for kid in kids:
-      instance_name = kid.getName().strip()
-      try:
-        fd = getFDForInstance(fds, instance_name)
-      except:
-        fd = None #Set to None in case last time thru loop
-        continue
-
-      if fd is not None:
-        if fd.isShared() is False:  #Not a shared dev...build struct and add
-          fencedev = {}
-          try:
-            fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
-          except:
-            fencedev['unknown'] = True
-            fencedev['prettyname'] = fd.getAgentType()
-          fencedev['isShared'] = False
-          fencedev['id'] = str(major_num)
-          major_num = major_num + 1
-          devattrs = fd.getAttributes()
-          kees = devattrs.keys()
-          for kee in kees:
-            fencedev[kee] = devattrs[kee]
-          kidattrs = kid.getAttributes()
-          kees = kidattrs.keys()
-          for kee in kees:
-            if kee == "name":
-              continue #Don't duplicate name attr
-            fencedev[kee] = kidattrs[kee]
-          #This fencedev struct is complete, and needs to be placed on the
-          #level1 Q. Because it is non-shared, we should set last_kid_fd
-          #to none.
-          last_kid_fd = None
-          level1.append(fencedev)
-        else:  #This dev is shared
-          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):  #just append a new instance struct to last_kid_fd
-            instance_struct = {}
-            instance_struct['id'] = str(minor_num)
-            minor_num = minor_num + 1
-            kidattrs = kid.getAttributes()
-            kees = kidattrs.keys()
-            for kee in kees:
-              if kee == "name":
-                continue
-              instance_struct[kee] = kidattrs[kee]
-            #Now just add this struct to last_kid_fd and reset last_kid_fd
-            ilist = last_kid_fd['instance_list']
-            ilist.append(instance_struct)
-            #last_kid_fd = fd
-            continue
-          else: #Shared, but not used above...so we need a new fencedev struct
-            fencedev = {}
-            try:
-              fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
-            except:
-              fencedev['unknown'] = True
-              fencedev['prettyname'] = fd.getAgentType()
-            fencedev['isShared'] = True
-            fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
-              % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
-            fencedev['id'] = str(major_num)
-            major_num = major_num + 1
-            inlist = list()
-            fencedev['instance_list'] = inlist
-            devattrs = fd.getAttributes()
-            kees = devattrs.keys()
-            for kee in kees:
-              fencedev[kee] = devattrs[kee]
-            instance_struct = {}
-            kidattrs = kid.getAttributes()
-            kees = kidattrs.keys()
-            for kee in kees:
-              if kee == "name":
-                continue
-              instance_struct[kee] = kidattrs[kee]
-            inlist.append(instance_struct)
-            level1.append(fencedev)
-            last_kid_fd = fencedev
-            continue
-    fence_map['level1'] = level1
-
-    #level1 list is complete now, but it is still necessary to build shared1
-    for fd in fds:
-      isUnique = True
-      if fd.isShared() is False:
-        continue
-      for fdev in level1:
-        if fd.getName().strip() == fdev['name']:
-          isUnique = False
-          break
-      if isUnique is True:
-        shared_struct = {}
-        shared_struct['name'] = fd.getName().strip()
-        agentname = fd.getAgentType()
-        shared_struct['agent'] = agentname
-        try:
-          shared_struct['prettyname'] = FENCE_OPTS[agentname]
-        except:
-          shared_struct['unknown'] = True
-          shared_struct['prettyname'] = agentname
-        shared1.append(shared_struct)
-    fence_map['shared1'] = shared1
-
-  #YUK: This next section violates the DRY rule, :-(
-  if len_levels >= 2:
-    second_level = levels[1]
-    kids = second_level.getChildren()
-    last_kid_fd = None  #This is a marker for allowing multi instances
-                        #beneath a fencedev
-    for kid in kids:
-      instance_name = kid.getName().strip()
-      try:
-        fd = getFDForInstance(fds, instance_name)
-      except:
-        fd = None #Set to None in case last time thru loop
-        continue
-      if fd is not None:
-        if fd.isShared() is False:  #Not a shared dev...build struct and add
-          fencedev = {}
-          try:
-            fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
-          except:
-            fencedev['unknown'] = True
-            fencedev['prettyname'] = fd.getAgentType()
-          fencedev['isShared'] = False
-          fencedev['id'] = str(major_num)
-          major_num = major_num + 1
-          devattrs = fd.getAttributes()
-          kees = devattrs.keys()
-          for kee in kees:
-            fencedev[kee] = devattrs[kee]
-          kidattrs = kid.getAttributes()
-          kees = kidattrs.keys()
-          for kee in kees:
-            if kee == "name":
-              continue #Don't duplicate name attr
-            fencedev[kee] = kidattrs[kee]
-          #This fencedev struct is complete, and needs to be placed on the
-          #level2 Q. Because it is non-shared, we should set last_kid_fd
-          #to none.
-          last_kid_fd = None
-          level2.append(fencedev)
-        else:  #This dev is shared
-          if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):  #just append a new instance struct to last_kid_fd
-            instance_struct = {}
-            instance_struct['id'] = str(minor_num)
-            minor_num = minor_num + 1
-            kidattrs = kid.getAttributes()
-            kees = kidattrs.keys()
-            for kee in kees:
-              if kee == "name":
-                continue
-              instance_struct[kee] = kidattrs[kee]
-            #Now just add this struct to last_kid_fd and reset last_kid_fd
-            ilist = last_kid_fd['instance_list']
-            ilist.append(instance_struct)
-            #last_kid_fd = fd
-            continue
-          else: #Shared, but not used above...so we need a new fencedev struct
-            fencedev = {}
-            try:
-              fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
-            except:
-              fencedev['unknown'] = True
-              fencedev['prettyname'] = fd.getAgentType()
-            fencedev['isShared'] = True
-            fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
-              % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
-            fencedev['id'] = str(major_num)
-            major_num = major_num + 1
-            inlist = list()
-            fencedev['instance_list'] = inlist
-            devattrs = fd.getAttributes()
-            kees = devattrs.keys()
-            for kee in kees:
-              fencedev[kee] = devattrs[kee]
-            instance_struct = {}
-            kidattrs = kid.getAttributes()
-            kees = kidattrs.keys()
-            for kee in kees:
-              if kee == "name":
-                continue
-              instance_struct[kee] = kidattrs[kee]
-            inlist.append(instance_struct)
-            level2.append(fencedev)
-            last_kid_fd = fencedev
-            continue
-    fence_map['level2'] = level2
-
-    #level2 list is complete but like above, we need to build shared2
-    for fd in fds:
-      isUnique = True
-      if fd.isShared() is False:
-        continue
-      for fdev in level2:
-        if fd.getName().strip() == fdev['name']:
-          isUnique = False
-          break
-      if isUnique is True:
-        shared_struct = {}
-        shared_struct['name'] = fd.getName().strip()
-        agentname = fd.getAgentType()
-        shared_struct['agent'] = agentname
-        try:
-          shared_struct['prettyname'] = FENCE_OPTS[agentname]
-        except:
-          shared_struct['unknown'] = True
-          shared_struct['prettyname'] = agentname
-        shared2.append(shared_struct)
-    fence_map['shared2'] = shared2
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo00: model is None')
+		return {}
+
+	try:
+		clustername = request['clustername']
+	except:
+		try:
+			clustername = request.form['clustername']
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('getFenceInfo0: unable to determine cluster name')
+			return {}
+
+	try:
+		baseurl = request['URL']
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo1: no request.URL')
+		return {}
 
-  return fence_map
+	fence_map = {}
+	level1 = list() #First level fence devices
+	level2 = list() #Second level fence devices
+	shared1 = list() #List of available sharable fence devs not used in level1
+	shared2 = list() #List of available sharable fence devs not used in level2
+	fence_map['level1'] = level1
+	fence_map['level2'] = level2
+	fence_map['shared1'] = shared1
+	fence_map['shared2'] = shared2
+
+	major_num = 1
+	minor_num = 100
+
+	try:
+		nodename = request['nodename']
+	except:
+		try:
+			nodename = request.form['nodename']
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('getFenceInfo2: unable to extract nodename: %r' % e)
+			return {}
+
+	# Here we need to get fences for a node - just the first two levels
+	# Each level has its own list of fence devs used in that level
+	# For each fence dev, a list of instance structs is appended
+	# In addition, for each level, a list of available but unused fence devs
+	# is returned.
+	try:
+		node = model.retrieveNodeByName(nodename)
+	except GeneralError, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFenceInfo3: unable to find node name "%s" in current node list: %r' % (str(nodename), e))
+		return {}
+
+	fds = model.getFenceDevices()
+
+	levels = node.getFenceLevels()
+	len_levels = len(levels)
+
+	if len_levels == 0:
+		return fence_map
+
+	if len_levels >= 1:
+		first_level = levels[0]
+		kids = first_level.getChildren()
+		last_kid_fd = None	# Marker allowing multiple instances beneath one fence device
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				fd = None # Set to None in case this is the last time through the loop
+				continue
+
+			if fd is not None:
+				if fd.isShared() is False:	#Not a shared dev...build struct and add
+					fencedev = {}
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							continue #Don't duplicate name attr
+						fencedev[kee] = kidattrs[kee]
+					#This fencedev struct is complete, and needs to be placed on the
+					#level1 Q. Because it is non-shared, we should set last_kid_fd
+					#to none.
+					last_kid_fd = None
+					level1.append(fencedev)
+				else:	#This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):	#just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						#Now just add this struct to last_kid_fd and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						#last_kid_fd = fd
+						continue
+					else: #Shared, but not used above...so we need a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						inlist.append(instance_struct)
+						level1.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level1'] = level1
+
+		#level1 list is complete now, but it is still necessary to build shared1
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level1:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared1.append(shared_struct)
+		fence_map['shared1'] = shared1
+
+	#YUK: This next section violates the DRY rule, :-(
+	if len_levels >= 2:
+		second_level = levels[1]
+		kids = second_level.getChildren()
+		last_kid_fd = None	# Marker allowing multiple instances beneath one fence device
+		for kid in kids:
+			instance_name = kid.getName().strip()
+			try:
+				fd = getFDForInstance(fds, instance_name)
+			except:
+				fd = None # Set to None in case this is the last time through the loop
+				continue
+			if fd is not None:
+				if fd.isShared() is False:	#Not a shared dev...build struct and add
+					fencedev = {}
+					try:
+						fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+					except:
+						fencedev['unknown'] = True
+						fencedev['prettyname'] = fd.getAgentType()
+					fencedev['isShared'] = False
+					fencedev['id'] = str(major_num)
+					major_num = major_num + 1
+					devattrs = fd.getAttributes()
+					kees = devattrs.keys()
+					for kee in kees:
+						fencedev[kee] = devattrs[kee]
+					kidattrs = kid.getAttributes()
+					kees = kidattrs.keys()
+					for kee in kees:
+						if kee == 'name':
+							continue #Don't duplicate name attr
+						fencedev[kee] = kidattrs[kee]
+					#This fencedev struct is complete, and needs to be placed on the
+					#level2 Q. Because it is non-shared, we should set last_kid_fd
+					#to none.
+					last_kid_fd = None
+					level2.append(fencedev)
+				else:	#This dev is shared
+					if (last_kid_fd is not None) and (fd.getName().strip() == last_kid_fd['name'].strip()):	#just append a new instance struct to last_kid_fd
+						instance_struct = {}
+						instance_struct['id'] = str(minor_num)
+						minor_num = minor_num + 1
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						#Now just add this struct to last_kid_fd and reset last_kid_fd
+						ilist = last_kid_fd['instance_list']
+						ilist.append(instance_struct)
+						#last_kid_fd = fd
+						continue
+					else: #Shared, but not used above...so we need a new fencedev struct
+						fencedev = {}
+						try:
+							fencedev['prettyname'] = FENCE_OPTS[fd.getAgentType()]
+						except:
+							fencedev['unknown'] = True
+							fencedev['prettyname'] = fd.getAgentType()
+						fencedev['isShared'] = True
+						fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+							% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+						fencedev['id'] = str(major_num)
+						major_num = major_num + 1
+						inlist = list()
+						fencedev['instance_list'] = inlist
+						devattrs = fd.getAttributes()
+						kees = devattrs.keys()
+						for kee in kees:
+							fencedev[kee] = devattrs[kee]
+						instance_struct = {}
+						kidattrs = kid.getAttributes()
+						kees = kidattrs.keys()
+						for kee in kees:
+							if kee == 'name':
+								continue
+							instance_struct[kee] = kidattrs[kee]
+						inlist.append(instance_struct)
+						level2.append(fencedev)
+						last_kid_fd = fencedev
+						continue
+		fence_map['level2'] = level2
+
+		#level2 list is complete but like above, we need to build shared2
+		for fd in fds:
+			isUnique = True
+			if fd.isShared() is False:
+				continue
+			for fdev in level2:
+				if fd.getName().strip() == fdev['name']:
+					isUnique = False
+					break
+			if isUnique is True:
+				shared_struct = {}
+				shared_struct['name'] = fd.getName().strip()
+				agentname = fd.getAgentType()
+				shared_struct['agent'] = agentname
+				try:
+					shared_struct['prettyname'] = FENCE_OPTS[agentname]
+				except:
+					shared_struct['unknown'] = True
+					shared_struct['prettyname'] = agentname
+				shared2.append(shared_struct)
+		fence_map['shared2'] = shared2
+
+	return fence_map
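
For reference, the map this function returns has the following shape. A hedged sketch with illustrative values (the field names come from the code above; the concrete values do not):

    # Illustrative only: the structure built by getFenceInfo().
    # level1/level2 hold one dict per fence device used at that level;
    # shared devices also carry an 'instance_list' of per-use attribute
    # dicts, while shared1/shared2 list sharable devices unused at that
    # level. 'id' values come from the major_num/minor_num counters.
    fence_map = {
        'level1': [{
            'name': 'apc1',                    # device attrs copied in
            'prettyname': 'APC Power Switch',  # FENCE_OPTS lookup (illustrative)
            'isShared': True,
            'id': '1',
            'cfgurl': 'https://luci/...?clustername=c1&fencename=apc1&pagetype=...',
            'instance_list': [{'id': '100', 'port': '3'}],
        }],
        'level2': [],
        'shared1': [{'name': 'apc2', 'agent': 'fence_apc',
                     'prettyname': 'APC Power Switch'}],
        'shared2': [],
    }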
 
 def getFencesInfo(self, model, request):
-  fences_map = {}
-  if not model:
-    if LUCI_DEBUG_MODE is True:
-      luci_log.debug_verbose('getFencesInfo0: model is None')
-    fences_map['fencedevs'] = list()
-    return fences_map
-
-  clustername = request['clustername']
-  baseurl = request['URL']
-  fencedevs = list() #This is for the fencedev list page
-
-  #Get list of fence devices
-  fds = model.getFenceDevices()
-  for fd in fds:
-    #This section determines which nodes use the dev
-    #create fencedev hashmap
-    nodes_used = list()
-
-    if fd.isShared() is True:
-      fencedev = {}
-      attr_hash = fd.getAttributes()
-      kees = attr_hash.keys()
-
-      for kee in kees:
-        fencedev[kee] = attr_hash[kee] #copy attrs over
-      try:
-        fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
-      except:
-        fencedev['unknown'] = True
-        fencedev['pretty_name'] = fd.getAgentType()
-
-      fencedev['agent'] = fd.getAgentType()
-      #Add config url for this fencedev
-      fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
-        % (baseurl, clustername, fd.getName().strip(), FENCEDEV)
-
-      nodes = model.getNodes()
-      for node in nodes:
-        flevels = node.getFenceLevels()
-        for flevel in flevels: #These are the method blocks...
-          kids = flevel.getChildren()
-          for kid in kids: #These are actual devices in each level
-            if kid.getName().strip() == fd.getName().strip():
-              #See if this fd already has an entry for this node
-              found_duplicate = False
-              for item in nodes_used:
-                if item['nodename'] == node.getName().strip():
-                  found_duplicate = True
-              if found_duplicate is True:
-                continue
-              node_hash = {}
-              cur_nodename = node.getName().strip()
-              node_hash['nodename'] = cur_nodename
-              node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
-                % (baseurl, clustername, cur_nodename, NODE)
-              nodes_used.append(node_hash)
+	fences_map = {}
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getFencesInfo0: model is None')
+		fences_map['fencedevs'] = list()
+		return fences_map
+
+	clustername = request['clustername']
+	baseurl = request['URL']
+	fencedevs = list() #This is for the fencedev list page
+
+	#Get list of fence devices
+	fds = model.getFenceDevices()
+	for fd in fds:
+		#This section determines which nodes use the dev
+		#create fencedev hashmap
+		nodes_used = list()
+
+		if fd.isShared() is True:
+			fencedev = {}
+			attr_hash = fd.getAttributes()
+			kees = attr_hash.keys()
 
-      fencedev['nodesused'] = nodes_used
-      fencedevs.append(fencedev)
+			for kee in kees:
+				fencedev[kee] = attr_hash[kee] #copy attrs over
+			try:
+				fencedev['pretty_name'] = FENCE_OPTS[fd.getAgentType()]
+			except:
+				fencedev['unknown'] = True
+				fencedev['pretty_name'] = fd.getAgentType()
 
-  fences_map['fencedevs'] = fencedevs
-  return fences_map
+			fencedev['agent'] = fd.getAgentType()
+			#Add config url for this fencedev
+			fencedev['cfgurl'] = '%s?clustername=%s&fencename=%s&pagetype=%s' \
+				% (baseurl, clustername, fd.getName().strip(), FENCEDEV)
+
+			nodes = model.getNodes()
+			for node in nodes:
+				flevels = node.getFenceLevels()
+				for flevel in flevels: #These are the method blocks...
+					kids = flevel.getChildren()
+					for kid in kids: #These are actual devices in each level
+						if kid.getName().strip() == fd.getName().strip():
+							#See if this fd already has an entry for this node
+							found_duplicate = False
+							for item in nodes_used:
+								if item['nodename'] == node.getName().strip():
+									found_duplicate = True
+							if found_duplicate is True:
+								continue
+							node_hash = {}
+							cur_nodename = node.getName().strip()
+							node_hash['nodename'] = cur_nodename
+							node_hash['nodeurl'] = '%s?clustername=%s&nodename=%s&pagetype=%s' \
+								% (baseurl, clustername, cur_nodename, NODE)
+							nodes_used.append(node_hash)
+
+			fencedev['nodesused'] = nodes_used
+			fencedevs.append(fencedev)
+
+	fences_map['fencedevs'] = fencedevs
+	return fences_map
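
The duplicate check in the nodes_used loop above rescans the list for every matching fence instance. A set of already-seen names does the same job with constant-time lookups; a standalone sketch (the function name and data are illustrative, not part of the patch):

    # Standalone sketch: dedup node names with a set instead of a rescan.
    def dedup_nodes(node_names):
        seen = set()
        out = []
        for name in node_names:
            if name in seen:
                continue
            seen.add(name)
            out.append({'nodename': name})
        return out

    print dedup_nodes(['n1', 'n2', 'n1'])  # 'n1' appears only once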
 
 def getVMInfo(self, model, request):
 	vm_map = {}
@@ -1447,63 +1527,63 @@
 	#CALL LUCICLUSTERINFO
 	return resList
 
-def getResourceInfo(model, request):
+def getClusterName(self, model):
+	return model.getClusterName()
+
+def getClusterAlias(self, model):
 	if not model:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI0: no model object in session')
-		return {}
+			luci_log.debug_verbose('GCA0: no model')
+		return ''
+	alias = model.getClusterAlias()
+	if not alias:
+		return model.getClusterName()
+	return alias
 
-	name = None
+def getModelBuilder(self, rc, isVirtualized):
 	try:
-		name = request['resourcename']
-	except:
-		try:
-			name = request.form['resourcename']
-		except:
-			pass
-
-	if name is None:
-		try:
-			res_type = request.form['type']
-			if res_type == 'ip':
-				name = request.form['value'].strip()
-		except:
-			pass
-
-	if name is None:
+		cluster_conf_node = rq.getClusterConf(rc)
+		if not cluster_conf_node:
+			raise Exception, 'getClusterConf returned None'
+	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI1: missing res name')
-		return {}
+			luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r' % e)
+		return None
 
 	try:
-		cluname = request['clustername']
-	except:
+		model = ModelBuilder(0, None, None, cluster_conf_node)
+		if not model:
+			raise Exception, 'ModelBuilder() returned None'
+	except Exception, e:
 		try:
-			cluname = request.form['clustername']
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%r": %r' % (cluster_conf_node.toxml(), e))
 		except:
 			if LUCI_DEBUG_MODE is True:
-				luci_log.debug_verbose('GRI2: missing cluster name')
-			return {}
+				luci_log.debug_verbose('GMB1: ModelBuilder failed')
+		return None
 
-	try:
-		baseurl = request['URL']
-	except:
-		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GRI3: missing URL')
-		return {}
+	model.setIsVirtualized(isVirtualized)
+	return model
 
-	#CALL
-	return {}
+def getModelForCluster(self, clustername):
+	from LuciDB import getRicciAgent
 
-def getClusterName(self, model):
-	return model.getClusterName()
+	rc = getRicciAgent(self, clustername)
+	if not rc:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GMFC0: unable to find a ricci agent for %s' \
+				% clustername)
+		return None
 
-def getClusterAlias(self, model):
-	if not model:
+	try:
+		model = getModelBuilder(None, rc, rc.dom0())
+		if not model:
+			raise Exception, 'model is none'
+	except Exception, e:
 		if LUCI_DEBUG_MODE is True:
-			luci_log.debug_verbose('GCA0: no model')
-		return ''
-	alias = model.getClusterAlias()
-	if not alias:
-		return model.getClusterName()
-	return alias
+			luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r' \
+				 % (clustername, e))
+		return None
+
+	return model
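
Taken together, getModelForCluster() finds a ricci agent for the named cluster, and getModelBuilder() pulls cluster.conf through that agent, wraps it in a ModelBuilder, and records whether the agent host is virtualized. A hedged usage sketch (the Zope context object and cluster name are illustrative):

    # Both helpers log under LUCI_DEBUG_MODE and return None on failure
    # rather than raising.
    model = getModelForCluster(self, 'cluster1')
    if model is not None:
        conf_xml = model.exportModelAsString()
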
--- conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/15 21:42:21	1.1.2.6
+++ conga/luci/site/luci/Extensions/Attic/LuciDB.py	2007/05/16 21:27:21	1.1.2.7
@@ -780,3 +780,50 @@
 		luci_log.debug('GRA11: no ricci agent could be found for cluster %s' \
 			% cluname)
 	return None
+
+def getClusterStatusDB(self, clustername):
+	results = list()
+	vals = {}
+
+	vals['type'] = 'cluster'
+	vals['alias'] = clustername
+	vals['name'] = clustername
+	vals['error'] = True
+	vals['quorate'] = '[unknown]'
+	vals['votes'] = '[unknown]'
+	vals['minQuorum'] = '[unknown]'
+	results.append(vals)
+
+	try:
+		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
+		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSDB0: %s -> %s: %r' \
+				% (clustername, cluster_path, e))
+		return results
+
+	if len(nodelist) < 1:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
+		try:
+			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
+			clusters_dir.manage_delObjects([clustername])
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCSDB0b: %s: %r' % (clustername, e))
+	else:
+		for node in nodelist:
+			try:
+				node_val = {}
+				node_val['type'] = 'node'
+				node_val['name'] = node[0]
+				node_val['clustered'] = '[unknown]'
+				node_val['online'] = '[unknown]'
+				node_val['error'] = True
+				results.append(node_val)
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GCSDB1: %r' % e)
+
+	return results
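
Like getClusterStatusModel() in LuciClusterInfo.py, this returns one leading cluster entry followed by one entry per node, with every live value stubbed as '[unknown]' because the data comes from the Luci database rather than from the cluster itself. An illustrative return value for a two-node cluster:

    [{'type': 'cluster', 'name': 'c1', 'alias': 'c1', 'error': True,
      'quorate': '[unknown]', 'votes': '[unknown]', 'minQuorum': '[unknown]'},
     {'type': 'node', 'name': 'node1', 'error': True,
      'clustered': '[unknown]', 'online': '[unknown]'},
     {'type': 'node', 'name': 'node2', 'error': True,
      'clustered': '[unknown]', 'online': '[unknown]'}]
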
--- conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/15 21:42:21	1.1.2.4
+++ conga/luci/site/luci/Extensions/Attic/LuciZope.py	2007/05/16 21:27:21	1.1.2.5
@@ -7,6 +7,10 @@
 
 from LuciZopePerm import userAuthenticated
 from LuciDB import allowed_systems
+from LuciSyslog import get_logger
+from conga_constants import LUCI_DEBUG_MODE
+
+luci_log = get_logger()
 
 def siteIsSetup(self):
 	import os
@@ -14,8 +18,9 @@
 
 	try:
 		return os.path.isfile('%sprivkey.pem' % CERTS_DIR_PATH) and os.path.isfile('%scacert.pem' % CERTS_DIR_PATH)
-	except:
-		pass
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('SIS0: %r' % e)
 	return False
 
 def strFilter(regex, replaceChar, arg):
@@ -110,3 +115,11 @@
 		htab['isSelected'] = True
 
 	return [ htab, ctab, stab ]
+
+def appendModel(request, model):
+	try:
+		request.SESSION.set('model', model)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('Appending model to request failed: %r' % e)
+		return 'An error occurred while storing the cluster model'
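
appendModel() is the storing half of a pattern used throughout cluster_adapters.py: stash the ModelBuilder in the Zope session on one request and read it back on the next, rebuilding it only if the session copy is gone. A hedged sketch of both sides:

    # Store (returns an error string on failure, None on success):
    err = appendModel(request, model)

    # Retrieve later; fall back to rebuilding from the cluster if absent:
    try:
        model = request.SESSION.get('model')
    except Exception, e:
        model = None  # e.g. rebuild via getModelForCluster()
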
--- conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/15 21:42:21	1.255.2.5
+++ conga/luci/site/luci/Extensions/cluster_adapters.py	2007/05/16 21:27:21	1.255.2.6
@@ -28,7 +28,7 @@
 from ResourceHandler import create_resource
 from system_adapters import validate_svc_update
 from homebase_adapters import parseHostForm
-from LuciClusterInfo import getClusterInfo
+from LuciClusterInfo import getClusterInfo, getModelBuilder, getModelForCluster
 
 from conga_constants import *
 
@@ -65,11 +65,13 @@
 	try:
 		clusterName = str(request.form['clusterName'])
 	except Exception, e:
-		luci_log.debug_verbose('PNC00: missing cluster name: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PNC00: missing cluster name: %r' % e)
 		clusterName = None
 
 	if clusterName is None:
-		luci_log.debug_verbose('PCN0: no cluster name was given')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('PCN0: no cluster name was given')
 		return (False, { 'errors': [ 'No cluster name was given' ]})
 
 	shared_storage = False
@@ -115,7 +117,8 @@
 				incomplete = True
 				errors.append('Unable to connect to %s: %s' \
 					% (cur_host, str(e)))
-				luci_log.debug_verbose('PCN1: %s: %s' % (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('PCN1: %s: %r' % (cur_host, e))
 				continue
 
 			prev_auth = rc.authed()
@@ -135,7 +138,8 @@
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \
 					% (cur_host, str(e)))
-				luci_log.debug_verbose('PCN2: %s: %s' % (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('PCN2: %s: %r' % (cur_host, e))
 				continue
 
 			cur_cluster_info = rc.cluster_info()
@@ -153,12 +157,14 @@
 						rc.unauth()
 						del cur_system['trusted']
 				except Exception, e:
-					luci_log.debug_verbose('PCN3: %s: %s' % (cur_host, str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('PCN3: %s: %r' % (cur_host, e))
 
 				errors.append('%s reports it is a member of cluster "%s"' \
 					% (cur_host, cur_cluster_name))
-				luci_log.debug_verbose('PCN4: %s: already in %s cluster' \
-					% (cur_host, cur_cluster_name))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('PCN4: %s: already in %s cluster' \
+						% (cur_host, cur_cluster_name))
 				continue
 
 			cur_host_os = resolveOSType(rc.os())
@@ -175,11 +181,13 @@
 						rc.unauth()
 						del cur_system['trusted']
 				except Exception, e:
-					luci_log.debug_verbose('PCN5: %s: %s' % (cur_host, str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('PCN5: %s: %r' % (cur_host, e))
 
 				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
-				luci_log.debug_verbose('PCN6: version mismatch for %s: (%s vs. %s)' \
-					% (cur_host, cur_host_os, cluster_os))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('PCN6: version mismatch for %s: (%s vs. %s)' \
+						% (cur_host, cur_host_os, cluster_os))
 				continue
 
 	return add_cluster, incomplete, errors, messages
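
Most of the churn in this file is one mechanical transformation applied to every log call: wrap it in a LUCI_DEBUG_MODE guard and switch the interpolation from str(e) to %r. In isolation:

    # Before: formats and logs unconditionally.
    luci_log.debug_verbose('PCN1: %s: %s' % (cur_host, str(e)))

    # After: both the %-formatting and the call are skipped unless
    # debugging is on; %r also records the exception's type.
    if LUCI_DEBUG_MODE is True:
        luci_log.debug_verbose('PCN1: %s: %r' % (cur_host, e))
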
@@ -280,7 +288,8 @@
 		except Exception, e:
 			msg = 'Unable to connect to the ricci agent on %s: %s' % (i, str(e))
 			errors.append(msg)
-			luci_log.debug_verbose(msg)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose(msg)
 			if len(batch_id_map) == 0:
 				request.SESSION.set('create_cluster', add_cluster)
 				return (False, { 'errors': errors, 'messages': messages })
@@ -290,8 +299,9 @@
 			resultNode = rc.process_batch(batchNode, async=True)
 			batch_id_map[i] = resultNode.getAttribute('batch_id')
 		except Exception, e:
-			luci_log.debug_verbose('validateCreateCluster0: %s: %s' \
-				% (i, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('validateCreateCluster0: %s: %r' \
+					% (i, e))
 			errors.append('An error occurred while attempting to add cluster node "%s"' % i)
 			if len(batch_id_map) == 0:
 				request.SESSION.set('create_cluster', add_cluster)
@@ -337,7 +347,8 @@
 		clusterName = None
 
 	if clusterName is None:
-		luci_log.debug_verbose('VACN0: no cluster name was given')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN0: no cluster name was given')
 		return (False, { 'errors': [ 'No cluster name was given' ]})
 
 	if cluster_os is None:
@@ -347,7 +358,8 @@
 			if not cluster_folder:
 				raise Exception, 'cluster DB object is missing'
 		except Exception, e:
-			luci_log.debug_verbose('VACN1: %s: %s' % (clusterName, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VACN1: %s: %r' % (clusterName, e))
 			return (False, { 'errors': [ 'The database object for %s is missing' % clusterName ] })
 
 		try:
@@ -355,7 +367,8 @@
 			if not cluster_os:
 				raise Exception, 'cluster os is blank'
 		except Exception, e:
-			luci_log.debug_verbose('VACN2: %s: %s' % (clusterName, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VACN2: %s: %r' % (clusterName, e))
 			cluster_os = None
 
 		if cluster_os is None:
@@ -363,11 +376,13 @@
 				cluster_ricci = getRicciAgent(self, clusterName)
 				cluster_os = resolveOSType(cluster_ricci.os())
 			except Exception, e:
-				luci_log.debug_verbose('VACN3: %s: %s' % (clusterName, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN3: %s: %r' % (clusterName, e))
 				cluster_os = None
 
 	if cluster_os is None:
-		luci_log.debug_verbose('Unable to determine cluster OS for %s' % clusterName)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('Unable to determine cluster OS for %s' % clusterName)
 		return (False, { 'errors': [ 'Unable to determine the version of the cluster suite this cluster is running' ] })
 
 	shared_storage = False
@@ -413,7 +428,8 @@
 				incomplete = True
 				errors.append('Unable to connect to %s: %s' \
 					% (cur_host, str(e)))
-				luci_log.debug_verbose('VACN4: %s: %s' % (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN4: %s: %r' % (cur_host, e))
 				continue
 
 			prev_auth = rc.authed()
@@ -433,7 +449,8 @@
 				incomplete = True
 				errors.append('Error authenticating to %s: %s' \
 					% (cur_host, str(e)))
-				luci_log.debug_verbose('VACN5: %s: %s' % (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN5: %s: %r' % (cur_host, e))
 				continue
 
 			cur_cluster_info = rc.cluster_info()
@@ -451,11 +468,13 @@
 						rc.unauth()
 						del cur_system['trusted']
 				except Exception, e:
-					luci_log.debug_verbose('VACN6: %s: %s' % (cur_host, str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('VACN6: %s: %r' % (cur_host, e))
 
 				errors.append('%s reports it is already a member of cluster "%s"' % (cur_host, cur_cluster_name))
-				luci_log.debug_verbose('VACN7: %s: already in %s cluster' \
-					% (cur_host, cur_cluster_name))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN7: %s: already in %s cluster' \
+						% (cur_host, cur_cluster_name))
 				continue
 
 			cur_host_os = resolveOSType(rc.os())
@@ -468,11 +487,13 @@
 						rc.unauth()
 						del cur_system['trusted']
 				except Exception, e:
-					luci_log.debug_verbose('VACN8: %s: %s' % (cur_host, str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('VACN8: %s: %r' % (cur_host, e))
 
 				errors.append('The cluster software version on %s (%s) does not match the software on the other cluster nodes (%s)' % (cur_host, cur_host_os, cluster_os))
-				luci_log.debug_verbose('VACN9: version mismatch for %s: (%s vs. %s)' \
-					% (cur_host, cur_host_os, cluster_os))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN9: version mismatch for %s: (%s vs. %s)' \
+						% (cur_host, cur_host_os, cluster_os))
 				continue
 
 	if incomplete or len(errors) > 0:
@@ -486,7 +507,8 @@
 	except Exception, e:
 		incomplete = True
 		errors.append('Unable to contact a ricci agent for %s' % clusterName)
-		luci_log.debug_verbose('VACN10: %s: %s' % (clusterName, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN10: %s: %r' % (clusterName, e))
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -512,7 +534,8 @@
 		incomplete = True
 		errors.append('Unable to build the cluster model for %s' \
 			% clusterName)
-		luci_log.debug_verbose('VACN11: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN11: %r' % e)
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -544,10 +567,12 @@
 						rc.unauth()
 						del cur_system['trusted']
 				except Exception, e:
-					luci_log.debug_verbose('VACN12: %s: %s' % (cur_host, str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('VACN12: %s: %r' % (cur_host, e))
 
 				errors.append('Unable to initiate cluster join for node "%s"' % cur_host)
-				luci_log.debug_verbose('VACN13: %s: %s' % (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN13: %s: %r' % (cur_host, e))
 				continue
 
 			next_node_id += 1
@@ -574,7 +599,8 @@
 	except Exception, e:
 		incomplete = True
 		errors.append('Unable to save the new cluster model')
-		luci_log.debug_verbose('VACN14: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN14: %r' % e)
 
 	# Propagate the new cluster.conf to the existing nodes
 	# before having any of the new nodes join. If this fails,
@@ -594,7 +620,8 @@
 		incomplete = True
 		errors.append('Unable to update the cluster node list for %s' \
 			% clusterName)
-		luci_log.debug_verbose('VACN15: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN15: %r' % e)
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -604,7 +631,8 @@
 	if error:
 		incomplete = True
 		errors.append(error)
-		luci_log.debug_verbose('VACN16: %s: %s' % (clusterName, error))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VACN16: %s: %s' % (clusterName, error))
 
 	if incomplete or len(errors) > 0:
 		request.SESSION.set('add_node', add_cluster)
@@ -625,7 +653,8 @@
 			clunode['errors'] = True
 			errors.append('Unable to connect to the ricci agent on %s: %s' \
 				% (cur_host, str(e)))
-			luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %s' % (clunode['host'], str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.info('VACN17: Unable to connect to the ricci daemon on host %s: %r' % (clunode['host'], e))
 
 		if success:
 			try:
@@ -634,8 +663,8 @@
 			except Exception, e:
 				clunode['errors'] = True
 				success = False
-				luci_log.debug_verbose('VACN18: %s: %s' \
-					% (cur_host, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VACN18: %s: %r' % (cur_host, e))
 
 		if not success:
 			incomplete = True
@@ -659,7 +688,8 @@
 		if not form_xml:
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
-		luci_log.debug_verbose('vSA0: no form_xml: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA0: no form_xml: %r' % e)
 		return (False, {'errors': ['No resource data was supplied for this service']})
 
 	try:
@@ -667,7 +697,8 @@
 		if not model:
 			raise Exception, 'model is None'
 	except Exception, e:
-		luci_log.debug_verbose('vSA0a: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA0a: %r' % e)
 		return (False, {'errors': [ 'The cluster model is missing from the session object' ]})
 
 	try:
@@ -676,7 +707,8 @@
 		if len(forms) < 1:
 			raise
 	except Exception, e:
-		luci_log.debug_verbose('vSA1: error: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA1: error: %r' % e)
 		return (False, {'errors': ['The resource data submitted for this service is not properly formed']})
 
 	form_hash = {}
@@ -708,21 +740,24 @@
 			try:
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
 			except Exception, e:
-				luci_log.debug_verbose('vSA2: parsing XML: %s' % str(e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vSA2: parsing XML: %r' % e)
 
 		try:
 			res_type = dummy_form['type'].strip()
 			if not res_type:
 				raise Exception, 'no resource type'
 		except Exception, e:
-			luci_log.debug_verbose('vSA3: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vSA3: %r' % e)
 			return (False, {'errors': [ 'No resource type was specified' ]})
 
 		try:
 			if res_type == 'ip':
 				dummy_form['resourceName'] = dummy_form['ip_address']
 		except Exception, e:
-			luci_log.debug_verbose('vSA3a: type is ip but no addr: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vSA3a: type is ip but no addr: %r' % e)
 			return (False, {'errors': [ 'No IP address was given' ]})
 
 		try:
@@ -734,7 +769,8 @@
 				resObj = create_resource(res_type, dummy_form, model)
 		except Exception, e:
 			resObj = None
-			luci_log.debug_verbose('vSA4: type %s: %s' % (res_type, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vSA4: type %s: %r' % (res_type, e))
 
 		if resObj is None:
 			return (False, {'errors': [ 'An error occurred while adding %s' % res_type ]})
@@ -765,33 +801,39 @@
 	try:
 		service_name = request.form['svc_name'].strip()
 	except Exception, e:
-		luci_log.debug_verbose('vSA5: no service name: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA5: no service name: %r' % e)
 		return (False, {'errors': [ 'No service name was given' ]})
 
-	autostart = "1"
+	autostart = '1'
 	try:
-		if not request.form.has_key('autostart') or request.form['autostart'] == "0":
-			autostart = "0"
+		if not request.form.has_key('autostart') or request.form['autostart'] == '0':
+			autostart = '0'
 	except Exception, e:
 		autostart = None
-		luci_log.debug_verbose('vSA5a: error getting autostart: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA5a: error getting autostart: %r' % e)
 
-	exclusive = "0"
+	exclusive = '0'
 	try:
-		if not request.form.has_key('exclusive') or request.form['exclusive'] != "1":
-			exclusive = "0"
+		if not request.form.has_key('exclusive') or request.form['exclusive'] != '1':
+			exclusive = '0'
 		else:
-			exclusive = "1"
+			exclusive = '1'
 	except Exception, e:
-		exclusive = "0"
+		exclusive = '0'
 
 	try:
 		cur_service = model.retrieveServiceByName(service_name)
 	except GeneralError, e:
-		luci_log.debug_verbose('vSA5b: no service named %s found' % service_name)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA5b: no service named %s found: %r' \
+				% (service_name, e))
 		cur_service = None
 	except Exception, e:
-		luci_log.debug_verbose('vSA5c: no service named %s found: %s' % (service_name, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA5c: no service named %s found: %r' \
+				% (service_name, e))
 		cur_service = None
 
 	try:
@@ -803,10 +845,12 @@
 			if cur_service is not None:
 				return (False, {'errors': [ 'A service with the name %s already exists' % service_name ]})
 		else:
-			luci_log.debug_verbose('vSA4a: unknown action %s' % request.form['action'])
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vSA4a: unknown action %s' % request.form['action'])
 			return (False, {'errors': [ 'An unknown action was specified' ]})
 	except Exception, e:
-		luci_log.debug_verbose('vSA5: no action type: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vSA5: no action type: %r' % e)
 
 	def buildSvcTree(parent, child_id_list):
 		for i in child_id_list:
@@ -815,7 +859,8 @@
 				if not child:
 					raise Exception, 'No object for %s' % i
 			except Exception, e:
-				luci_log.debug_verbose('bST0: %s' % str(e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('bST0: %r' % e)
 				continue
 			parent.addChild(child)
 			if 'kids' in form_hash[i]:
@@ -836,7 +881,8 @@
 
 	clustername = model.getClusterName()
 	if not clustername:
-		luci_log.debug_verbose('vAS6: no cluname from mb')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vAS6: no cluname from mb')
 		return (False, {'errors': [ 'Unable to determine cluster name' ]})
 
 	try:
@@ -845,27 +891,31 @@
 		if not conf:
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
-		luci_log.debug_verbose('vAS6a: exportModelAsString : %s' \
-			% str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vAS6a: exportModelAsString: %r' % e)
 		return (False, {'errors': [ 'An error occurred while adding this service' ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('vAS6b: unable to find a ricci agent for cluster %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vAS6b: unable to find a ricci agent for cluster %s' % clustername)
 		return 'Unable to find a ricci agent for the %s cluster' % clustername
 
 	try:
 		ragent = rc.hostname()
 		if not ragent:
-			luci_log.debug_verbose('vAS7: missing ricci hostname')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vAS7: missing ricci hostname')
 			raise Exception, 'unknown ricci agent hostname'
 
 		batch_number, result = rq.setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
-			luci_log.debug_verbose('vAS8: missing batch_number or result')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vAS8: missing batch_number or result')
 			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
-		luci_log.debug_verbose('vAS9: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vAS9: %r' % e)
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
 	try:
@@ -874,7 +924,8 @@
 		else:
 			set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_ADD, 'Creating service "%s"' % service_name)
 	except Exception, e:
-		luci_log.debug_verbose('vAS10: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vAS10: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -886,13 +937,15 @@
 		if not res_type:
 			raise KeyError, 'type is blank'
 	except Exception, e:
-		luci_log.debug_verbose('VRA0: type is blank')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VRA0: type is blank')
 		return (False, {'errors': ['No resource type was given']})
 
 	try:
 		model = request.SESSION.get('model')
 	except Exception, e:
-		luci_log.debug_verbose('VRA1: no model: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VRA1: no model: %r' % e)
 		return None
 
 	errors = list()
@@ -909,7 +962,8 @@
 				% res.getName())
 	if len(errors) > 0:
 		errors.append('An error occurred while adding this resource')
-		luci_log.debug_verbose('resource error: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('resource error: %r' % e)
 		return (False, {'errors': errors})
 
 
@@ -965,7 +1019,8 @@
 		model.usesMulticast = True
 		model.mcast_address = addr_str
 	except Exception, e:
-		luci_log.debug('Error updating mcast properties: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('Error updating mcast properties: %r' % e)
 		errors.append('Unable to update cluster multicast properties')
 
 	if len(errors) > 0:
@@ -1138,7 +1193,8 @@
 		old_name = model.getClusterAlias()
 		old_ver = int(cp.getConfigVersion())
 	except Exception, e:
-		luci_log.debug_verbose('getConfigVersion: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('getConfigVersion: %r' % e)
 		errors.append('unable to determine the current configuration version')
 		return (False, {'errors': errors})
 
@@ -1170,7 +1226,8 @@
 				cp.addAttribute('alias', cluster_name)
 			cp.setConfigVersion(str(version_num))
 		except Exception, e:
-			luci_log.debug_verbose('unable to update general properties: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('unable to update general properties: %r' % e)
 			errors.append('Unable to update the cluster configuration')
 
 	try:
@@ -1301,7 +1358,8 @@
 			fd.setPostJoinDelay(str(post_join_delay))
 			fd.setPostFailDelay(str(post_fail_delay))
 	except Exception, e:
-		luci_log.debug_verbose('Unable to update fence daemon properties: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('Unable to update fence daemon properties: %r' % e)
 		errors.append('An error occurred while attempting to update fence daemon properties')
 
 	if len(errors) > 0:
@@ -1360,7 +1418,8 @@
 	try:
 		model = LuciExtractCluModel(self, request)
 	except Exception, e:
-		luci_log.debug_verbose('VCC0a: no model, no cluster name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC0a: no model, no cluster name')
 		return (False, {'errors': ['No cluster model was found']})
 
 	try:
@@ -1382,7 +1441,8 @@
 	try:
 		cp = model.getClusterPtr()
 	except:
-		luci_log.debug_verbose('VCC3a: getClusterPtr failed')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC3a: getClusterPtr failed')
 		return (False, {'errors': ['No cluster model was found']})
 
 	config_validator = configFormValidators[request.form['configtype']]
@@ -1405,8 +1465,8 @@
 			if not conf_str:
 				raise Exception, 'conf_str is none'
 		except Exception, e:
-			luci_log.debug_verbose('VCC4: export model as string failed: %s' \
-				% str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC4: export model as string failed: %r' % e)
 			errors.append('Unable to store the new cluster configuration')
 
 	try:
@@ -1414,7 +1474,8 @@
 		if not clustername:
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
-		luci_log.debug_verbose('VCC5: error: getClusterName: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC5: error: getClusterName: %r' % e)
 		errors.append('Unable to determine cluster name from model')
 
 	if len(errors) > 0:
@@ -1423,14 +1484,16 @@
 	if not rc:
 		rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VCC6: unable to find a ricci agent for the %s cluster' % clustername)
 		errors.append('Unable to contact a ricci agent for cluster %s' \
 			% clustername)
 
 	if rc:
 		batch_id, result = rq.setClusterConf(rc, str(conf_str))
 		if batch_id is None or result is None:
-			luci_log.debug_verbose('VCC7: setCluserConf: batchid or result is None')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VCC7: setClusterConf: batchid or result is None')
 			errors.append('Unable to propagate the new cluster configuration for %s' % clustername)
 		else:
 			try:
@@ -1459,7 +1522,8 @@
 		pass
 
 	if not cluname:
-		luci_log.debug_verbose('LECN0: no cluster name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECN0: no cluster name')
 	return cluname
 
 def LuciExtractCluModel(self, request, cluster_name=None):
@@ -1467,8 +1531,8 @@
 	if not cluster_name:
 		cluster_name = LuciExtractCluName(self, request)
 		if not cluster_name:
-			luci_log.debug_verbose('LECM0: no cluster name')
-
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('LECM0: no cluster name')
 	try:
 		model = request.SESSION.get('model')
 		if not model:
@@ -1479,10 +1543,12 @@
 	try:
 		model = getModelForCluster(self, cluster_name)
 		if not model:
-			luci_log.debug_verbose('LECM1: empty model')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('LECM1: empty model')
 			model = None
 	except Exception, e:
-		luci_log.debug_verbose('LECM2: no model: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('LECM2: no model: %r' % e)
 		model = None
 	return model
 
@@ -1492,11 +1558,13 @@
 	model = LuciExtractCluModel(self, request)
 
 	if not model:
-		luci_log.debug_verbose('VFE0: no model')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE0: no model')
 		return (False, [ 'No cluster model was found' ])
 
 	if not request.form:
-		luci_log.debug_verbose('VFE: no form was submitted')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE: no form was submitted')
 		return (False, [ 'No form was submitted' ])
 
 	ret_code, ret_obj = validateNewFenceDevice(request.form, model)
@@ -1509,101 +1577,108 @@
 		if not conf_str:
 			raise Exception, 'conf_str is none'
 	except Exception, e:
-		luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
 		errors.append('Unable to store the new cluster configuration')
 
-	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, model.getClusterName(), ret_obj.getAttribute('name')))
+	request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, model.getClusterName(), ret_obj))
 
 def validateFenceEdit(self, request):
-  errors = list()
-  messages = list()
-  rc = None
-
-  try:
-    model = request.SESSION.get('model')
-    if not model:
-      raise Exception, 'model is none'
-  except Exception, e:
-    model = None
-    try:
-      cluname = request.form['clustername']
-    except:
-      try:
-        cluname = request['clustername']
-      except:
-        luci_log.debug_verbose('VFE: no model, no cluster name')
-        return (False, {'errors': ['No cluster model was found']})
-
-    try:
-      model = getModelForCluster(self, cluname)
-    except:
-      model = None
-
-    if model is None:
-      luci_log.debug_verbose('VFE: unable to get model from session')
-      return (False, {'errors': ['No cluster model was found']})
-
-  form = None
-  try:
-    response = request.response
-    form = request.form
-    if not form:
-      form = None
-      raise Exception, 'no form was submitted'
-  except:
-    pass
-
-  if form is None:
-    luci_log.debug_verbose('VFE: no form was submitted')
-    return (False, {'errors': ['No form was submitted']})
-
-  #This is a fence edit situation, so the model should already have an
-  #entry for this fence device.
-  #
-  #pass form and model to validation method, then save changes if it passes.
-  error_code, retobj = validateFenceDevice(form, model)
-  if error_code == FD_VAL_SUCCESS:
-    try:
-      conf_str = model.exportModelAsString()
-      if not conf_str:
-        raise Exception, 'conf_str is none'
-    except Exception, e:
-      luci_log.debug_verbose('VFE: export model as string failed: %s' \
-        % str(e))
-      errors.append('Unable to store the new cluster configuration')
-
-    try:
-      clustername = model.getClusterName()
-      if not clustername:
-        raise Exception, 'cluster name from model.getClusterName() is blank'
-    except Exception, e:
-      luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
-      errors.append('Unable to determine cluster name from model')
-
-    if not rc:
-      rc = getRicciAgent(self, clustername)
-      if not rc:
-        luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
-        errors.append('Unable to contact a ricci agent for cluster %s' \
-          % clustername)
-
-    if rc:
-      batch_id, result = rq.setClusterConf(rc, str(conf_str))
-      if batch_id is None or result is None:
-        luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
-        errors.append('Unable to propagate the new cluster configuration for %s' \
-          % clustername)
-      else:
-        try:
-          set_node_flag(self, clustername, rc.hostname(), batch_id,
-            CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
-        except:
-          pass
-
-    response.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, clustername, retobj))
-  else:
-    errors.extend(retobj)
-    return (False, {'errors': errors, 'messages': messages})
+	errors = list()
+	messages = list()
+	rc = None
+
+	try:
+		model = request.SESSION.get('model')
+		if not model:
+			raise Exception, 'model is none'
+	except Exception, e:
+		model = None
+		try:
+			cluname = request.form['clustername']
+		except:
+			try:
+				cluname = request['clustername']
+			except:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFE: no model, no cluster name')
+				return (False, {'errors': ['No cluster model was found']})
+
+		try:
+			model = getModelForCluster(self, cluname)
+		except:
+			model = None
+
+		if model is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFE: unable to get model from session')
+			return (False, {'errors': ['No cluster model was found']})
+
+	form = None
+	try:
+		response = request.response
+		form = request.form
+		if not form:
+			form = None
+			raise Exception, 'no form was submitted'
+	except:
+		pass
+
+	if form is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE: no form was submitted')
+		return (False, {'errors': ['No form was submitted']})
+
+	# This is a fence edit situation, so the model should already have an
+	# entry for this fence device.
+	#
+	# pass form and model to validation method, then save changes if it passes.
+	error_code, retobj = validateFenceDevice(form, model)
+	if error_code == FD_VAL_SUCCESS:
+		try:
+			conf_str = model.exportModelAsString()
+			if not conf_str:
+				raise Exception, 'conf_str is none'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+			errors.append('Unable to store the new cluster configuration')
+
+		try:
+			clustername = model.getClusterName()
+			if not clustername:
+				raise Exception, 'cluster name from model.getClusterName() is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFA: error: getClusterName: %r' % e)
+			errors.append('Unable to determine cluster name from model')
+
+		if not rc:
+			rc = getRicciAgent(self, clustername)
+			if not rc:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+				errors.append('Unable to contact a ricci agent for cluster %s' \
+					% clustername)
+
+		if rc:
+			batch_id, result = rq.setClusterConf(rc, str(conf_str))
+			if batch_id is None or result is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+				errors.append('Unable to propagate the new cluster configuration for %s' \
+					% clustername)
+			else:
+				try:
+					set_node_flag(self, clustername, rc.hostname(), batch_id,
+						CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)
+				except:
+					pass
+
+		response.redirect('%s?pagetype=%s&clustername=%s&fencename=%s&busyfirst=true' % (request['URL'], FENCEDEV, clustername, retobj))
+	else:
+		errors.extend(retobj)
+		return (False, {'errors': errors, 'messages': messages})
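
The tail of validateFenceEdit() is the propagate-and-flag pattern shared by the other validators in this file; condensed to its happy path (the names are those of the surrounding code):

    conf_str = model.exportModelAsString()
    rc = getRicciAgent(self, clustername)
    batch_id, result = rq.setClusterConf(rc, str(conf_str))
    if batch_id is not None and result is not None:
        set_node_flag(self, clustername, rc.hostname(), batch_id,
            CLUSTER_CONFIG, 'Updating fence device "%s"' % retobj)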
 
 def validateNodeFenceConfig(self, request):
 	errors = list()
@@ -1613,13 +1688,15 @@
 		if not form_xml:
 			raise KeyError, 'form_xml must not be blank'
 	except Exception, e:
-		luci_log.debug_verbose('vNFC0: no form_xml: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC0: no form_xml: %r' % e)
 		return (False, {'errors': ['No fence data was supplied']})
 
 	try:
 		fence_level = int(request.form['fence_level'].strip())
 	except Exception, e:
-		luci_log.debug_verbose('vNFC1: no fence level: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC1: no fence level: %r' % e)
 		return (False, {'errors': ['No fence level was supplied']})
 
 	try:
@@ -1627,7 +1704,8 @@
 		if not nodename:
 			raise Exception, 'nodename is blank'
 	except Exception, e:
-		luci_log.debug_verbose('vNFC2: no nodename: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC2: no nodename: %r' % e)
 		return (False, {'errors': ['No node name was given']})
 
 	try:
@@ -1635,7 +1713,8 @@
 		if not clustername:
 			raise Exception, 'clustername is blank'
 	except Exception, e:
-		luci_log.debug_verbose('vNFC3: no clustername: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC3: no clustername: %r' % e)
 		return (False, {'errors': ['No cluster name was given']})
 
 	try:
@@ -1650,19 +1729,22 @@
 			model = None
 
 	if model is None:
-		luci_log.debug_verbose('vNFC4: unable to get model for cluster %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC4: unable to get model for cluster %s' % clustername)
 		return (False, {'errors': ['No cluster model was found']})
 
 	try:
 		doc = minidom.parseString(form_xml)
 	except Exception, e:
-		luci_log.debug_verbose('vNFC5: error: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC5: error: %r' % e)
 		return (False, {'errors': ['The fence data submitted is not properly formed']})
 
 	try:
 		node = model.retrieveNodeByName(nodename)
 	except GeneralError, e:
-		luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list' % (str(nodename), str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC6: unable to find node name %s in current node list: %r' % (str(nodename), e))
 		return (False, {'errors': ['Unable to find the cluster node %s in the node list' % str(nodename) ]})
 
 	fence_level_num = int(fence_level)
@@ -1691,7 +1773,8 @@
 			try:
 				node.getChildren()[0].removeChild(delete_target)
 			except Exception, e:
-				luci_log.debug_verbose('vNFC6a: %s: %s' % (method_id, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vNFC6a: %s: %r' % (method_id, e))
 				return (False, {'errors': ['An error occurred while deleting fence method %s' % method_id ]})
 		else:
 			return (True, {'messages': ['No changes were made'] })
@@ -1711,7 +1794,8 @@
 			try:
 				input_type = str(i.getAttribute('type'))
 			except Exception, e:
-				luci_log.debug_verbose('vNFC7: input type: %s' % str(e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vNFC7: input type: %r' % e)
 				continue
 
 			if not input_type or input_type == 'button':
@@ -1720,7 +1804,8 @@
 			try:
 				dummy_form[str(i.getAttribute('name'))] = str(i.getAttribute('value'))
 			except Exception, e:
-				luci_log.debug_verbose('vNFC8: parsing XML: %s' % str(e))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vNFC8: parsing XML: %r' % e)
 
 		if len(dummy_form) < 1:
 			continue
@@ -1729,14 +1814,16 @@
 			try:
 				parent = dummy_form['parent_fencedev']
 			except:
-				luci_log.debug_verbose('vNFC9: no parent for instance')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vNFC9: no parent for instance')
 				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
 
 			try:
 				form_hash[parent][1].append(dummy_form)
 				del dummy_form['fence_instance']
 			except Exception, e:
-				luci_log.debug_verbose('vNFC10: no parent for instance')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('vNFC10: no parent for instance')
 				return (False, {'errors': [ 'Unable to determine what device the current instance uses' ]})
 		else:
 			form_hash[form_id] = (dummy_form, list())
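
So each top-level fence form creates a (fence_form, instance_list) pair keyed by its form id, and every fence_instance form is appended to its parent's list. Illustrative contents after parsing (ids and attributes made up):

    form_hash = {
        'fd1': (
            {'name': 'apc1', 'fence_type': 'fence_apc'},  # the fence form
            [{'port': '1'}, {'port': '2'}],               # its instances
        ),
    }
    fence_form, instance_list = form_hash['fd1']
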
@@ -1750,7 +1837,8 @@
 		try:
 			fence_form, instance_list = form_hash[i]
 		except Exception, e:
-			luci_log.debug_verbose('vNFC11: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vNFC11: %r' % e)
 			continue
 
 		try:
@@ -1758,7 +1846,8 @@
 			if not fence_type:
 				raise Exception, 'fence type is blank'
 		except Exception, e:
-			luci_log.debug_verbose('vNFC12: %s %s' % (i, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('vNFC12: %s: %r' % (i, e))
 			fence_type = None
 
 		if 'existing_device' in fence_form:
@@ -1854,155 +1943,167 @@
 		conf = str(model.exportModelAsString())
 		if not conf:
 			raise Exception, 'model string is blank'
-		luci_log.debug_verbose('vNFC16: exported "%s"' % conf)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC16: exported "%s"' % conf)
 	except Exception, e:
-		luci_log.debug_verbose('vNFC17: exportModelAsString failed: %s' \
-			% str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC17: exportModelAsString failed: %r' % e)
 		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('vNFC18: unable to find a ricci agent for cluster %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC18: unable to find a ricci agent for cluster %s' % clustername)
 		return (False, {'errors': ['Unable to find a ricci agent for the %s cluster' % clustername ]})
 	ragent = rc.hostname()
 
 	batch_number, result = rq.setClusterConf(rc, conf)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('vNFC19: missing batch and/or result')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC19: missing batch and/or result')
 		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	try:
 		set_node_flag(self, clustername, ragent, str(batch_number), FENCEDEV_NODE_CONFIG, "Updating fence configuration for node \'%s\'" % nodename)
 	except Exception, e:
-		luci_log.debug_verbose('vNFC20: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('vNFC20: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&nodename=%s&busyfirst=true' % (request['URL'], NODE, clustername, nodename))
 
 def deleteFenceDevice(self, request):
-  errors = list()
-  messages = list()
-  rc = None
-
-  try:
-    model = request.SESSION.get('model')
-    if not model:
-      raise Exception, 'model is none'
-  except Exception, e:
-    model = None
-    try:
-      cluname = request.form['clustername']
-    except:
-      try:
-        cluname = request['clustername']
-      except:
-        luci_log.debug_verbose('VFE: no model, no cluster name')
-        return (False, {'errors': ['No cluster model was found']})
-
-    try:
-      model = getModelForCluster(self, cluname)
-    except:
-      model = None
-
-    if model is None:
-      luci_log.debug_verbose('VFE: unable to get model from session')
-      return (False, {'errors': ['No cluster model was found']})
-
-  form = None
-  try:
-    response = request.RESPONSE
-    form = request.form
-    if not form:
-      form = None
-      raise Exception, 'no form was submitted'
-  except:
-    pass
-
-  if form is None:
-    luci_log.debug_verbose('VFE: no form was submitted')
-    return (False, {'errors': ['No form was submitted']})
-
-  #get name of fencedev
-  try:
-    fencedev_name = form['orig_name']
-    fencedev_name = fencedev_name.strip()
-  except KeyError, e:
-    return (False, {'errors':['No device name in form submission']})
-
-  fdev_to_delete = None
-  #iterate thru list of current fencedevs and find one to be deleted
-  fdevs = model.getFenceDevices()
-  for fdev in fdevs:
-    if fdev.getName().strip() == fencedev_name:
-      fdev_to_delete = fdev
-      break
-  if fdev_to_delete is None:
-    luci_log.debug_verbose('VFD: Could not find fence device name in model')
-    return (False, {'errors':['Could not find fence device name in model']})
-
-  #get fencedev ptr
-  fdev_ptr = model.getFenceDevicePtr()
-  #remove child
-  try:
-    fdev_ptr.removeChild(fdev_to_delete)
-    error_code = FD_VAL_SUCCESS
-    error_string = "Fence device %s successfully removed from configuration" % fencedev_name
-  except:
-    error_code = FD_VAL_FAIL
-    error_string = "Fence device %s could not be removed from configuration" % fencedev_name
-
-  try:
-    model.removeFenceInstancesForFenceDevice(fencedev_name)
-  except:
-    luci_log.debug_verbose('VFD: Could not remove fence instances for')
-
-
-  if error_code == FD_VAL_SUCCESS:
-    messages.append(error_string)
-    try:
-      model.setModified(True)
-      conf_str = model.exportModelAsString()
-      if not conf_str:
-        raise Exception, 'conf_str is none'
-    except Exception, e:
-      luci_log.debug_verbose('VFE: export model as string failed: %s' \
-      % str(e))
-      errors.append('Unable to store the new cluster configuration')
-
-    try:
-      clustername = model.getClusterName()
-      if not clustername:
-        raise Exception, 'cluster name from model.getClusterName() is blank'
-    except Exception, e:
-      luci_log.debug_verbose('VFA: error: getClusterName: %s' % str(e))
-      errors.append('Unable to determine cluster name from model')
-
-    if not rc:
-      rc = getRicciAgent(self, clustername)
-      if not rc:
-        luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
-        errors.append('Unable to contact a ricci agent for cluster %s' \
-        % clustername)
-
-    if rc:
-      batch_id, result = rq.setClusterConf(rc, str(conf_str))
-      if batch_id is None or result is None:
-        luci_log.debug_verbose('VFA: setCluserConf: batchid or result is None')
-        errors.append('Unable to propagate the new cluster configuration for %s' \
-        % clustername)
-      else:
-        try:
-          set_node_flag(self, clustername, rc.hostname(), batch_id,
-            CLUSTER_CONFIG, 'Removing fence device "%s"' % fencedev_name)
-        except:
-          pass
+	errors = list()
+	messages = list()
+	rc = None
+
+	try:
+		model = request.SESSION.get('model')
+		if not model:
+			raise Exception, 'model is none'
+	except Exception, e:
+		model = None
+		try:
+			cluname = request.form['clustername']
+		except:
+			try:
+				cluname = request['clustername']
+			except:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFE: no model, no cluster name')
+				return (False, {'errors': ['No cluster model was found']})
+
+		try:
+			model = getModelForCluster(self, cluname)
+		except:
+			model = None
+
+		if model is None:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFE: unable to get model from session')
+			return (False, {'errors': ['No cluster model was found']})
+
+	form = None
+	try:
+		response = request.RESPONSE
+		form = request.form
+		if not form:
+			form = None
+			raise Exception, 'no form was submitted'
+	except:
+		pass
+
+	if form is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFE: no form was submitted')
+		return (False, {'errors': ['No form was submitted']})
+
+	#get name of fencedev
+	try:
+		fencedev_name = form['orig_name']
+		fencedev_name = fencedev_name.strip()
+	except KeyError, e:
+		return (False, {'errors':['No device name in form submission']})
+
+	fdev_to_delete = None
+	#iterate through the list of current fencedevs and find the one to be deleted
+	fdevs = model.getFenceDevices()
+	for fdev in fdevs:
+		if fdev.getName().strip() == fencedev_name:
+			fdev_to_delete = fdev
+			break
+	if fdev_to_delete is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFD: Could not find fence device name in model')
+		return (False, {'errors':['Could not find fence device name in model']})
+
+	#get fencedev ptr
+	fdev_ptr = model.getFenceDevicePtr()
+	#remove child
+	try:
+		fdev_ptr.removeChild(fdev_to_delete)
+		error_code = FD_VAL_SUCCESS
+		error_string = "Fence device %s successfully removed from configuration" % fencedev_name
+	except:
+		error_code = FD_VAL_FAIL
+		error_string = "Fence device %s could not be removed from configuration" % fencedev_name
+
+	try:
+		model.removeFenceInstancesForFenceDevice(fencedev_name)
+	except:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VFD: Could not remove fence instances for %s' % fencedev_name)
+
+
+	if error_code == FD_VAL_SUCCESS:
+		messages.append(error_string)
+		try:
+			model.setModified(True)
+			conf_str = model.exportModelAsString()
+			if not conf_str:
+				raise Exception, 'conf_str is none'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFE: export model as string failed: %r' % e)
+			errors.append('Unable to store the new cluster configuration')
+
+		try:
+			clustername = model.getClusterName()
+			if not clustername:
+				raise Exception, 'cluster name from model.getClusterName() is blank'
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VFA: error: getClusterName: %r' % e)
+			errors.append('Unable to determine cluster name from model')
+
+		if not rc:
+			rc = getRicciAgent(self, clustername)
+			if not rc:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+				errors.append('Unable to contact a ricci agent for cluster %s' \
+				% clustername)
 
-    response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
+		if rc:
+			batch_id, result = rq.setClusterConf(rc, str(conf_str))
+			if batch_id is None or result is None:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('VFA: setClusterConf: batch_id or result is None')
+				errors.append('Unable to propagate the new cluster configuration for %s' \
+				% clustername)
+			else:
+				try:
+					set_node_flag(self, clustername, rc.hostname(), batch_id,
+						CLUSTER_CONFIG, 'Removing fence device "%s"' % fencedev_name)
+				except:
+					pass
+
+		response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
 		% (request['URL'], FENCEDEVS, clustername))
-    return (True, {'errors': errors, 'messages': messages})
-  else:
-    errors.append(error_string)
-    return (False, {'errors': errors, 'messages': messages})
+		return (True, {'errors': errors, 'messages': messages})
+	else:
+		errors.append(error_string)
+		return (False, {'errors': errors, 'messages': messages})
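deleteFenceDevice ends the way most validators in this file do: export the model, push the new cluster.conf through a ricci agent, then record a busy flag so the UI can poll. A hedged condensation of that tail (rq, getRicciAgent and set_node_flag are the module's own names; the helper itself is illustrative, not part of the patch):

	def propagate_conf(self, model, clustername, flag_type, status_msg):
		rc = getRicciAgent(self, clustername)
		if not rc:
			return False
		conf = str(model.exportModelAsString())
		if not conf:
			return False
		batch_id, result = rq.setClusterConf(rc, conf)
		if batch_id is None or result is None:
			return False
		try:
			# A failed flag is non-fatal: the configuration
			# has already been pushed at this point.
			set_node_flag(self, clustername, rc.hostname(),
				str(batch_id), flag_type, status_msg)
		except:
			pass
		return True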
 
 def validateDaemonProperties(self, request):
 	errors = list()
@@ -2018,7 +2119,8 @@
 		pass
 
 	if form is None:
-		luci_log.debug_verbose('VDP0: no form was submitted')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP0: no form was submitted')
 		return (False, {'errors': ['No form was submitted']})
 
 	try:
@@ -2027,7 +2129,8 @@
 			raise Exception, 'nodename is blank'
 	except Exception, e:
 		errors.append('Unable to determine the current node name')
-		luci_log.debug_verbose('VDP1: no nodename: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP1: no nodename: %r' % e)
 
 	try:
 		clustername = form['clustername'].strip()
@@ -2035,7 +2138,8 @@
 			raise Exception, 'clustername is blank'
 	except Exception, e:
 		errors.append('Unable to determine the current cluster name')
-		luci_log.debug_verbose('VDP2: no clustername: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP2: no clustername: %r' % e)
 
 	disable_list = list()
 	enable_list = list()
@@ -2050,10 +2154,12 @@
 					if daemon_prop[1] == '0' and daemon_prop[2] == 'on':
 						enable_list.append(daemon_prop[0])
 		except Exception, e:
-			luci_log.debug_verbose('VDP3: error: %s' % str(i))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('VDP3: %s: %r' % (i, e))
 
 	if len(enable_list) < 1 and len(disable_list) < 1:
-		luci_log.debug_verbose('VDP4: no changes made')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP4: no changes made')
 		response.redirect('%s?pagetype=%s&clustername=%s&nodename=%s' \
 			% (request['URL'], NODE, clustername, nodename))
 
@@ -2063,13 +2169,15 @@
 		if not rc:
 			raise Exception, 'rc is None'
 	except Exception, e:
-		luci_log.debug_verbose('VDP5: RC %s: %s' % (nodename_resolved, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP5: RC %s: %r' % (nodename_resolved, e))
 		errors.append('Unable to connect to the ricci agent on %s to update cluster daemon properties' % nodename_resolved)
 		return (False, {'errors': errors})
 
 	batch_id, result = rq.updateServices(rc, enable_list, disable_list)
 	if batch_id is None or result is None:
-		luci_log.debug_verbose('VDP6: setCluserConf: batchid or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VDP6: updateServices: batch_id or result is None')
 		errors.append('Unable to update the cluster daemon properties on node %s' % nodename_resolved)
 		return (False, {'errors': errors})
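The enable/disable partition above, condensed: each submitted row is effectively a (name, currently_enabled, requested_state) triple, and only rows whose requested state differs from the current one are queued. The disable branch is not visible in this hunk, so its condition below mirrors the enable branch by assumption:

	def partition_daemons(rows):
		enable_list = list()
		disable_list = list()
		for name, was_on, want in rows:
			if was_on == '1' and want == 'off':
				disable_list.append(name)
			elif was_on == '0' and want == 'on':
				enable_list.append(name)
		return enable_list, disable_list

	# Sample data is illustrative.
	print partition_daemons([('modclusterd', '0', 'on'), ('gfs', '1', 'off')])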
 
@@ -2097,7 +2205,8 @@
 		if not model:
 			raise Exception, 'no model'
 	except Exception, e:
-		luci_log.debug_verbose('validateFdom0: no model: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom0: no model: %r' % e)
 		return (False, {'errors': [ 'Unable to retrieve cluster information' ]})
 
 	prioritized = False
@@ -2134,7 +2243,8 @@
 			raise Exception, 'blank'
 	except Exception, e:
 		errors.append('No name was given for this failover domain')
-		luci_log.debug_verbose('validateFdom0: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom0a: %r' % e)
 
 	oldname = None
 	try:
@@ -2152,7 +2262,8 @@
 	if oldname is not None:
 		fdom = model.getFailoverDomainByName(oldname)
 		if fdom is None:
-			luci_log.debug_verbose('validateFdom1: No fdom named %s exists' % oldname)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('validateFdom1: No fdom named %s exists' % oldname)
 			errors.append('No failover domain named "%s" exists' % oldname)
 		else:
 			fdom.addAttribute('name', name)
@@ -2198,7 +2309,8 @@
 		model.setModified(True)
 		conf = str(model.exportModelAsString())
 	except Exception, e:
-		luci_log.debug_verbose('validateFdom2: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom2: %r' % e)
 		errors.append('Unable to update the cluster configuration')
 
 	if len(errors) > 0:
@@ -2206,13 +2318,15 @@
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('validateFdom3: unable to find a ricci agent for cluster %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom3: unable to find a ricci agent for cluster %s' % clustername)
 		return (False, {'errors': ['Unable to find a ricci agent for the %s cluster' % clustername ]})
 	ragent = rc.hostname()
 
 	batch_number, result = rq.setClusterConf(rc, conf)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('validateFdom4: missing batch and/or result')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom4: missing batch and/or result')
 		return (False, {'errors': [ 'An error occurred while constructing the new cluster configuration' ]})
 
 	try:
@@ -2221,7 +2335,8 @@
 		else:
 			set_node_flag(self, clustername, ragent, str(batch_number), FDOM_ADD, 'Creating failover domain "%s"' % name)
 	except Exception, e:
-		luci_log.debug_verbose('validateFdom5: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateFdom5: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&fdomname=%s&busyfirst=true' \
@@ -2237,7 +2352,8 @@
 		if not vm_name:
 			raise Exception, 'blank'
 	except Exception, e:
-		luci_log.debug_verbose('validateVM0: no vm name: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM0: no vm name: %r' % e)
 		errors.append('No virtual machine name was given')
 
 	try:
@@ -2245,7 +2361,8 @@
 		if not vm_path:
 			raise Exception, 'blank'
 	except Exception, e:
-		luci_log.debug_verbose('validateVM1: no vm path: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM1: no vm path: %r' % e)
 		errors.append('No path to the virtual machine configuration file was given')
 
 	autostart = 1
@@ -2348,7 +2465,8 @@
 		if not stringbuf:
 			raise Exception, 'model is blank'
 	except Exception, e:
-		luci_log.debug_verbose('validateVM2: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM2: %r' % e)
 		errors.append('Unable to update the cluster model')
 
 	try:
@@ -2356,7 +2474,8 @@
 		if not clustername:
 			raise Exception, 'cluster name from model.getClusterName() is blank'
 	except Exception, e:
-		luci_log.debug_verbose('validateVM3: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM3: %r' % e)
 		errors.append('Unable to determine the cluster name')
 
 	if len(errors) > 0:
@@ -2364,12 +2483,14 @@
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('validateVM4: no ricci for %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM4: no ricci for %s' % clustername)
 		return (False, {'errors': ['Unable to contact a ricci agent for this cluster']})
 
 	batch_number, result = rq.setClusterConf(rc, stringbuf)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('validateVM5: missing batch and/or result')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM5: missing batch and/or result')
 		return (False, {'errors': [ 'Error creating virtual machine %s' % vm_name ]})
 
 	try:
@@ -2380,7 +2501,8 @@
 		else:
 			set_node_flag(self, clustername, rc.hostname(), str(batch_number), VM_CONFIG, "Configuring virtual machine service \'%s\'" % vm_name)
 	except Exception, e:
-		luci_log.debug_verbose('validateVM6: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('validateVM6: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -2410,11 +2532,13 @@
 	try:
 		pagetype = int(request.form['pagetype'])
 	except Exception, e:
-		luci_log.debug_verbose('VP0: error: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VP0: error: %r' % e)
 		return None
 
 	if not pagetype in formValidators:
-		luci_log.debug_verbose('VP1: no handler for page type %d' % pagetype)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('VP1: no handler for page type %d' % pagetype)
 		return None
 	else:
 		return formValidators[pagetype](self, request)
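validatePost in miniature: an integer page type keys a dispatch table of validator callables, and unknown types are rejected before any handler runs. The page-type value and validator body below are hypothetical:

	def _noop_validator(self, request):
		return (True, {})

	formValidators = { 33: _noop_validator }

	def validate_post(self, request):
		try:
			pagetype = int(request.form['pagetype'])
		except (KeyError, ValueError):
			return None
		if pagetype not in formValidators:
			return None
		return formValidators[pagetype](self, request)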
@@ -2429,543 +2553,544 @@
 # permissions on at least one. If the user is admin, show ALL clusters
 
 def createCluChooser(self, request, systems):
-  dummynode = {}
+	dummynode = {}
 
-  if request.REQUEST_METHOD == 'POST':
-    ret = validatePost(self, request)
-    try:
-      request.SESSION.set('checkRet', ret[1])
-    except:
-      request.SESSION.set('checkRet', {})
-  else:
-    try:
-      request.SESSION.set('checkRet', {})
-    except:
-      pass
-
-  # First, see if a cluster is chosen, then
-  # check that the current user can access that system
-  cname = None
-  try:
-    cname = request[CLUNAME]
-  except:
-    cname = ""
-
-  try:
-    url = request['URL']
-  except:
-    url = "/luci/cluster/index_html"
-
-  try:
-    pagetype = request[PAGETYPE]
-  except:
-    pagetype = '3'
-
-  cldata = {}
-  cldata['Title'] = "Cluster List"
-  cldata['cfg_type'] = "clusters"
-  cldata['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERLIST)
-  cldata['Description'] = "Clusters available for configuration"
-  if pagetype == CLUSTERLIST:
-    cldata['currentItem'] = True
-  else:
-    cldata['currentItem'] = False
-
-  UserHasPerms = havePermCreateCluster(self)
-  if UserHasPerms:
-    cladd = {}
-    cladd['Title'] = "Create a New Cluster"
-    cladd['cfg_type'] = "clusteradd"
-    cladd['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTER_ADD)
-    cladd['Description'] = "Create a Cluster"
-    if pagetype == CLUSTER_ADD:
-      cladd['currentItem'] = True
-    else:
-      cladd['currentItem'] = False
-
-  clcfg = {}
-  clcfg['Title'] = "Configure"
-  clcfg['cfg_type'] = "clustercfg"
-  clcfg['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERS)
-  clcfg['Description'] = "Configure a cluster"
-  if pagetype == CLUSTERS:
-    clcfg['currentItem'] = True
-  else:
-    clcfg['currentItem'] = False
-
-  #test...
-  #clcfg['show_children'] = True
-  #Add all cluster type pages here:
-  if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
-    clcfg['show_children'] = True
-  else:
-    clcfg['show_children'] = False
-
-  #loop through all clusters
-  syslist = list()
-  for system in systems:
-    clsys = {}
-    clsys['Title'] = system[0]
-    clsys['cfg_type'] = "cluster"
-    clsys['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, CLUSTER, system[0])
-    clsys['Description'] = "Configure this cluster"
-
-    if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
-      if cname == system[0]:
-        clsys['currentItem'] = True
-      else:
-        clsys['currentItem'] = False
-    else:
-      clsys['currentItem'] = False
-    syslist.append(clsys)
-
-  clcfg['children'] = syslist
-
-  mylist = list()
-  mylist.append(cldata)
-  if UserHasPerms:
-    mylist.append(cladd)
-  mylist.append(clcfg)
-  dummynode['children'] = mylist
+	if request.REQUEST_METHOD == 'POST':
+		ret = validatePost(self, request)
+		try:
+			request.SESSION.set('checkRet', ret[1])
+		except:
+			request.SESSION.set('checkRet', {})
+	else:
+		try:
+			request.SESSION.set('checkRet', {})
+		except:
+			pass
 
-  return dummynode
+	# First, see if a cluster is chosen, then
+	# check that the current user can access that system
+	cname = None
+	try:
+		cname = request[CLUNAME]
+	except:
+		cname = ''
 
-def createCluConfigTree(self, request, model):
-  dummynode = {}
+	try:
+		url = request['URL']
+	except:
+		url = "/luci/cluster/index_html"
 
-  if not model:
-    return {}
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
 
-  # There should be a positive page type
-  try:
-    pagetype = request[PAGETYPE]
-  except:
-    pagetype = '3'
-
-  try:
-    url = request['URL']
-  except:
-    url = "/luci/cluster/index_html"
-
-  # The only way this method can run is if there exists
-  # a clustername query var
-  cluname = request['clustername']
-
-  nd = {}
-  nd['Title'] = "Nodes"
-  nd['cfg_type'] = "nodes"
-  nd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODES, cluname)
-  nd['Description'] = "Node configuration for this cluster"
-  if pagetype == NODES or pagetype == NODE_GRID or pagetype == NODE_LIST or pagetype == NODE_CONFIG or pagetype == NODE_ADD or pagetype == NODE:
-    nd['show_children'] = True
-  else:
-    nd['show_children'] = False
-  if pagetype == "0":
-    nd['show_children'] = False
-
-  if pagetype == NODES:
-    nd['currentItem'] = True
-  else:
-    nd['currentItem'] = False
-
-
-  ndadd = {}
-  ndadd['Title'] = "Add a Node"
-  ndadd['cfg_type'] = "nodeadd"
-  ndadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_ADD, cluname)
-  ndadd['Description'] = "Add a node to this cluster"
-  if pagetype == NODE_ADD:
-    ndadd['currentItem'] = True
-  else:
-    ndadd['currentItem'] = False
-
-  ndcfg = {}
-  ndcfg['Title'] = "Configure"
-  ndcfg['cfg_type'] = "nodecfg"
-  ndcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_CONFIG, cluname)
-  ndcfg['Description'] = "Configure cluster nodes"
-  if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODE_GRID or pagetype == NODE_ADD:
-    ndcfg['show_children'] = True
-  else:
-    ndcfg['show_children'] = False
-  if pagetype == NODE_CONFIG:
-    ndcfg['currentItem'] = True
-  else:
-    ndcfg['currentItem'] = False
-
-  nodes = model.getNodes()
-  nodenames = list()
-  for node in nodes:
-    nodenames.append(node.getName())
-
-  cfgablenodes = list()
-  for nodename in nodenames:
-    cfg = {}
-    cfg['Title'] = nodename
-    cfg['cfg_type'] = "node"
-    cfg['absolute_url'] = '%s?pagetype=%s&nodename=%s&clustername=%s' % (url, NODE, nodename, cluname)
-    cfg['Description'] = "Configure this cluster node"
-    if pagetype == NODE:
-      try:
-        nname = request['nodename']
-      except KeyError, e:
-        nname = ""
-      if nodename == nname:
-        cfg['currentItem'] = True
-      else:
-        cfg['currentItem'] = False
-    else:
-      cfg['currentItem'] = False
-
-    cfgablenodes.append(cfg)
-
-  #Now add nodename structs as children of the config element
-  ndcfg['children'] = cfgablenodes
-
-  ndkids = list()
-  ndkids.append(ndadd)
-  ndkids.append(ndcfg)
-
-  nd['children'] = ndkids
-
-  ##################################################################
-  sv = {}
-  sv['Title'] = "Services"
-  sv['cfg_type'] = "services"
-  sv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICES, cluname)
-  sv['Description'] = "Service configuration for this cluster"
-  if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
-    sv['show_children'] = True
-  else:
-    sv['show_children'] = False
-  if pagetype == SERVICES or pagetype == SERVICE_LIST:
-    sv['currentItem'] = True
-  else:
-    sv['currentItem'] = False
-
-  svadd = {}
-  svadd['Title'] = "Add a Service"
-  svadd['cfg_type'] = "serviceadd"
-  svadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_ADD, cluname)
-  svadd['Description'] = "Add a Service to this cluster"
-  if pagetype == SERVICE_ADD:
-    svadd['currentItem'] = True
-  else:
-    svadd['currentItem'] = False
-
-  if model.getIsVirtualized() is True:
-    vmadd = {}
-    vmadd['Title'] = "Add a Virtual Service"
-    vmadd['cfg_type'] = "vmadd"
-    vmadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, VM_ADD, cluname)
-    vmadd['Description'] = "Add a Virtual Service to this cluster"
-    if pagetype == VM_ADD:
-      vmadd['currentItem'] = True
-    else:
-      vmadd['currentItem'] = False
-
-  svcfg = {}
-  svcfg['Title'] = "Configure a Service"
-  svcfg['cfg_type'] = "servicecfg"
-  svcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_CONFIG, cluname)
-  svcfg['Description'] = "Configure a Service for this cluster"
-  if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
-    svcfg['show_children'] = True
-  else:
-    svcfg['show_children'] = False
-  if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
-    svcfg['currentItem'] = True
-  else:
-    svcfg['currentItem'] = False
-
-  services = model.getServices()
-  serviceable = list()
-
-  for service in services:
-    servicename = service.getName()
-    svc = {}
-    svc['Title'] = servicename
-    svc['cfg_type'] = "service"
-    svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, SERVICE, servicename, cluname)
-    svc['Description'] = "Configure this service"
-    if pagetype == SERVICE:
-      try:
-        sname = request['servicename']
-      except KeyError, e:
-        sname = ""
-      if servicename == sname:
-        svc['currentItem'] = True
-      else:
-        svc['currentItem'] = False
-    else:
-      svc['currentItem'] = False
-
-    serviceable.append(svc)
-
-  vms = model.getVMs()
-  for vm in vms:
-    name = vm.getName()
-    svc = {}
-    svc['Title'] = name
-    svc['cfg_type'] = "vm"
-    svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, VM_CONFIG, name, cluname)
-    svc['Description'] = "Configure this Virtual Service"
-    if pagetype == VM_CONFIG:
-      try:
-        xname = request['servicename']
-      except KeyError, e:
-        xname = ""
-      if name == xname:
-        svc['currentItem'] = True
-      else:
-        svc['currentItem'] = False
-    else:
-      svc['currentItem'] = False
-
-    serviceable.append(svc)
-
-  svcfg['children'] = serviceable
-
-
-
-  kids = list()
-  kids.append(svadd)
-  if model.getIsVirtualized() is True:
-    kids.append(vmadd)
-  kids.append(svcfg)
-  sv['children'] = kids
-#############################################################
-  rv = {}
-  rv['Title'] = "Resources"
-  rv['cfg_type'] = "resources"
-  rv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCES, cluname)
-  rv['Description'] = "Resource configuration for this cluster"
-  if pagetype == RESOURCES or pagetype == RESOURCE_CONFIG or pagetype == RESOURCE_ADD or pagetype == RESOURCE:
-    rv['show_children'] = True
-  else:
-    rv['show_children'] = False
-  if pagetype == RESOURCES:
-    rv['currentItem'] = True
-  else:
-    rv['currentItem'] = False
-
-  rvadd = {}
-  rvadd['Title'] = "Add a Resource"
-  rvadd['cfg_type'] = "resourceadd"
-  rvadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_ADD, cluname)
-  rvadd['Description'] = "Add a Resource to this cluster"
-  if pagetype == RESOURCE_ADD:
-    rvadd['currentItem'] = True
-  else:
-    rvadd['currentItem'] = False
-
-  rvcfg = {}
-  rvcfg['Title'] = "Configure a Resource"
-  rvcfg['cfg_type'] = "resourcecfg"
-  rvcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_CONFIG, cluname)
-  rvcfg['Description'] = "Configure a Resource for this cluster"
-  if pagetype == RESOURCE_CONFIG or pagetype == RESOURCE:
-    rvcfg['show_children'] = True
-  else:
-    rvcfg['show_children'] = False
-  if pagetype == RESOURCE_CONFIG:
-    rvcfg['currentItem'] = True
-  else:
-    rvcfg['currentItem'] = False
-
-  resources = model.getResources()
-  resourceable = list()
-  for resource in resources:
-    resourcename = resource.getName()
-    rvc = {}
-    rvc['Title'] = resourcename
-    rvc['cfg_type'] = "resource"
-    rvc['absolute_url'] = '%s?pagetype=%s&resourcename=%s&clustername=%s' % (url, RESOURCES, resourcename, cluname)
-    rvc['Description'] = "Configure this resource"
-    if pagetype == RESOURCE:
-      try:
-        rname = request['resourcename']
-      except KeyError, e:
-        rname = ""
-      if resourcename == rname:
-        rvc['currentItem'] = True
-      else:
-        rvc['currentItem'] = False
-    else:
-      rvc['currentItem'] = False
-
-    resourceable.append(rvc)
-  rvcfg['children'] = resourceable
-
-
-
-  kids = list()
-  kids.append(rvadd)
-  kids.append(rvcfg)
-  rv['children'] = kids
- ################################################################
-  fd = {}
-  fd['Title'] = "Failover Domains"
-  fd['cfg_type'] = "failoverdomains"
-  fd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOMS, cluname)
-  fd['Description'] = "Failover domain configuration for this cluster"
-  if pagetype == FDOMS or pagetype == FDOM_CONFIG or pagetype == FDOM_ADD or pagetype == FDOM:
-    fd['show_children'] = True
-  else:
-    fd['show_children'] = False
-  if pagetype == FDOMS:
-    fd['currentItem'] = True
-  else:
-    fd['currentItem'] = False
-
-  fdadd = {}
-  fdadd['Title'] = "Add a Failover Domain"
-  fdadd['cfg_type'] = "failoverdomainadd"
-  fdadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_ADD, cluname)
-  fdadd['Description'] = "Add a Failover Domain to this cluster"
-  if pagetype == FDOM_ADD:
-    fdadd['currentItem'] = True
-  else:
-    fdadd['currentItem'] = False
-
-  fdcfg = {}
-  fdcfg['Title'] = "Configure a Failover Domain"
-  fdcfg['cfg_type'] = "failoverdomaincfg"
-  fdcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_CONFIG, cluname)
-  fdcfg['Description'] = "Configure a Failover Domain for this cluster"
-  if pagetype == FDOM_CONFIG or pagetype == FDOM:
-    fdcfg['show_children'] = True
-  else:
-    fdcfg['show_children'] = False
-  if pagetype == FDOM_CONFIG:
-    fdcfg['currentItem'] = True
-  else:
-    fdcfg['currentItem'] = False
-
-  fdoms = model.getFailoverDomains()
-  fdomable = list()
-  for fdom in fdoms:
-    fdomname = fdom.getName()
-    fdc = {}
-    fdc['Title'] = fdomname
-    fdc['cfg_type'] = "fdom"
-    fdc['absolute_url'] = '%s?pagetype=%s&fdomname=%s&clustername=%s' % (url, FDOM, fdomname, cluname)
-    fdc['Description'] = "Configure this Failover Domain"
-    if pagetype == FDOM:
-      try:
-        fname = request['fdomname']
-      except KeyError, e:
-        fname = ""
-      if fdomname == fname:
-        fdc['currentItem'] = True
-      else:
-        fdc['currentItem'] = False
-    else:
-      fdc['currentItem'] = False
-
-    fdomable.append(fdc)
-  fdcfg['children'] = fdomable
-
-
-
-  kids = list()
-  kids.append(fdadd)
-  kids.append(fdcfg)
-  fd['children'] = kids
-#############################################################
-  fen = {}
-  fen['Title'] = "Shared Fence Devices"
-  fen['cfg_type'] = "fencedevicess"
-  fen['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEVS, cluname)
-  fen['Description'] = "Fence Device configuration for this cluster"
-  if pagetype == FENCEDEVS or pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV_ADD or pagetype == FENCEDEV:
-    fen['show_children'] = True
-  else:
-    fen['show_children'] = False
-  if pagetype == FENCEDEVS:
-    fen['currentItem'] = True
-  else:
-    fen['currentItem'] = False
-
-  fenadd = {}
-  fenadd['Title'] = "Add a Fence Device"
-  fenadd['cfg_type'] = "fencedeviceadd"
-  fenadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_ADD, cluname)
-  fenadd['Description'] = "Add a Fence Device to this cluster"
-  if pagetype == FENCEDEV_ADD:
-    fenadd['currentItem'] = True
-  else:
-    fenadd['currentItem'] = False
-
-  fencfg = {}
-  fencfg['Title'] = "Configure a Fence Device"
-  fencfg['cfg_type'] = "fencedevicecfg"
-  fencfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_CONFIG, cluname)
-  fencfg['Description'] = "Configure a Fence Device for this cluster"
-  if pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV:
-    fencfg['show_children'] = True
-  else:
-    fencfg['show_children'] = False
-  if pagetype == FENCEDEV_CONFIG:
-    fencfg['currentItem'] = True
-  else:
-    fencfg['currentItem'] = False
-
-  fences = model.getFenceDevices()
-  fenceable = list()
-  for fence in fences:
-    fencename = fence.getName()
-    fenc = {}
-    fenc['Title'] = fencename
-    fenc['cfg_type'] = "fencedevice"
-    fenc['absolute_url'] = '%s?pagetype=%s&fencename=%s&clustername=%s' % (url, FENCEDEV, fencename, cluname)
-    fenc['Description'] = "Configure this Fence Device"
-    if pagetype == FENCEDEV:
-      try:
-        fenname = request['fencename']
-      except KeyError, e:
-        fenname = ""
-      if fencename == fenname:
-        fenc['currentItem'] = True
-      else:
-        fenc['currentItem'] = False
-    else:
-      fenc['currentItem'] = False
-
-    fenceable.append(fenc)
-  fencfg['children'] = fenceable
-
-
-
-  kids = list()
-  kids.append(fenadd)
-  kids.append(fencfg)
-  fen['children'] = kids
-#############################################################
+	cldata = {}
+	cldata['Title'] = "Cluster List"
+	cldata['cfg_type'] = "clusters"
+	cldata['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERLIST)
+	cldata['Description'] = "Clusters available for configuration"
+	if pagetype == CLUSTERLIST:
+		cldata['currentItem'] = True
+	else:
+		cldata['currentItem'] = False
 
-  mylist = list()
-  mylist.append(nd)
-  mylist.append(sv)
-  mylist.append(rv)
-  mylist.append(fd)
-  mylist.append(fen)
+	UserHasPerms = havePermCreateCluster(self)
+	if UserHasPerms:
+		cladd = {}
+		cladd['Title'] = "Create a New Cluster"
+		cladd['cfg_type'] = "clusteradd"
+		cladd['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTER_ADD)
+		cladd['Description'] = "Create a Cluster"
+		if pagetype == CLUSTER_ADD:
+			cladd['currentItem'] = True
+		else:
+			cladd['currentItem'] = False
 
-  dummynode['children'] = mylist
+	clcfg = {}
+	clcfg['Title'] = "Configure"
+	clcfg['cfg_type'] = "clustercfg"
+	clcfg['absolute_url'] = '%s?pagetype=%s' % (url, CLUSTERS)
+	clcfg['Description'] = "Configure a cluster"
+	if pagetype == CLUSTERS:
+		clcfg['currentItem'] = True
+	else:
+		clcfg['currentItem'] = False
 
-  return dummynode
+	#test...
+	#clcfg['show_children'] = True
+	#Add all cluster type pages here:
+	if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+		clcfg['show_children'] = True
+	else:
+		clcfg['show_children'] = False
 
-def getClusterURL(self, request, model):
-	try:
-		clustername = request.clustername
-		if not clustername:
-			raise Exception, 'cluster name from request is blank'
+	#loop through all clusters
+	syslist = list()
+	for system in systems:
+		clsys = {}
+		clsys['Title'] = system[0]
+		clsys['cfg_type'] = "cluster"
+		clsys['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, CLUSTER, system[0])
+		clsys['Description'] = "Configure this cluster"
+
+		if pagetype == CLUSTER or pagetype == CLUSTER_CONFIG:
+			if cname == system[0]:
+				clsys['currentItem'] = True
+			else:
+				clsys['currentItem'] = False
+		else:
+			clsys['currentItem'] = False
+		syslist.append(clsys)
+
+	clcfg['children'] = syslist
+
+	mylist = list()
+	mylist.append(cldata)
+	if UserHasPerms:
+		mylist.append(cladd)
+	mylist.append(clcfg)
+	dummynode['children'] = mylist
+
+	return dummynode
+
+def createCluConfigTree(self, request, model):
+	dummynode = {}
+
+	if not model:
+		return {}
+
+	# There should be a positive page type
+	try:
+		pagetype = request[PAGETYPE]
+	except:
+		pagetype = '3'
+
+	try:
+		url = request['URL']
+	except:
+		url = "/luci/cluster/index_html"
+
+	# The only way this method can run is if there exists
+	# a clustername query var
+	cluname = request['clustername']
+
+	nd = {}
+	nd['Title'] = "Nodes"
+	nd['cfg_type'] = "nodes"
+	nd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODES, cluname)
+	nd['Description'] = "Node configuration for this cluster"
+	if pagetype == NODES or pagetype == NODE_GRID or pagetype == NODE_LIST or pagetype == NODE_CONFIG or pagetype == NODE_ADD or pagetype == NODE:
+		nd['show_children'] = True
+	else:
+		nd['show_children'] = False
+	if pagetype == '0':
+		nd['show_children'] = False
+
+	if pagetype == NODES:
+		nd['currentItem'] = True
+	else:
+		nd['currentItem'] = False
+
+
+	ndadd = {}
+	ndadd['Title'] = "Add a Node"
+	ndadd['cfg_type'] = "nodeadd"
+	ndadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_ADD, cluname)
+	ndadd['Description'] = "Add a node to this cluster"
+	if pagetype == NODE_ADD:
+		ndadd['currentItem'] = True
+	else:
+		ndadd['currentItem'] = False
+
+	ndcfg = {}
+	ndcfg['Title'] = "Configure"
+	ndcfg['cfg_type'] = "nodecfg"
+	ndcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, NODE_CONFIG, cluname)
+	ndcfg['Description'] = "Configure cluster nodes"
+	if pagetype == NODE_CONFIG or pagetype == NODE or pagetype == NODES or pagetype == NODE_LIST or pagetype == NODE_GRID or pagetype == NODE_ADD:
+		ndcfg['show_children'] = True
+	else:
+		ndcfg['show_children'] = False
+	if pagetype == NODE_CONFIG:
+		ndcfg['currentItem'] = True
+	else:
+		ndcfg['currentItem'] = False
+
+	nodes = model.getNodes()
+	nodenames = list()
+	for node in nodes:
+		nodenames.append(node.getName())
+
+	cfgablenodes = list()
+	for nodename in nodenames:
+		cfg = {}
+		cfg['Title'] = nodename
+		cfg['cfg_type'] = "node"
+		cfg['absolute_url'] = '%s?pagetype=%s&nodename=%s&clustername=%s' % (url, NODE, nodename, cluname)
+		cfg['Description'] = "Configure this cluster node"
+		if pagetype == NODE:
+			try:
+				nname = request['nodename']
+			except KeyError, e:
+				nname = ''
+			if nodename == nname:
+				cfg['currentItem'] = True
+			else:
+				cfg['currentItem'] = False
+		else:
+			cfg['currentItem'] = False
+
+		cfgablenodes.append(cfg)
+
+	#Now add nodename structs as children of the config element
+	ndcfg['children'] = cfgablenodes
+
+	ndkids = list()
+	ndkids.append(ndadd)
+	ndkids.append(ndcfg)
+
+	nd['children'] = ndkids
+
+	##################################################################
+	sv = {}
+	sv['Title'] = "Services"
+	sv['cfg_type'] = "services"
+	sv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICES, cluname)
+	sv['Description'] = "Service configuration for this cluster"
+	if pagetype == SERVICES or pagetype == SERVICE_CONFIG or pagetype == SERVICE_ADD or pagetype == SERVICE or pagetype == SERVICE_LIST or pagetype == VM_ADD or pagetype == VM_CONFIG:
+		sv['show_children'] = True
+	else:
+		sv['show_children'] = False
+	if pagetype == SERVICES or pagetype == SERVICE_LIST:
+		sv['currentItem'] = True
+	else:
+		sv['currentItem'] = False
+
+	svadd = {}
+	svadd['Title'] = "Add a Service"
+	svadd['cfg_type'] = "serviceadd"
+	svadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_ADD, cluname)
+	svadd['Description'] = "Add a Service to this cluster"
+	if pagetype == SERVICE_ADD:
+		svadd['currentItem'] = True
+	else:
+		svadd['currentItem'] = False
+
+	if model.getIsVirtualized() is True:
+		vmadd = {}
+		vmadd['Title'] = "Add a Virtual Service"
+		vmadd['cfg_type'] = "vmadd"
+		vmadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, VM_ADD, cluname)
+		vmadd['Description'] = "Add a Virtual Service to this cluster"
+		if pagetype == VM_ADD:
+			vmadd['currentItem'] = True
+		else:
+			vmadd['currentItem'] = False
+
+	svcfg = {}
+	svcfg['Title'] = "Configure a Service"
+	svcfg['cfg_type'] = "servicecfg"
+	svcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, SERVICE_CONFIG, cluname)
+	svcfg['Description'] = "Configure a Service for this cluster"
+	if pagetype == SERVICE_CONFIG or pagetype == SERVICE or pagetype == VM_CONFIG:
+		svcfg['show_children'] = True
+	else:
+		svcfg['show_children'] = False
+	if pagetype == SERVICE_CONFIG or pagetype == VM_CONFIG:
+		svcfg['currentItem'] = True
+	else:
+		svcfg['currentItem'] = False
+
+	services = model.getServices()
+	serviceable = list()
+
+	for service in services:
+		servicename = service.getName()
+		svc = {}
+		svc['Title'] = servicename
+		svc['cfg_type'] = "service"
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, SERVICE, servicename, cluname)
+		svc['Description'] = "Configure this service"
+		if pagetype == SERVICE:
+			try:
+				sname = request['servicename']
+			except KeyError, e:
+				sname = ''
+			if servicename == sname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	vms = model.getVMs()
+	for vm in vms:
+		name = vm.getName()
+		svc = {}
+		svc['Title'] = name
+		svc['cfg_type'] = "vm"
+		svc['absolute_url'] = '%s?pagetype=%s&servicename=%s&clustername=%s' % (url, VM_CONFIG, name, cluname)
+		svc['Description'] = "Configure this Virtual Service"
+		if pagetype == VM_CONFIG:
+			try:
+				xname = request['servicename']
+			except KeyError, e:
+				xname = ''
+			if name == xname:
+				svc['currentItem'] = True
+			else:
+				svc['currentItem'] = False
+		else:
+			svc['currentItem'] = False
+
+		serviceable.append(svc)
+
+	svcfg['children'] = serviceable
+
+
+
+	kids = list()
+	kids.append(svadd)
+	if model.getIsVirtualized() is True:
+		kids.append(vmadd)
+	kids.append(svcfg)
+	sv['children'] = kids
+#############################################################
+	rv = {}
+	rv['Title'] = "Resources"
+	rv['cfg_type'] = "resources"
+	rv['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCES, cluname)
+	rv['Description'] = "Resource configuration for this cluster"
+	if pagetype == RESOURCES or pagetype == RESOURCE_CONFIG or pagetype == RESOURCE_ADD or pagetype == RESOURCE:
+		rv['show_children'] = True
+	else:
+		rv['show_children'] = False
+	if pagetype == RESOURCES:
+		rv['currentItem'] = True
+	else:
+		rv['currentItem'] = False
+
+	rvadd = {}
+	rvadd['Title'] = "Add a Resource"
+	rvadd['cfg_type'] = "resourceadd"
+	rvadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_ADD, cluname)
+	rvadd['Description'] = "Add a Resource to this cluster"
+	if pagetype == RESOURCE_ADD:
+		rvadd['currentItem'] = True
+	else:
+		rvadd['currentItem'] = False
+
+	rvcfg = {}
+	rvcfg['Title'] = "Configure a Resource"
+	rvcfg['cfg_type'] = "resourcecfg"
+	rvcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, RESOURCE_CONFIG, cluname)
+	rvcfg['Description'] = "Configure a Resource for this cluster"
+	if pagetype == RESOURCE_CONFIG or pagetype == RESOURCE:
+		rvcfg['show_children'] = True
+	else:
+		rvcfg['show_children'] = False
+	if pagetype == RESOURCE_CONFIG:
+		rvcfg['currentItem'] = True
+	else:
+		rvcfg['currentItem'] = False
+
+	resources = model.getResources()
+	resourceable = list()
+	for resource in resources:
+		resourcename = resource.getName()
+		rvc = {}
+		rvc['Title'] = resourcename
+		rvc['cfg_type'] = "resource"
+		rvc['absolute_url'] = '%s?pagetype=%s&resourcename=%s&clustername=%s' % (url, RESOURCES, resourcename, cluname)
+		rvc['Description'] = "Configure this resource"
+		if pagetype == RESOURCE:
+			try:
+				rname = request['resourcename']
+			except KeyError, e:
+				rname = ''
+			if resourcename == rname:
+				rvc['currentItem'] = True
+			else:
+				rvc['currentItem'] = False
+		else:
+			rvc['currentItem'] = False
+
+		resourceable.append(rvc)
+	rvcfg['children'] = resourceable
+
+
+
+	kids = list()
+	kids.append(rvadd)
+	kids.append(rvcfg)
+	rv['children'] = kids
+ ################################################################
+	fd = {}
+	fd['Title'] = "Failover Domains"
+	fd['cfg_type'] = "failoverdomains"
+	fd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOMS, cluname)
+	fd['Description'] = "Failover domain configuration for this cluster"
+	if pagetype == FDOMS or pagetype == FDOM_CONFIG or pagetype == FDOM_ADD or pagetype == FDOM:
+		fd['show_children'] = True
+	else:
+		fd['show_children'] = False
+	if pagetype == FDOMS:
+		fd['currentItem'] = True
+	else:
+		fd['currentItem'] = False
+
+	fdadd = {}
+	fdadd['Title'] = "Add a Failover Domain"
+	fdadd['cfg_type'] = "failoverdomainadd"
+	fdadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_ADD, cluname)
+	fdadd['Description'] = "Add a Failover Domain to this cluster"
+	if pagetype == FDOM_ADD:
+		fdadd['currentItem'] = True
+	else:
+		fdadd['currentItem'] = False
+
+	fdcfg = {}
+	fdcfg['Title'] = "Configure a Failover Domain"
+	fdcfg['cfg_type'] = "failoverdomaincfg"
+	fdcfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FDOM_CONFIG, cluname)
+	fdcfg['Description'] = "Configure a Failover Domain for this cluster"
+	if pagetype == FDOM_CONFIG or pagetype == FDOM:
+		fdcfg['show_children'] = True
+	else:
+		fdcfg['show_children'] = False
+	if pagetype == FDOM_CONFIG:
+		fdcfg['currentItem'] = True
+	else:
+		fdcfg['currentItem'] = False
+
+	fdoms = model.getFailoverDomains()
+	fdomable = list()
+	for fdom in fdoms:
+		fdomname = fdom.getName()
+		fdc = {}
+		fdc['Title'] = fdomname
+		fdc['cfg_type'] = "fdom"
+		fdc['absolute_url'] = '%s?pagetype=%s&fdomname=%s&clustername=%s' % (url, FDOM, fdomname, cluname)
+		fdc['Description'] = "Configure this Failover Domain"
+		if pagetype == FDOM:
+			try:
+				fname = request['fdomname']
+			except KeyError, e:
+				fname = ''
+			if fdomname == fname:
+				fdc['currentItem'] = True
+			else:
+				fdc['currentItem'] = False
+		else:
+			fdc['currentItem'] = False
+
+		fdomable.append(fdc)
+	fdcfg['children'] = fdomable
+
+
+
+	kids = list()
+	kids.append(fdadd)
+	kids.append(fdcfg)
+	fd['children'] = kids
+#############################################################
+	fen = {}
+	fen['Title'] = "Shared Fence Devices"
+	fen['cfg_type'] = "fencedevicess"
+	fen['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEVS, cluname)
+	fen['Description'] = "Fence Device configuration for this cluster"
+	if pagetype == FENCEDEVS or pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV_ADD or pagetype == FENCEDEV:
+		fen['show_children'] = True
+	else:
+		fen['show_children'] = False
+	if pagetype == FENCEDEVS:
+		fen['currentItem'] = True
+	else:
+		fen['currentItem'] = False
+
+	fenadd = {}
+	fenadd['Title'] = "Add a Fence Device"
+	fenadd['cfg_type'] = "fencedeviceadd"
+	fenadd['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_ADD, cluname)
+	fenadd['Description'] = "Add a Fence Device to this cluster"
+	if pagetype == FENCEDEV_ADD:
+		fenadd['currentItem'] = True
+	else:
+		fenadd['currentItem'] = False
+
+	fencfg = {}
+	fencfg['Title'] = "Configure a Fence Device"
+	fencfg['cfg_type'] = "fencedevicecfg"
+	fencfg['absolute_url'] = '%s?pagetype=%s&clustername=%s' % (url, FENCEDEV_CONFIG, cluname)
+	fencfg['Description'] = "Configure a Fence Device for this cluster"
+	if pagetype == FENCEDEV_CONFIG or pagetype == FENCEDEV:
+		fencfg['show_children'] = True
+	else:
+		fencfg['show_children'] = False
+	if pagetype == FENCEDEV_CONFIG:
+		fencfg['currentItem'] = True
+	else:
+		fencfg['currentItem'] = False
+
+	fences = model.getFenceDevices()
+	fenceable = list()
+	for fence in fences:
+		fencename = fence.getName()
+		fenc = {}
+		fenc['Title'] = fencename
+		fenc['cfg_type'] = "fencedevice"
+		fenc['absolute_url'] = '%s?pagetype=%s&fencename=%s&clustername=%s' % (url, FENCEDEV, fencename, cluname)
+		fenc['Description'] = "Configure this Fence Device"
+		if pagetype == FENCEDEV:
+			try:
+				fenname = request['fencename']
+			except KeyError, e:
+				fenname = ''
+			if fencename == fenname:
+				fenc['currentItem'] = True
+			else:
+				fenc['currentItem'] = False
+		else:
+			fenc['currentItem'] = False
+
+		fenceable.append(fenc)
+	fencfg['children'] = fenceable
+
+
+
+	kids = list()
+	kids.append(fenadd)
+	kids.append(fencfg)
+	fen['children'] = kids
+#############################################################
+
+	mylist = list()
+	mylist.append(nd)
+	mylist.append(sv)
+	mylist.append(rv)
+	mylist.append(fd)
+	mylist.append(fen)
+
+	dummynode['children'] = mylist
+
+	return dummynode
+
+def getClusterURL(self, request, model):
+	try:
+		clustername = request.clustername
+		if not clustername:
+			raise Exception, 'cluster name from request is blank'
 	except:
 		try:
 			clustername = model.getClusterName()
 			if not clustername:
 				raise Exception, 'cluster name from model is blank'
 		except:
-			luci_log.debug_verbose('GCURL0: unable to get cluster name')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GCURL0: unable to get cluster name')
 			return ''
 
 	return '/luci/cluster/index_html?pagetype=7&clustername=%s' % clustername
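Both tree builders above emit the same node shape over and over (Title, cfg_type, absolute_url, Description, currentItem, optional children). The patch keeps the long-hand form; the shape itself reduces to this sketch, with stand-in values for the page-type constant and cluster name:

	NODES = '15'  # stand-in; the real constant is defined elsewhere in the module

	def menu_node(title, cfg_type, url, query, description, current=False):
		return {
			'Title': title,
			'cfg_type': cfg_type,
			'absolute_url': '%s?%s' % (url, query),
			'Description': description,
			'currentItem': current,
		}

	nd = menu_node('Nodes', 'nodes', '/luci/cluster/index_html',
		'pagetype=%s&clustername=%s' % (NODES, 'alpha'),
		'Node configuration for this cluster')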
@@ -2986,104 +3111,11 @@
 			pass
 
 	if clustername is None:
-		luci_log.debug('GRAFC0: no cluster name was found')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('GRAFC0: no cluster name was found')
 		return None
 	return getRicciAgent(self, clustername)
 
-def getClusterStatusModel(model):
-	results = list()
-	vals = {}
-
-	try:
-		clustername = model.getClusterName()
-		clusteralias = model.getClusterAlias()
-		vals['type'] = 'cluster'
-		vals['alias'] = clusteralias
-		vals['name'] = clustername
-		vals['error'] = True
-		vals['votes'] = '[unknown]'
-		vals['quorate'] = '[unknown]'
-		vals['minQuorum'] = '[unknown]'
-		results.append(vals)
-	except Exception, e:
-		luci_log.debug_verbose('GCSM0: %s' % str(e))
-		return None
-
-	try:
-		nodelist = model.getNodes()
-	except Exception, e:
-		luci_log.debug_verbose('GCSM1: %s' % str(e))
-		return None
-
-	for node in nodelist:
-		node_val = {}
-		node_val['type'] = 'node'
-		try:
-			node_name = node.getName()
-			if not node_name:
-				raise Exception, 'cluster node name is unknown'
-		except:
-			node_name = '[unknown]'
-
-		node_val['name'] = node_name
-		node_val['clustered'] = '[unknown]'
-		node_val['online'] = '[unknown]'
-		node_val['error'] = True
-
-		try:
-			votes = node.getVotes()
-			if not votes:
-				raise Exception, 'unknown unmber of votes'
-		except:
-			votes = '[unknown]'
-
-		node_val['votes'] = votes
-		results.append(node_val)
-	return results
-
-def getClusterStatusDB(self, clustername):
-	results = list()
-	vals = {}
-
-	vals['type'] = 'cluster'
-	vals['alias'] = clustername
-	vals['name'] = clustername
-	vals['error'] = True
-	vals['quorate'] = '[unknown]'
-	vals['votes'] = '[unknown]'
-	vals['minQuorum'] = '[unknown]'
-	results.append(vals)
-
-	try:
-		cluster_path = '%s%s' % (CLUSTER_FOLDER_PATH, clustername)
-		nodelist = self.restrictedTraverse(cluster_path).objectItems('Folder')
-	except Exception, e:
-		luci_log.debug_verbose('GCSDB0: %s -> %s: %s' \
-			% (clustername, cluster_path, str(e)))
-		return results
-
-	if len(nodelist) < 1:
-		luci_log.debug_verbose('GCSDB0a: removing cluster %s because it has no nodes' % clustername)
-		try:
-			clusters_dir = self.restrictedTraverse(CLUSTER_FOLDER_PATH)
-			clusters_dir.manage_delObjects([clustername])
-		except Exception, e:
-			luci_log.debug_verbose('GCSDB0b: %s: %s' % (clustername, str(e)))
-	else:
-		for node in nodelist:
-			try:
-				node_val = {}
-				node_val['type'] = 'node'
-				node_val['name'] = node[0]
-				node_val['clustered'] = '[unknown]'
-				node_val['online'] = '[unknown]'
-				node_val['error'] = True
-				results.append(node_val)
-			except Exception, e:
-				luci_log.debug_verbose('GCSDB1: %s' % str(e))
-
-	return results
-
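serviceStart and the serviceMigrate/serviceRestart/serviceStop handlers below all follow one script: resolve the service and cluster names from the form (falling back to the request), run the ricci batch, set a busy flag, and redirect. A hedged condensation (rq and set_node_flag are the module's names; batch_fn, the flag argument and the redirect page type are illustrative):

	def service_action(self, rc, req, batch_fn, flag, status_msg):
		svcname = req.form.get('servicename') or req.get('servicename')
		cluname = req.form.get('clustername') or req.get('clustername')
		if not svcname or not cluname:
			return None
		batch_number, result = batch_fn(rc, svcname)
		if batch_number is None or result is None:
			return None
		try:
			set_node_flag(self, cluname, rc.hostname(),
				str(batch_number), flag, status_msg % svcname)
		except:
			pass
		# CLUSTER stands in for the module's page-type constant.
		req.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
			% (req['URL'], CLUSTER, cluname))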
 def serviceStart(self, rc, req):
 	svcname = None
 	try:
@@ -3095,7 +3127,8 @@
 			pass
 
 	if svcname is None:
-		luci_log.debug_verbose('serviceStart0: no service name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStart0: no service name')
 		return None
 
 	nodename = None
@@ -3117,14 +3150,16 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('serviceStart2: no cluster name for svc %s' \
-			% svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStart2: no cluster name for svc %s' \
+				% svcname)
 		return None
 
 	batch_number, result = rq.startService(rc, svcname, nodename)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('serviceStart3: SS(%s,%s,%s) call failed' \
-			% (svcname, cluname, nodename))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStart3: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
 		return None
 
 	try:
@@ -3135,7 +3170,8 @@
 			status_msg = 'Starting service "%s"' % svcname
 		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, status_msg)
 	except Exception, e:
-		luci_log.debug_verbose('serviceStart4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStart4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
 
 	response = req.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3152,7 +3188,8 @@
 			pass
 
 	if svcname is None:
-		luci_log.debug_verbose('serviceMigrate0: no service name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceMigrate0: no service name')
 		return None
 
 	nodename = None
@@ -3165,7 +3202,8 @@
 			pass
 
 	if nodename is None:
-		luci_log.debug_verbose('serviceMigrate1: no target node name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceMigrate1: no target node name')
 		return None
 
 	cluname = None
@@ -3178,20 +3216,23 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('serviceMigrate2: no cluster name for svc %s' \
-			% svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceMigrate2: no cluster name for svc %s' \
+				% svcname)
 		return None
 
 	batch_number, result = rq.migrateService(rc, svcname, nodename)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('serviceMigrate3: SS(%s,%s,%s) call failed' \
-			% (svcname, cluname, nodename))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceMigrate3: SS(%s,%s,%s) call failed' \
+				% (svcname, cluname, nodename))
 		return None
 
 	try:
 		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_START, "Migrating service \'%s\' to node \'%s\'" % (svcname, nodename))
 	except Exception, e:
-		luci_log.debug_verbose('serviceMigrate4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceMigrate4: error setting flags for service %s at node %s for cluster %s' % (svcname, nodename, cluname))
 
 	response = req.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3208,7 +3249,8 @@
 			pass
 
 	if svcname is None:
-		luci_log.debug_verbose('serviceRestart0: no service name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceRestart0: no service name')
 		return None
 
 	cluname = None
@@ -3221,18 +3263,21 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('serviceRestart1: no cluster for %s' % svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceRestart1: no cluster for %s' % svcname)
 		return None
 
 	batch_number, result = rq.restartService(rc, svcname)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceRestart2: %s failed' % svcname)
 		return None
 
 	try:
 		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_RESTART, "Restarting service \'%s\'" % svcname)
 	except Exception, e:
-		luci_log.debug_verbose('serviceRestart3: error setting flags for service %s for cluster %s' % (svcname, cluname))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceRestart3: error setting flags for service %s for cluster %s' % (svcname, cluname))
 
 	response = req.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3249,7 +3294,8 @@
 			pass
 
 	if svcname is None:
-		luci_log.debug_verbose('serviceStop0: no service name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStop0: no service name')
 		return None
 
 	cluname = None
@@ -3262,18 +3308,21 @@
 			pass
 
 	if cluname is None:
-		luci_log.debug_verbose('serviceStop1: no cluster name for %s' % svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStop1: no cluster name for %s' % svcname)
 		return None
 
 	batch_number, result = rq.stopService(rc, svcname)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('serviceStop2: stop %s failed' % svcname)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStop2: stop %s failed' % svcname)
 		return None
 
 	try:
 		set_node_flag(self, cluname, rc.hostname(), str(batch_number), SERVICE_STOP, "Stopping service \'%s\'" % svcname)
 	except Exception, e:
-		luci_log.debug_verbose('serviceStop3: error setting flags for service %s for cluster %s' % (svcname, cluname))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('serviceStop3: error setting flags for service %s for cluster %s' % (svcname, cluname))
 
 	response = req.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -3286,7 +3335,8 @@
 		try:
 			task = request.form['task']
 		except:
-			luci_log.debug_verbose('CTP1: no task specified')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CTP1: no task specified')
 			task = None
 
 	if not model:
@@ -3300,12 +3350,14 @@
 				if not cluname:
 					raise Exception, 'cluname is blank'
 			except:
-				luci_log.debug_verbose('CTP0: no model/no cluster name')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CTP0: no model/no cluster name')
 				return 'Unable to determine the cluster name'
 		try:
 			model = getModelForCluster(self, cluname)
 		except Exception, e:
-			luci_log.debug_verbose('CPT1: GMFC failed for %s' % cluname)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CTP2: GMFC failed for %s' % cluname)
 			model = None
 
 	if not model:
@@ -3337,43 +3389,50 @@
 		if not nodefolder:
 			raise Exception, 'cannot find database object at %s' % path
 	except Exception, e:
-		luci_log.debug('NLO: node_leave_cluster err: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL0: node_leave_cluster err: %r' % e)
 		return None
 
 	objname = '%s____flag' % nodename_resolved
 	fnpresent = noNodeStatusPresent(self, nodefolder, objname, nodename_resolved)
 
 	if fnpresent is None:
-		luci_log.debug('NL1: An error occurred while checking flags for %s' \
-			% nodename_resolved)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL1: An error occurred while checking flags for %s' \
+				% nodename_resolved)
 		return None
 
 	if fnpresent is False:
-		luci_log.debug('NL2: flags are still present for %s -- bailing out' \
-			% nodename_resolved)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('NL2: flags are still present for %s -- bailing out' \
+				% nodename_resolved)
 		return None
 
 	batch_number, result = rq.nodeLeaveCluster(rc)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL3: nodeLeaveCluster error: batch_number and/or result is None')
 		return None
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_LEAVE_CLUSTER, 'Node "%s" leaving cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
-		luci_log.debug_verbose('NL4: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NL4: failed to set flags: %r' % e)
 	return True
 
 def nodeJoin(self, rc, clustername, nodename_resolved):
 	batch_number, result = rq.nodeJoinCluster(rc)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('NJ0: batch_number and/or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ0: batch_number and/or result is None')
 		return None
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_JOIN_CLUSTER, 'Node "%s" joining cluster "%s"' % (nodename_resolved, clustername))
 	except Exception, e:
-		luci_log.debug_verbose('NJ1: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NJ1: failed to set flags: %r' % e)
 	return True
 
 def clusterStart(self, model):
@@ -3393,12 +3452,14 @@
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
-			luci_log.debug_verbose('CStart: RC %s: %s' \
-				% (nodename_resolved, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart: RC %s: %r' \
+					% (nodename_resolved, e))
 			errors += 1
 			continue
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStart1: nodeJoin %s' % nodename_resolved)
 			errors += 1
 
 	return errors
@@ -3420,29 +3481,34 @@
 		try:
 			rc = RicciCommunicator(nodename_resolved)
 		except Exception, e:
-			luci_log.debug_verbose('CStop0: [%d] RC %s: %s' \
-				% (delete is True, str(nodename_resolved), str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('CStop0: [%d] RC %s: %r' \
+					% (delete is True, str(nodename_resolved), e))
 			errors += 1
 			continue
 
 		if delete is True:
 			if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=True) is None:
-				luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop1: [1] nodeDelete failed')
 				errors += 1
 		else:
 			if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-				luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
-					% (nodename_resolved))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('CStop2: [0] nodeLeave %s' \
+						% (nodename_resolved))
 				errors += 1
 	return errors
 
 def clusterRestart(self, model):
 	snum_err = clusterStop(self, model)
 	if snum_err:
-		luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' % snum_err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart0: clusterStop: %d errs' % snum_err)
 	jnum_err = clusterStart(self, model)
 	if jnum_err:
-		luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' % jnum_err)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('cluRestart1: clusterStart: %d errs' % jnum_err)
 	return snum_err + jnum_err
 
 def clusterDelete(self, model):
@@ -3456,15 +3522,17 @@
 	try:
 		clustername = model.getClusterName()
 	except Exception, e:
-		luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete0: unable to get cluster name')
 		return None
 
 	if num_errors < 1:
 		try:
 			delCluster(self, clustername)
 		except Exception, e:
-			luci_log.debug_verbose('clusterDelete1: %s: %s' \
-				% (clustername, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete1: %s: %r' \
+					% (clustername, e))
 
 		try:
 			clusterfolder = self.restrictedTraverse('%s%s' % (CLUSTER_FOLDER_PATH, clustername))
@@ -3472,23 +3540,27 @@
 				clusters = self.restrictedTraverse(str(CLUSTER_FOLDER_PATH))
 				clusters.manage_delObjects([clustername])
 		except Exception, e:
-			luci_log.debug_verbose('clusterDelete2: %s %s' \
-				% (clustername, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('clusterDelete2: %s %r' \
+					% (clustername, e))
 		return CLUSTERLIST
 	else:
-		luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
-			% (clustername, num_errors))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('clusterDelete2: %s: %d errors' \
+				% (clustername, num_errors))
 
 def forceNodeReboot(self, rc, clustername, nodename_resolved):
 	batch_number, result = rq.nodeReboot(rc)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('FNR0: batch_number and/or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR0: batch_number and/or result is None')
 		return None
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_REBOOT, 'Node "%s" is being rebooted' % nodename_resolved)
 	except Exception, e:
-		luci_log.debug_verbose('FNR1: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNR1: failed to set flags: %r' % e)
 	return True
 
 def forceNodeFence(self, clustername, nodename, nodename_resolved):
@@ -3499,8 +3571,9 @@
 		if not clusterfolder:
 			raise Exception, 'no cluster folder at %s' % path
 	except Exception, e:
-		luci_log.debug('FNF0: The cluster folder %s could not be found: %s' \
-			 % (clustername, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF0: The cluster folder %s could not be found: %r' \
+				 % (clustername, e))
 		return None
 
 	try:
@@ -3508,8 +3581,9 @@
 		if not nodes or len(nodes) < 1:
 			raise Exception, 'no cluster nodes'
 	except Exception, e:
-		luci_log.debug('FNF1: No cluster nodes for %s were found: %s' \
-			% (clustername, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('FNF1: No cluster nodes for %s were found: %r' \
+				% (clustername, e))
 		return None
 
 	found_one = False
@@ -3522,8 +3596,9 @@
 			if not rc:
 				raise Exception, 'rc is None'
 		except Exception, e:
-			luci_log.debug('FNF2: ricci error for host %s: %s' \
-				% (node[0], str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('FNF2: ricci error for host %s: %r' \
+					% (node[0], e))
 			continue
 
 		if not rc.authed():
@@ -3548,13 +3623,15 @@
 
 	batch_number, result = rq.nodeFence(rc, nodename)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('FNF3: batch_number and/or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF3: batch_number and/or result is None')
 		return None
 
 	try:
 		set_node_flag(self, clustername, rc.hostname(), str(batch_number), NODE_FENCE, 'Node "%s" is being fenced' % nodename_resolved)
 	except Exception, e:
-		luci_log.debug_verbose('FNF4: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('FNF4: failed to set flags: %r' % e)
 	return True
 
 def nodeDelete(self, rc, model, clustername, nodename, nodename_resolved, delete_cluster=False):
@@ -3574,8 +3651,9 @@
 			if not clusterfolder:
 				raise Exception, 'no cluster folder at %s' % path
 		except Exception, e:
-			luci_log.debug_verbose('ND0: node delete error for cluster %s: %s' \
-				% (clustername, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND0: node delete error for cluster %s: %r' \
+					% (clustername, e))
 			return None
 
 		try:
@@ -3583,8 +3661,9 @@
 			if not nodes or len(nodes) < 1:
 				raise Exception, 'no cluster nodes in DB'
 		except Exception, e:
-			luci_log.debug_verbose('ND1: node delete error for cluster %s: %s' \
-				% (clustername, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND1: node delete error for cluster %s: %r' \
+					% (clustername, e))
 
 		for node in nodes:
 			if node[1].getId().find(nodename) != (-1):
@@ -3598,7 +3677,8 @@
 				if not rc2:
 					raise Exception, 'ND1a: rc2 is None'
 			except Exception, e:
-				luci_log.info('ND2: ricci %s error: %s' % (node[0], str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.info('ND2: ricci %s error: %r' % (node[0], e))
 				continue
 
 			if not rc2.authed():
@@ -3613,7 +3693,8 @@
 				except:
 					pass
 
-				luci_log.debug_verbose('ND3: %s is not authed' % node[0])
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ND3: %s is not authed' % node[0])
 				rc2 = None
 				continue
 			else:
@@ -3621,14 +3702,16 @@
 				break
 
 		if not found_one:
-			luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND4: unable to find ricci agent to delete %s from %s' % (nodename, clustername))
 			return None
 
 	# First, delete cluster.conf from node to be deleted.
 	# next, have node leave cluster.
 	batch_number, result = rq.nodeLeaveCluster(rc, purge=True)
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('ND5: batch_number and/or result is None')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND5: batch_number and/or result is None')
 		return None
 
 	# Unless we're deleting the whole cluster, it is not worth
@@ -3640,7 +3723,8 @@
 		try:
 			set_node_flag(self, clustername, rc.hostname(), str(batch_number), CLUSTER_DELETE, 'Deleting cluster "%s": Deleting node "%s"' % (clustername, nodename_resolved))
 		except Exception, e:
-			luci_log.debug_verbose('ND5a: failed to set flags: %s' % str(e))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND5a: failed to set flags: %r' % e)
 	else:
 		delete_target = None
 		nodelist = model.getNodes()
@@ -3654,15 +3738,17 @@
 				continue
 
 		if delete_target is None:
-			luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
-				% (nodename, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6: no delete target for %s in cluster %s' \
+					% (nodename, clustername))
 			return None
 
 		try:
 			model.deleteNode(delete_target)
 		except Exception, e:
-			luci_log.debug_verbose('ND6a: deleteNode %s failed: %s' \
-				% (delete_target.getName(), str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND6a: deleteNode %s failed: %r' \
+					% (delete_target.getName(), e))
 
 		try:
 			model.setModified(True)
@@ -3670,13 +3756,15 @@
 			if not str_buf:
 				raise Exception, 'model string is blank'
 		except Exception, e:
-			luci_log.debug_verbose('ND7: exportModelAsString: %s' % str(e))
-			return None
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND7: exportModelAsString: %r' % e)
+			return None
 
 		# propagate the new cluster.conf via the second node
 		batch_number, result = rq.setClusterConf(rc2, str(str_buf))
 		if batch_number is None:
-			luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ND8: batch number is None after del node in NTP')
 			return None
 
 	# Now we need to delete the node from the DB
@@ -3685,8 +3773,9 @@
 		clusterfolder = self.restrictedTraverse(path)
 		clusterfolder.manage_delObjects([nodename_resolved])
 	except Exception, e:
-		luci_log.debug_verbose('ND9: error deleting %s at %s: %s' \
-			% (nodename_resolved, path, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND9: error deleting %s at %s: %r' \
+				% (nodename_resolved, path, e))
 
 	if delete_cluster:
 		return True
@@ -3694,7 +3783,8 @@
 	try:
 		set_node_flag(self, clustername, rc2.hostname(), str(batch_number), NODE_DELETE, "Deleting node \'%s\'" % nodename_resolved)
 	except Exception, e:
-		luci_log.debug_verbose('ND10: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ND10: failed to set flags: %r' % e)
 	return True
 
 def nodeTaskProcess(self, model, request):
@@ -3704,7 +3794,8 @@
 		try:
 			clustername = request.form['clustername']
 		except:
-			luci_log.debug('NTP0: missing cluster name')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP0: missing cluster name')
 			return (False, {'errors': [ 'No cluster name was given' ]})
 
 	try:
@@ -3713,7 +3804,8 @@
 		try:
 			nodename = request.form['nodename']
 		except:
-			luci_log.debug('NTP1: missing node name')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP1: missing node name')
 			return (False, {'errors': [ 'No node name was given' ]})
 
 	try:
@@ -3722,7 +3814,8 @@
 		try:
 			task = request.form['task']
 		except:
-			luci_log.debug('NTP2: missing task')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP2: missing task')
 			return (False, {'errors': [ 'No node task was given' ]})
 
 	nodename_resolved = resolve_nodename(self, clustername, nodename)
@@ -3736,23 +3829,27 @@
 			if not rc:
 				raise Exception, 'rc is None'
 		except RicciError, e:
-			luci_log.debug('NTP3: ricci error from %s: %s' \
-				% (nodename_resolved, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP3: ricci error from %s: %r' \
+					% (nodename_resolved, e))
 			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 		except:
-			luci_log.debug('NTP4: ricci error from %s: %s' \
-				% (nodename_resolved, str(e)))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP4: ricci error from %s: %r' \
+					% (nodename_resolved, e))
 			return (False, {'errors': [ 'Unable to connect to the ricci agent on %s' % nodename_resolved ]})
 
 		cluinfo = rc.cluster_info()
 		if not cluinfo[0] and not cluinfo[1]:
-			luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
-				% (nodename_resolved, clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
+					% (nodename_resolved, clustername))
 			return (False, {'errors': [ 'Node "%s" reports it is not in a cluster' % nodename_resolved ]})
 
 		cname = clustername.lower()
 		if cname != cluinfo[0].lower() and cname != cluinfo[1].lower():
-			luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
 			return (False, {'errors': [ 'Node "%s" reports it in cluster "%s." We expect it to be a member of cluster "%s"' % (nodename_resolved, cluinfo[0], clustername) ]})
 
 		if not rc.authed():
@@ -3772,13 +3869,15 @@
 				pass
 
 		if rc is None:
-			luci_log.debug('NTP7: node %s is not authenticated' \
-				% nodename_resolved)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('NTP7: node %s is not authenticated' \
+					% nodename_resolved)
 			return (False, {'errors': [ 'Node "%s" is not authenticated' % nodename_resolved ]})
 
 	if task == NODE_LEAVE_CLUSTER:
 		if nodeLeave(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('NTP8: nodeLeave failed')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP8: nodeLeave failed')
 			return (False, {'errors': [ 'Node "%s" failed to leave cluster "%s"' % (nodename_resolved, clustername) ]})
 
 		response = request.RESPONSE
@@ -3786,7 +3885,8 @@
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_JOIN_CLUSTER:
 		if nodeJoin(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('NTP9: nodeJoin failed')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP9: nodeJoin failed')
 			return (False, {'errors': [ 'Node "%s" failed to join cluster "%s"' % (nodename_resolved, clustername) ]})
 
 		response = request.RESPONSE
@@ -3794,7 +3894,8 @@
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_REBOOT:
 		if forceNodeReboot(self, rc, clustername, nodename_resolved) is None:
-			luci_log.debug_verbose('NTP10: nodeReboot failed')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP10: nodeReboot failed')
 			return (False, {'errors': [ 'Node "%s" failed to reboot' \
 				% nodename_resolved ]})
 
@@ -3803,7 +3904,8 @@
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_FENCE:
 		if forceNodeFence(self, clustername, nodename, nodename_resolved) is None:
-			luci_log.debug_verbose('NTP11: nodeFencefailed')
+			if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('NTP11: nodeFence failed')
 			return (False, {'errors': [ 'Fencing of node "%s" failed' \
 				% nodename_resolved]})
 
@@ -3812,7 +3914,8 @@
 			% (request['URL'], NODES, clustername))
 	elif task == NODE_DELETE:
 		if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
-			luci_log.debug_verbose('NTP12: nodeDelete failed')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('NTP12: nodeDelete failed')
 			return (False, {'errors': [ 'Deletion of node "%s" from cluster "%s" failed' % (nodename_resolved, clustername) ]})
 
 		response = request.RESPONSE
@@ -3826,7 +3929,8 @@
 		try:
 			nodename = request.form['nodename']
 		except:
-			luci_log.debug_verbose('Unable to get node name to retrieve logging information')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL0: no node name')
 			return 'Unable to get node name to retrieve logging information'
 
 	clustername = None
@@ -3839,7 +3943,8 @@
 				raise
 		except:
 			clustername = None
-			luci_log.debug_verbose('Unable to find cluster name while retrieving logging information for %s' % nodename)
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL1: no cluster for %s' % nodename)
 	except:
 		pass
 
@@ -3851,28 +3956,32 @@
 	try:
 		rc = RicciCommunicator(nodename_resolved)
 	except RicciError, e:
-		luci_log.debug_verbose('Ricci error while getting logs for %s: %s' \
-			% (nodename_resolved, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL2: ricci error %s: %r' \
+				% (nodename_resolved, e))
 		return 'Ricci error while getting logs for %s' % nodename_resolved
-	except:
-		luci_log.debug_verbose('Unexpected exception while getting logs for %s' \
-			% nodename_resolved)
+	except Exception, e:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GNL3: unexpected exception for %s: %r' \
+				% (nodename_resolved, e))
 		return 'Ricci error while getting logs for %s' % nodename_resolved
 
 	if not rc.authed():
 		try:
 			snode = getStorageNode(self, nodename)
 			setNodeStatus(snode, CLUSTER_NODE_NEED_AUTH)
-		except:
-			pass
+		except Exception, e:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GNL4: %s: %r' % (nodename_resolved, e))
 
 		if clustername:
 			try:
 				cnode = getClusterNode(self, nodename, clustername)
 				setNodeStatus(cnode, CLUSTER_NODE_NEED_AUTH)
-			except:
-				pass
-
+			except Exception, e:
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('GNL5: %s: %r' \
+						% (nodename_resolved, e))
 		return 'Luci is not authenticated to node %s. Please reauthenticate first' % nodename
 
 	return rq.getNodeLogs(rc)
@@ -3894,7 +4003,8 @@
 			try:
 				cluname = req.form['clusterName']
 			except:
-				luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ICB0: No cluster name -- returning empty map')
 				return busy_map
 
 	path = '%s%s' % (CLUSTER_FOLDER_PATH, cluname)
@@ -3904,43 +4014,49 @@
 		if not clusterfolder:
 			raise Exception, 'clusterfolder is None'
 	except Exception, e:
-		luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %s -- returning empty map' % (cluname, path, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ICB1: cluster %s [%s] folder missing: %r -- returning empty map' % (cluname, path, e))
 		return busy_map
 	except:
-		luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('ICB2: cluster %s [%s] folder missing: returning empty map' % (cluname, path))
 
 	try:
 		items = clusterfolder.objectItems('ManagedSystem')
 		if not items or len(items) < 1:
-			luci_log.debug_verbose('ICB3: NOT BUSY: no flags at %s for cluster %s' % (cluname, path))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ICB3: NOT BUSY: no flags at %s for cluster %s' % (cluname, path))
 			# This returns an empty map, and indicates not busy
 			return busy_map
 	except Exception, e:
-		luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %s' % (cluname, path, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('ICB4: An error occurred while looking for cluster %s flags at path %s: %r' % (cluname, path, e))
 		return busy_map
 	except:
-		luci_log.debug('ICB5: An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug('ICB5: An error occurred while looking for cluster %s flags at path %s' % (cluname, path))
 		return busy_map
 
-	luci_log.debug_verbose('ICB6: %s is busy: %d flags' \
-		% (cluname, len(items)))
+	if LUCI_DEBUG_MODE is True:
+		luci_log.debug_verbose('ICB6: %s is busy: %d flags' \
+			% (cluname, len(items)))
 	busy_map['busy'] = 'true'
 
 	# Ok, here is what is going on...if there is an item,
 	# we need to call ricci to get a batch report.
 	# This report will tell us one of three things:
 	#
-	# #1) the batch task is complete...delete ManagedSystem and render
-	#     normal page
-	# #2) The batch task is NOT done, so meta refresh in 5 secs and try again
-	# #3) The ricci agent has no recollection of the task,
-	#     so handle like 1 above
+	# #1)	the batch task is complete...delete ManagedSystem and render
+	#		normal page
+	# #2)	The batch task is NOT done, so meta refresh in 5 secs and try again
+	# #3)	The ricci agent has no recollection of the task,
+	#		so handle like 1 above
 	###
 	#
 	# Here is what we have to do:
 	# the map should have two lists:
-	#  One list of non-cluster create tasks
-	#  and one of cluster create task structs
+	#	One list of non-cluster create tasks
+	#	and one of cluster create task structs
 	# For each item in items, check if this is a cluster create tasktype
 	# If so, call RC, and then call the batch report method
 	# check for error...if error, report and then remove flag.
@@ -3952,57 +4068,65 @@
 		if tasktype == CLUSTER_ADD or tasktype == NODE_ADD:
 			node_report = {}
 			node_report['isnodecreation'] = True
-			node_report['iserror'] = False  #Default value
+			node_report['iserror'] = False #Default value
 			node_report['desc'] = item[1].getProperty(FLAG_DESC)
 			batch_xml = None
 			# This removes the 'flag' suffix
 			ricci = item[0].split('____')
 
-			luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
-				% (ricci[0], item[0]))
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ICB6A: using host %s for rc for item %s' \
+					% (ricci[0], item[0]))
 
 			try:
 				rc = RicciCommunicator(ricci[0])
 				if not rc:
 					rc = None
-					luci_log.debug_verbose('ICB6b: rc is none')
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('ICB6b: rc is none')
 			except Exception, e:
 				rc = None
-				luci_log.debug_verbose('ICB7: RC: %s: %s' % (cluname, str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ICB7: RC: %s: %r' % (cluname, e))
 
 			batch_id = None
 			if rc is not None:
 				try:
 					batch_id = item[1].getProperty(BATCH_ID)
-					luci_log.debug_verbose('ICB8: got batch_id %s from %s' \
-						% (batch_id, item[0]))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('ICB8: got batch_id %s from %s' \
+							% (batch_id, item[0]))
 				except Exception, e:
-					try:
-						luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %s' % (item[0], str(e)))
-					except:
-						luci_log.debug_verbose('ICB8C: failed to get batch_id from %s' % item[0])
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('ICB8B: failed to get batch_id from %s: %r' % (item[0], e))
 
 				if batch_id is not None:
 					try:
 						batch_xml = rc.batch_report(batch_id)
 						if batch_xml is not None:
-							luci_log.debug_verbose('ICB8D: batch_xml for %s from batch_report is not None -- getting batch status' % batch_id)
+							if LUCI_DEBUG_MODE is True:
+								luci_log.debug_verbose('ICB8D: batch_xml for %s from batch_report is not None -- getting batch status' % batch_id)
 							(creation_status, total) = batch_status(batch_xml)
 							try:
-								luci_log.debug_verbose('ICB8E: batch status returned (%d,%d)' % (creation_status, total))
+								if LUCI_DEBUG_MODE is True:
+									luci_log.debug_verbose('ICB8E: batch status returned (%d,%d)' % (creation_status, total))
 							except:
-								luci_log.debug_verbose('ICB8F: error logging batch status return')
+								if LUCI_DEBUG_MODE is True:
+									luci_log.debug_verbose('ICB8F: error logging batch status return')
 						else:
-							luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
+							if LUCI_DEBUG_MODE is True:
+								luci_log.debug_verbose('ICB9: batch_xml for cluster is None')
 					except Exception, e:
-						luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %s' % str(e))
+						if LUCI_DEBUG_MODE is True:
+							luci_log.debug_verbose('ICB9A: error getting batch_xml from rc.batch_report: %r' % e)
 					# No contact with ricci (-1000)
 					creation_status = RICCI_CONNECT_FAILURE
 					# set to avoid next if statement
 					batch_xml = 'bloody_failure'
 
 			if rc is None or batch_id is None:
-				luci_log.debug_verbose('ICB12: unable to connect to a ricci agent for cluster %s to get batch status')
+				if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('ICB12: unable to connect to a ricci agent for cluster %s to get batch status' % cluname)
 				# No contact with ricci (-1000)
 				creation_status = RICCI_CONNECT_FAILURE
 				# set to avoid next if statement
@@ -4014,18 +4138,20 @@
 					# We have not displayed this message yet
 					node_report['desc'] = REDIRECT_MSG
 					node_report['iserror'] = True
-					node_report['errormessage'] = ""
+					node_report['errormessage'] = ''
 					nodereports.append(node_report)
 					redirect_message = True
 
-				luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ICB13: batch job is done -- deleting %s' % item[0])
 				clusterfolder.manage_delObjects([item[0]])
 				continue
 
 			del_db_obj = False
 			if creation_status < 0:
 				# an error was encountered
-				luci_log.debug_verbose('ICB13a: %s: CS %d for %s' % (cluname, creation_status, ricci[0]))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ICB13a: %s: CS %d for %s' % (cluname, creation_status, ricci[0]))
 				if creation_status == RICCI_CONNECT_FAILURE:
 					laststatus = item[1].getProperty(LAST_STATUS)
 
@@ -4088,12 +4214,14 @@
 
 				try:
 					if del_db_obj is True:
-						luci_log.debug_verbose('ICB13a: %s node creation failed for %s: %d: deleting DB entry' % (cluname, ricci[0], creation_status))
+						if LUCI_DEBUG_MODE is True:
+							luci_log.debug_verbose('ICB13a: %s node creation failed for %s: %d: deleting DB entry' % (cluname, ricci[0], creation_status))
 						clusterfolder.manage_delObjects([ricci[0]])
 						clusterfolder.manage_delObjects([item[0]])
 				except Exception, e:
-					luci_log.debug_verbose('ICB14: delObjects: %s: %s' \
-						% (item[0], str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('ICB14: delObjects: %s: %r' \
+							% (item[0], e))
 
 				nodereports.append(node_report)
 				continue
@@ -4105,11 +4233,13 @@
 					node_report['statusmessage'] = 'Node created successfully. %s' % REDIRECT_MSG
 					node_report['statusindex'] = creation_status
 					nodereports.append(node_report)
+
 					try:
 						clusterfolder.manage_delObjects([item[0]])
 					except Exception, e:
-						luci_log.info('ICB15: Unable to delete %s: %s' \
-							% (item[0], str(e)))
+						if LUCI_DEBUG_MODE is True:
+							luci_log.info('ICB15: Unable to delete %s: %r' \
+								% (item[0], e))
 					continue
 				else:
 					busy_map['busy'] = 'true'
@@ -4123,7 +4253,8 @@
 						item[1].manage_delProperties(propslist)
 						item[1].manage_addProperty(LAST_STATUS, creation_status, 'int')
 					except Exception, e:
-						luci_log.debug_verbose('ICB16: last_status err: %s %d: %s' % (item[0], creation_status, str(e)))
+						if LUCI_DEBUG_MODE is True:
+							luci_log.debug_verbose('ICB16: last_status err: %s %d: %r' % (item[0], creation_status, e))
 					continue
 		else:
 			node_report = {}
@@ -4137,8 +4268,9 @@
 				rc = None
 				finished = -1
 				err_msg = ''
-				luci_log.debug_verbose('ICB15: ricci error: %s: %s' \
-					% (ricci[0], str(e)))
+				if LUCI_DEBUG_MODE is True:
+					luci_log.debug_verbose('ICB15: ricci error: %s: %r' \
+						% (ricci[0], e))
 
 			if rc is not None:
 				batch_res = rq.checkBatch(rc, item[1].getProperty(BATCH_ID))
@@ -4160,8 +4292,9 @@
 				try:
 					clusterfolder.manage_delObjects([item[0]])
 				except Exception, e:
-					luci_log.info('ICB16: Unable to delete %s: %s' \
-						% (item[0], str(e)))
+					if LUCI_DEBUG_MODE is True:
+						luci_log.info('ICB16: Unable to delete %s: %r' \
+							% (item[0], e))
 			else:
 				node_report = {}
 				busy_map['busy'] = 'true'
@@ -4173,12 +4306,11 @@
 		part1 = req['ACTUAL_URL']
 		part2 = req['QUERY_STRING']
 
-		dex = part2.find("&busyfirst")
-		if dex != (-1):
-			tmpstr = part2[:dex] #This strips off busyfirst var
-		part2 = tmpstr
-		###FIXME - The above assumes that the 'busyfirst' query var is at the
-		###end of the URL...
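+		# Strip the 'busyfirst' query variable wherever it appears in the
+		# query string, instead of assuming it is the last variable.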
+		part2 = part2.replace('&busyfirst=true', '')
 		busy_map['refreshurl'] = '5; url=%s?%s' % (part1, part2)
 		req['specialpagetype'] = '1'
 	else:
@@ -4199,7 +4331,8 @@
 	except:
 		# default to rhel5 if something crazy happened.
 		try:
-			luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
 		except:
 			# this can throw an exception if the original exception
 			# is caused by rc being None or stale.
@@ -4214,7 +4347,8 @@
 	try:
 		model = request.SESSION.get('model')
 	except Exception, e:
-		luci_log.debug_verbose('delService0: no model: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService0: no model: %r' % e)
 		return (False, {'errors': [ errstr ] })
 
 	name = None
@@ -4227,7 +4361,8 @@
 			pass
 
 	if name is None:
-		luci_log.debug_verbose('delService1: no service name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService1: no service name')
 		return (False, {'errors': [ '%s: no service name was provided' % errstr ]})
 
 	clustername = None
@@ -4240,12 +4375,14 @@
 			pass
 
 	if clustername is None:
-		luci_log.debug_verbose('delService2: no cluster name for %s' % name)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService2: no cluster name for %s' % name)
 		return (False, {'errors': [ '%s: no cluster name was provided' % errstr ]})
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService3: unable to get ricci agent for cluster %s' % clustername)
 		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
 
 	try:
@@ -4253,13 +4390,15 @@
 		if not ragent:
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
-		luci_log.debug_verbose('delService4: %s: %s' % (errstr, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService4: %s: %r' % (errstr, e))
 		return (False, {'errors': [ '%s: unable to find a Ricci agent for this cluster' % errstr ]})
 
 	try:
 		model.deleteService(name)
 	except Exception, e:
-		luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s' % (name, clustername))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService5: Unable to find a service named %s for cluster %s: %r' % (name, clustername, e))
 		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
 
 	try:
@@ -4268,19 +4407,21 @@
 		if not conf:
 			raise Exception, 'model string is blank'
 	except Exception, e:
-		luci_log.debug_verbose('delService6: exportModelAsString failed: %s' \
-			% str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService6: exportModelAsString failed: %r' % e)
 		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
 
 	batch_number, result = rq.setClusterConf(rc, str(conf))
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('delService7: missing batch and/or result')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService7: missing batch and/or result')
 		return (False, {'errors': [ '%s: error removing service "%s."' % (errstr, name) ]})
 
 	try:
 		set_node_flag(self, clustername, ragent, str(batch_number), SERVICE_DELETE, 'Removing service "%s"' % name)
 	except Exception, e:
-		luci_log.debug_verbose('delService8: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delService8: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -4292,7 +4433,8 @@
 	try:
 		model = request.SESSION.get('model')
 	except Exception, e:
-		luci_log.debug_verbose('delResource0: no model: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource0: no model: %r' % e)
 		return errstr
 
 	name = None
@@ -4305,7 +4447,8 @@
 			pass
 
 	if name is None:
-		luci_log.debug_verbose('delResource1: no resource name')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource1: no resource name')
 		return '%s: no resource name was provided' % errstr
 
 	clustername = None
@@ -4318,7 +4461,8 @@
 			pass
 
 	if clustername is None:
-		luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource2: no cluster name for %s' % name)
 		return '%s: could not determine the cluster name' % errstr
 
 	try:
@@ -4326,7 +4470,8 @@
 		if not ragent:
 			raise Exception, 'unable to determine the hostname of the ricci agent'
 	except Exception, e:
-		luci_log.debug_verbose('delResource3: %s: %s' % (errstr, str(e)))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource3: %s: %r' % (errstr, e))
 		return '%s: could not determine the ricci agent hostname' % errstr
 
 	resPtr = model.getResourcesPtr()
@@ -4340,7 +4485,8 @@
 			break
 
 	if not found:
-		luci_log.debug_verbose('delResource4: cant find res %s' % name)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource4: can\'t find res %s' % name)
 		return '%s: the specified resource was not found' % errstr
 
 	try:
@@ -4349,19 +4495,21 @@
 		if not conf:
 			raise Exception, 'model string is blank'
 	except Exception, e:
-		luci_log.debug_verbose('delResource5: exportModelAsString failed: %s' \
-			% str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource5: exportModelAsString failed: %r' % e)
 		return errstr
 
 	batch_number, result = rq.setClusterConf(rc, str(conf))
 	if batch_number is None or result is None:
-		luci_log.debug_verbose('delResource6: missing batch and/or result')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource6: missing batch and/or result')
 		return errstr
 
 	try:
 		set_node_flag(self, clustername, ragent, str(batch_number), RESOURCE_REMOVE, 'Removing resource "%s"' % request['resourcename'])
 	except Exception, e:
-		luci_log.debug_verbose('delResource7: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('delResource7: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
@@ -4370,18 +4518,21 @@
 def addResource(self, request, model, res):
 	clustername = model.getClusterName()
 	if not clustername:
-		luci_log.debug_verbose('addResource0: no cluname from mb')
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource0: no cluname from mb')
 		return 'Unable to determine cluster name'
 
 	rc = getRicciAgent(self, clustername)
 	if not rc:
-		luci_log.debug_verbose('addResource1: %s' % clustername)
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource1: %s' % clustername)
 		return 'Unable to find a ricci agent for the %s cluster' % clustername
 
 	try:
 		model.getResourcesPtr().addChild(res)
 	except Exception, e:
-		luci_log.debug_verbose('addResource2: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource2: %r' % e)
 		return 'Unable to add the new resource'
 
 	try:
@@ -4390,22 +4541,25 @@
 		if not conf:
 			raise Exception, 'model string for %s is blank' % clustername
 	except Exception, e:
-		luci_log.debug_verbose('addResource3: exportModelAsString: %s' \
-			% str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource3: exportModelAsString: %r' % e)
 		return 'An error occurred while adding this resource'
 
 	try:
 		ragent = rc.hostname()
 		if not ragent:
-			luci_log.debug_verbose('addResource4: missing ricci hostname')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addResource4: missing ricci hostname')
 			raise Exception, 'unknown ricci agent hostname'
 
 		batch_number, result = rq.setClusterConf(rc, str(conf))
 		if batch_number is None or result is None:
-			luci_log.debug_verbose('addResource5: missing batch_number or result')
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('addResource5: missing batch_number or result')
 			raise Exception, 'unable to save the new cluster configuration'
 	except Exception, e:
-		luci_log.debug_verbose('addResource6: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource6: %r' % e)
 		return 'An error occurred while propagating the new cluster.conf: %s' % str(e)
 
 	try:
@@ -4421,61 +4575,13 @@
 
 		set_node_flag(self, clustername, ragent, str(batch_number), action_type, action_str)
 	except Exception, e:
-		luci_log.debug_verbose('addResource7: failed to set flags: %s' % str(e))
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('addResource7: failed to set flags: %r' % e)
 
 	response = request.RESPONSE
 	response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true'
 		% (request['URL'], RESOURCES, clustername))
 
-
-def appendModel(request, model):
-	try:
-		request.SESSION.set('model', model)
-	except Exception, e:
-		luci_log.debug_verbose('Appending model to request failed: %r' % e)
-		return 'An error occurred while storing the cluster model'
-
-def getModelBuilder(self, rc, isVirtualized):
-	try:
-		cluster_conf_node = rq.getClusterConf(rc)
-		if not cluster_conf_node:
-			raise Exception, 'getClusterConf returned None'
-	except Exception, e:
-		luci_log.debug_verbose('GMB0: unable to get cluster_conf_node in getModelBuilder: %r' % e)
-		return None
-
-	try:
-		model = ModelBuilder(0, None, None, cluster_conf_node)
-		if not model:
-			raise Exception, 'ModelBuilder() returned None'
-	except Exception, e:
-		try:
-			luci_log.debug_verbose('GMB1: An error occurred while trying to get model for conf "%r": %r' % (cluster_conf_node.toxml(), e))
-		except:
-			luci_log.debug_verbose('GMB1: ModelBuilder failed')
-		return None
-
-	model.setIsVirtualized(isVirtualized)
-	return model
-
-def getModelForCluster(self, clustername):
-	rc = getRicciAgent(self, clustername)
-	if not rc:
-		luci_log.debug_verbose('GMFC0: unable to find a ricci agent for %s' \
-			% clustername)
-		return None
-
-	try:
-		model = getModelBuilder(None, rc, rc.dom0())
-		if not model:
-			raise Exception, 'model is none'
-	except Exception, e:
-		luci_log.debug_verbose('GMFC1: unable to get model builder for %s: %r' \
-			 % (clustername, e))
-		return None
-
-	return model
-
 def process_cluster_conf_editor(self, req):
 	clustername = req['clustername']
 	msg_list = list(('\n'))
@@ -4511,12 +4617,14 @@
 				msg_list.append('Propagating the new cluster.conf')
 				rc = getRicciAgent(self, clustername)
 				if not rc:
-					luci_log.debug_verbose('VFA: unable to find a ricci agent for the %s cluster' % clustername)
+					if LUCI_DEBUG_MODE is True:
+						luci_log.debug_verbose('VFA0: unable to find a ricci agent for the %s cluster' % clustername)
 					msg_list.append('\nUnable to contact a ricci agent for cluster "%s"\n\n' % clustername)
 				else:
 					batch_id, result = rq.setClusterConf(rc, cc_xml.toxml())
 					if batch_id is None or result is None:
-						luci_log.debug_verbose('VFA: setClusterConf: batchid or result is None')
+						if LUCI_DEBUG_MODE is True:
+							luci_log.debug_verbose('VFA1: setClusterConf: batchid or result is None')
 						msg_list.append('\nUnable to propagate the new cluster configuration for cluster "%s"\n\n' % clustername)
 					else:
 						msg_list.append(' - DONE\n')
@@ -4530,3 +4638,52 @@
 			cc = model.exportModelAsString()
 
 	return { 'msg': ''.join(msg_list), 'cluster_conf': cc }
+
+def getResourceInfo(model, request):
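+	# Extract the resource name (or, for IP resources, the address value)
+	# and the cluster name from the request.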
+	if not model:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRI0: no model object in session')
+		return {}
+
+	name = None
+	try:
+		name = request['resourcename']
+	except:
+		try:
+			name = request.form['resourcename']
+		except:
+			pass
+
+	if name is None:
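+		# IP resources are identified by their address rather than a name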
+		try:
+			res_type = request.form['type']
+			if res_type == 'ip':
+				name = request.form['value'].strip()
+		except:
+			pass
+
+	if name is None:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRI1: missing res name')
+		return {}
+
+	try:
+		cluname = request['clustername']
+	except:
+		try:
+			cluname = request.form['clustername']
+		except:
+			if LUCI_DEBUG_MODE is True:
+				luci_log.debug_verbose('GRI2: missing cluster name')
+			return {}
+
+	try:
+		baseurl = request['URL']
+	except:
+		if LUCI_DEBUG_MODE is True:
+			luci_log.debug_verbose('GRI3: missing URL')
+		return {}
+
+	#CALL
+	return {}
+